Merge ^/head r320971 through r320993.

This commit is contained in:
Dimitry Andric 2017-07-14 17:38:44 +00:00
commit 03f072d13d
Notes: svn2git 2020-12-20 02:59:44 +00:00
svn path=/projects/clang500-import/; revision=320994
90 changed files with 7812 additions and 3319 deletions

View File

@ -108,7 +108,7 @@ clean:
#------------------------------------------------------------------------------
# make install is validated only for Linux, OSX, Hurd and some BSD targets
#------------------------------------------------------------------------------
ifneq (,$(filter $(shell uname),Linux Darwin GNU/kFreeBSD GNU FreeBSD DragonFly NetBSD))
ifneq (,$(filter $(shell uname),Linux Darwin GNU/kFreeBSD GNU FreeBSD DragonFly NetBSD MSYS_NT))
HOST_OS = POSIX
CMAKE_PARAMS = -DZSTD_BUILD_CONTRIB:BOOL=ON -DZSTD_BUILD_STATIC:BOOL=ON -DZSTD_BUILD_TESTS:BOOL=ON -DZSTD_ZLIB_SUPPORT:BOOL=ON -DZSTD_LZMA_SUPPORT:BOOL=ON
@ -117,30 +117,36 @@ CMAKE_PARAMS = -DZSTD_BUILD_CONTRIB:BOOL=ON -DZSTD_BUILD_STATIC:BOOL=ON -DZSTD_B
list:
@$(MAKE) -pRrq -f $(lastword $(MAKEFILE_LIST)) : 2>/dev/null | awk -v RS= -F: '/^# File/,/^# Finished Make data base/ {if ($$1 !~ "^[#.]") {print $$1}}' | sort | egrep -v -e '^[^[:alnum:]]' -e '^$@$$' | xargs
.PHONY: install uninstall travis-install clangtest gpptest armtest usan asan uasan
.PHONY: install clangtest gpptest armtest usan asan uasan
install:
@$(MAKE) -C $(ZSTDDIR) $@
@$(MAKE) -C $(PRGDIR) $@
.PHONY: uninstall
uninstall:
@$(MAKE) -C $(ZSTDDIR) $@
@$(MAKE) -C $(PRGDIR) $@
.PHONY: travis-install
travis-install:
$(MAKE) install PREFIX=~/install_test_dir
.PHONY: gppbuild
gppbuild: clean
g++ -v
CC=g++ $(MAKE) -C programs all CFLAGS="-O3 -Wall -Wextra -Wundef -Wshadow -Wcast-align -Werror"
.PHONY: gcc5build
gcc5build: clean
gcc-5 -v
CC=gcc-5 $(MAKE) all MOREFLAGS="-Werror"
.PHONY: gcc6build
gcc6build: clean
gcc-6 -v
CC=gcc-6 $(MAKE) all MOREFLAGS="-Werror"
.PHONY: clangbuild
clangbuild: clean
clang -v
CXX=clang++ CC=clang $(MAKE) all MOREFLAGS="-Werror -Wconversion -Wno-sign-conversion -Wdocumentation"
@ -224,10 +230,10 @@ asan-%: clean
LDFLAGS=-fuse-ld=gold MOREFLAGS="-g -fno-sanitize-recover=all -fsanitize=address" $(MAKE) -C $(TESTDIR) $*
msan: clean
$(MAKE) test CC=clang MOREFLAGS="-g -fsanitize=memory -fno-omit-frame-pointer" # datagen.c fails this test for no obvious reason
$(MAKE) test CC=clang MOREFLAGS="-g -fsanitize=memory -fno-omit-frame-pointer" HAVE_LZMA=0 # datagen.c fails this test for no obvious reason
msan-%: clean
LDFLAGS=-fuse-ld=gold MOREFLAGS="-fno-sanitize-recover=all -fsanitize=memory -fno-omit-frame-pointer" $(MAKE) -C $(TESTDIR) $*
LDFLAGS=-fuse-ld=gold MOREFLAGS="-g -fno-sanitize-recover=all -fsanitize=memory -fno-omit-frame-pointer" FUZZER_FLAGS=--no-big-tests $(MAKE) -C $(TESTDIR) HAVE_LZMA=0 $*
asan32: clean
$(MAKE) -C $(TESTDIR) test32 CC=clang MOREFLAGS="-g -fsanitize=address"
@ -236,10 +242,11 @@ uasan: clean
$(MAKE) test CC=clang MOREFLAGS="-g -fno-sanitize-recover=all -fsanitize-recover=signed-integer-overflow -fsanitize=address,undefined"
uasan-%: clean
LDFLAGS=-fuse-ld=gold MOREFLAGS="-Og -fno-sanitize-recover=all -fsanitize-recover=signed-integer-overflow -fsanitize=address,undefined" $(MAKE) -C $(TESTDIR) $*
LDFLAGS=-fuse-ld=gold MOREFLAGS="-g -fno-sanitize-recover=all -fsanitize-recover=signed-integer-overflow -fsanitize=address,undefined" $(MAKE) -C $(TESTDIR) $*
tsan-%: clean
LDFLAGS=-fuse-ld=gold MOREFLAGS="-g -fno-sanitize-recover=all -fsanitize=thread" $(MAKE) -C $(TESTDIR) $*
LDFLAGS=-fuse-ld=gold MOREFLAGS="-g -fno-sanitize-recover=all -fsanitize=thread" $(MAKE) -C $(TESTDIR) $* FUZZER_FLAGS=--no-big-tests
apt-install:
sudo apt-get -yq --no-install-suggests --no-install-recommends --force-yes install $(APT_PACKAGES)

View File

@ -1,3 +1,18 @@
v1.3.0
cli : new : `--list` command, by Paul Cruz
cli : changed : xz/lzma support enabled by default
cli : changed : `-t *` continue processing list after a decompression error
API : added : ZSTD_versionString()
API : promoted to stable status : ZSTD_getFrameContentSize(), by Sean Purcell
API exp : new advanced API : ZSTD_compress_generic(), ZSTD_CCtx_setParameter()
API exp : new : API for static or external allocation : ZSTD_initStatic?Ctx()
API exp : added : ZSTD_decompressBegin_usingDDict(), requested by Guy Riddle (#700)
API exp : clarified memory estimation / measurement functions.
API exp : changed : strongest strategy renamed ZSTD_btultra, fastest strategy ZSTD_fast set to 1
tools : decodecorpus can generate random dictionary-compressed samples, by Paul Cruz
new : contrib/seekable_format, demo and API, by Sean Purcell
changed : contrib/linux-kernel, updated version and license, by Nick Terrell
v1.2.0
cli : changed : Multithreading enabled by default (use target zstd-nomt or HAVE_THREAD=0 to disable)
cli : new : command -T0 means "detect and use nb of cores", by Sean Purcell

View File

@ -6,10 +6,17 @@ and a command line utility producing and decoding `.zst` and `.gz` files.
For other programming languages,
you can consult a list of known ports on [Zstandard homepage](http://www.zstd.net/#other-languages).
|Branch |Status |
|------------|---------|
|master | [![Build Status](https://travis-ci.org/facebook/zstd.svg?branch=master)](https://travis-ci.org/facebook/zstd) |
|dev | [![Build Status](https://travis-ci.org/facebook/zstd.svg?branch=dev)](https://travis-ci.org/facebook/zstd) |
| dev branch status |
|-------------------|
| [![Build Status][travisDevBadge]][travisLink] [![Build status][AppveyorDevBadge]][AppveyorLink] [![Build status][CircleDevBadge]][CircleLink]
[travisDevBadge]: https://travis-ci.org/facebook/zstd.svg?branch=dev "Continuous Integration test suite"
[travisLink]: https://travis-ci.org/facebook/zstd
[AppveyorDevBadge]: https://ci.appveyor.com/api/projects/status/xt38wbdxjk5mrbem/branch/dev?svg=true "Windows test suite"
[AppveyorLink]: https://ci.appveyor.com/project/YannCollet/zstd-p0yf0
[CircleDevBadge]: https://circleci.com/gh/facebook/zstd/tree/dev.svg?style=shield "Short test suite"
[CircleLink]: https://circleci.com/gh/facebook/zstd
As a reference, several fast compression algorithms were tested and compared
on a server running Linux Debian (`Linux version 4.8.0-1-amd64`),
@ -60,11 +67,11 @@ Previous charts provide results applicable to typical file and stream scenarios
The smaller the amount of data to compress, the more difficult it is to compress. This problem is common to all compression algorithms, and the reason is that compression algorithms learn from past data how to compress future data. But at the beginning of a new data set, there is no "past" to build upon.
To solve this situation, Zstd offers a __training mode__, which can be used to tune the algorithm for a selected type of data.
Training Zstandard is achieved by provide it with a few samples (one file per sample). The result of this training is stored in a file called "dictionary", which must be loaded before compression and decompression.
Training Zstandard is achieved by providing it with a few samples (one file per sample). The result of this training is stored in a file called "dictionary", which must be loaded before compression and decompression.
Using this dictionary, the compression ratio achievable on small data improves dramatically.
The following example uses the `github-users` [sample set](https://github.com/facebook/zstd/releases/tag/v1.1.3), created from [github public API](https://developer.github.com/v3/users/#get-all-users).
It consists of roughly 10K records weighting about 1KB each.
It consists of roughly 10K records weighing about 1KB each.
Compression Ratio | Compression Speed | Decompression Speed
------------------|-------------------|--------------------
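As a rough C-level counterpart of the training workflow described above (the helper name train_and_use, the sample layout, and the 100 KB dictionary budget are illustrative, not taken from the README), a sketch using the zdict.h API might look like:

#include <stdlib.h>
#include <stdio.h>
#include <zstd.h>
#include <zdict.h>

/* Train a dictionary from nbSamples samples stored back-to-back in samplesBuffer,
 * then use it to compress one small record.  Error handling is minimal. */
static void train_and_use(const void* samplesBuffer, const size_t* samplesSizes,
                          unsigned nbSamples, const void* record, size_t recordSize)
{
    size_t const dictCapacity = 100 * 1024;   /* illustrative dictionary budget */
    void* const dictBuffer = malloc(dictCapacity);
    size_t const dictSize = ZDICT_trainFromBuffer(dictBuffer, dictCapacity,
                                                  samplesBuffer, samplesSizes, nbSamples);
    if (ZDICT_isError(dictSize)) { fprintf(stderr, "%s\n", ZDICT_getErrorName(dictSize)); return; }

    size_t const dstCapacity = ZSTD_compressBound(recordSize);
    void* const dst = malloc(dstCapacity);
    ZSTD_CCtx* const cctx = ZSTD_createCCtx();
    size_t const cSize = ZSTD_compress_usingDict(cctx, dst, dstCapacity,
                                                 record, recordSize,
                                                 dictBuffer, dictSize, 3 /* level */);
    if (!ZSTD_isError(cSize))
        printf("compressed %zu -> %zu bytes with dictionary\n", recordSize, cSize);

    ZSTD_freeCCtx(cctx); free(dst); free(dictBuffer);
}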

View File

@ -30,12 +30,6 @@
SCRIPT: ""
TEST: "cmake"
- COMPILER: "gcc"
HOST: "mingw"
PLATFORM: "x64"
SCRIPT: ""
TEST: "pzstd"
- COMPILER: "visual"
HOST: "visual"
PLATFORM: "x64"
@ -88,12 +82,10 @@
( if [%COMPILER%]==[gcc] if [%ARTIFACT%]==[true]
lib\dll\example\build_package.bat &&
make -C programs DEBUGFLAGS= clean zstd &&
cp programs\zstd.exe zstd_%PLATFORM%.exe &&
appveyor PushArtifact zstd_%PLATFORM%.exe &&
cp programs\zstd.exe bin\zstd.exe &&
make -C programs DEBUGFLAGS= clean zstdmt &&
cp programs\zstd.exe bin\zstdmt.exe &&
cd bin\ && 7z a -tzip zstd-win-release-%PLATFORM%.zip * &&
cd programs\ && 7z a -tzip -mx9 zstd-win-binary-%PLATFORM%.zip zstd.exe &&
appveyor PushArtifact zstd-win-binary-%PLATFORM%.zip &&
cp zstd.exe ..\bin\zstd.exe &&
cd ..\bin\ && 7z a -tzip -mx9 zstd-win-release-%PLATFORM%.zip * &&
appveyor PushArtifact zstd-win-release-%PLATFORM%.zip
)
)
@ -159,13 +151,6 @@
cd ..\..\.. &&
make clean
)
- if [%TEST%]==[pzstd] (
make -C contrib\pzstd googletest-mingw64 &&
make -C contrib\pzstd pzstd.exe &&
make -C contrib\pzstd tests &&
make -C contrib\pzstd check &&
make -C contrib\pzstd clean
)
- SET "FUZZERTEST=-T30s"
- if [%HOST%]==[visual] if [%CONFIGURATION%]==[Release] (
CD tests &&

View File

@ -585,7 +585,10 @@ std::uint64_t writeFile(
std::uint64_t bytesWritten = 0;
std::shared_ptr<BufferWorkQueue> out;
// Grab the output queue for each decompression job (in order).
while (outs.pop(out) && !errorHolder.hasError()) {
while (outs.pop(out)) {
if (errorHolder.hasError()) {
continue;
}
if (!decompress) {
// If we are compressing and want to write skippable frames we can't
// start writing before compression is done because we need to know the

View File

@ -1,10 +1,10 @@
<html>
<head>
<meta http-equiv="Content-Type" content="text/html; charset=ISO-8859-1">
<title>zstd 1.2.0 Manual</title>
<title>zstd 1.3.0 Manual</title>
</head>
<body>
<h1>zstd 1.2.0 Manual</h1>
<h1>zstd 1.3.0 Manual</h1>
<hr>
<a name="Contents"></a><h2>Contents</h2>
<ol>
@ -13,14 +13,14 @@
<li><a href="#Chapter3">Simple API</a></li>
<li><a href="#Chapter4">Explicit memory management</a></li>
<li><a href="#Chapter5">Simple dictionary API</a></li>
<li><a href="#Chapter6">Fast dictionary API</a></li>
<li><a href="#Chapter6">Bulk processing dictionary API</a></li>
<li><a href="#Chapter7">Streaming</a></li>
<li><a href="#Chapter8">Streaming compression - HowTo</a></li>
<li><a href="#Chapter9">Streaming decompression - HowTo</a></li>
<li><a href="#Chapter10">START OF ADVANCED AND EXPERIMENTAL FUNCTIONS</a></li>
<li><a href="#Chapter11">Advanced types</a></li>
<li><a href="#Chapter12">Compressed size functions</a></li>
<li><a href="#Chapter13">Decompressed size functions</a></li>
<li><a href="#Chapter12">Frame size functions</a></li>
<li><a href="#Chapter13">Context memory usage</a></li>
<li><a href="#Chapter14">Advanced compression functions</a></li>
<li><a href="#Chapter15">Advanced decompression functions</a></li>
<li><a href="#Chapter16">Advanced streaming functions</a></li>
@ -31,26 +31,27 @@
</ol>
<hr>
<a name="Chapter1"></a><h2>Introduction</h2><pre>
zstd, short for Zstandard, is a fast lossless compression algorithm, targeting real-time compression scenarios
at zlib-level and better compression ratios. The zstd compression library provides in-memory compression and
decompression functions. The library supports compression levels from 1 up to ZSTD_maxCLevel() which is 22.
zstd, short for Zstandard, is a fast lossless compression algorithm,
targeting real-time compression scenarios at zlib-level and better compression ratios.
The zstd compression library provides in-memory compression and decompression functions.
The library supports compression levels from 1 up to ZSTD_maxCLevel() which is currently 22.
Levels >= 20, labeled `--ultra`, should be used with caution, as they require more memory.
Compression can be done in:
- a single step (described as Simple API)
- a single step, reusing a context (described as Explicit memory management)
- unbounded multiple steps (described as Streaming compression)
The compression ratio achievable on small data can be highly improved using compression with a dictionary in:
The compression ratio achievable on small data can be highly improved using a dictionary in:
- a single step (described as Simple dictionary API)
- a single step, reusing a dictionary (described as Fast dictionary API)
Advanced experimental functions can be accessed using #define ZSTD_STATIC_LINKING_ONLY before including zstd.h.
These APIs shall never be used with a dynamic library.
Advanced experimental APIs shall never be used with a dynamic library.
They are not "stable", their definition may change in the future. Only static linking is allowed.
<BR></pre>
<a name="Chapter2"></a><h2>Version</h2><pre></pre>
<pre><b>unsigned ZSTD_versionNumber(void); </b>/**< library version number; to be used when checking dll version */<b>
<pre><b>unsigned ZSTD_versionNumber(void); </b>/**< useful to check dll version */<b>
</b></pre><BR>
<a name="Chapter3"></a><h2>Simple API</h2><pre></pre>
@ -66,28 +67,24 @@
<pre><b>size_t ZSTD_decompress( void* dst, size_t dstCapacity,
const void* src, size_t compressedSize);
</b><p> `compressedSize` : must be the _exact_ size of some number of compressed and/or skippable frames.
`dstCapacity` is an upper bound of originalSize.
`dstCapacity` is an upper bound of originalSize to regenerate.
If user cannot imply a maximum upper bound, it's better to use streaming mode to decompress data.
@return : the number of bytes decompressed into `dst` (<= `dstCapacity`),
or an errorCode if it fails (which can be tested using ZSTD_isError()).
</p></pre><BR>
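A minimal in-memory round trip through the two simple-API calls above might look like the following sketch (buffer contents and the compression level are arbitrary, and error handling is reduced to ZSTD_isError()):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <zstd.h>

int main(void)
{
    const char src[] = "zstd round-trip example, small but compressible compressible compressible";
    size_t const srcSize = sizeof(src);

    /* compress : dstCapacity obtained from ZSTD_compressBound() is always sufficient */
    size_t const cBound = ZSTD_compressBound(srcSize);
    void* const cBuf = malloc(cBound);
    size_t const cSize = ZSTD_compress(cBuf, cBound, src, srcSize, 1 /* level */);
    if (ZSTD_isError(cSize)) { fprintf(stderr, "%s\n", ZSTD_getErrorName(cSize)); return 1; }

    /* decompress : here the original size is known, so it is used as dstCapacity */
    void* const rBuf = malloc(srcSize);
    size_t const rSize = ZSTD_decompress(rBuf, srcSize, cBuf, cSize);
    if (ZSTD_isError(rSize)) { fprintf(stderr, "%s\n", ZSTD_getErrorName(rSize)); return 1; }

    printf("ok : %zu -> %zu -> %zu bytes, content %s\n", srcSize, cSize, rSize,
           memcmp(src, rBuf, srcSize) ? "differs" : "identical");
    free(cBuf); free(rBuf);
    return 0;
}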
<pre><b>unsigned long long ZSTD_getDecompressedSize(const void* src, size_t srcSize);
</b><p> NOTE: This function is planned to be obsolete, in favour of ZSTD_getFrameContentSize.
ZSTD_getFrameContentSize functions the same way, returning the decompressed size of a single
frame, but distinguishes empty frames from frames with an unknown size, or errors.
Additionally, ZSTD_findDecompressedSize can be used instead. It can handle multiple
concatenated frames in one buffer, and so is more general.
As a result however, it requires more computation and entire frames to be passed to it,
as opposed to ZSTD_getFrameContentSize which requires only a single frame's header.
</b><p> NOTE: This function is planned to be obsolete, in favor of ZSTD_getFrameContentSize().
ZSTD_getFrameContentSize() works the same way,
returning the decompressed size of a single frame,
but distinguishes empty frames from frames with an unknown size, or errors.
'src' is the start of a zstd compressed frame.
@return : content size to be decompressed, as a 64-bits value _if known_, 0 otherwise.
note 1 : decompressed size is an optional field, that may not be present, especially in streaming mode.
note 1 : decompressed size is an optional field, it may not be present, typically in streaming mode.
When `return==0`, data to decompress could be any size.
In which case, it's necessary to use streaming mode to decompress data.
Optionally, application can still use ZSTD_decompress() while relying on implied limits.
Optionally, application can use ZSTD_decompress() while relying on implied limits.
(For example, data may be necessarily cut into blocks <= 16 KB).
note 2 : decompressed size is always present when compression is done with ZSTD_compress()
note 3 : decompressed size can be very large (64-bits value),
@ -96,7 +93,7 @@
note 4 : If source is untrusted, decompressed size could be wrong or intentionally modified.
Always ensure result fits within application's authorized limits.
Each application can set its own limits.
note 5 : when `return==0`, if precise failure cause is needed, use ZSTD_getFrameParams() to know more.
note 5 : when `return==0`, if precise failure cause is needed, use ZSTD_getFrameHeader() to know more.
</p></pre><BR>
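A sketch of the allocation pattern these notes imply, when only a compressed frame is available (decompress_whole_frame is a hypothetical helper; it simply refuses frames whose size field is absent instead of switching to streaming mode):

#include <stdlib.h>
#include <stdint.h>
#include <zstd.h>

/* Returns a freshly malloc()'ed buffer holding the regenerated data, or NULL on failure. */
static void* decompress_whole_frame(const void* cSrc, size_t cSize, size_t* outSize)
{
    unsigned long long const contentSize = ZSTD_getDecompressedSize(cSrc, cSize);
    if (contentSize == 0) return NULL;        /* unknown size (or empty) : use streaming instead */
    if (contentSize > SIZE_MAX) return NULL;  /* larger than a single memory segment (note 3) */

    void* const dst = malloc((size_t)contentSize);
    if (dst == NULL) return NULL;
    size_t const dSize = ZSTD_decompress(dst, (size_t)contentSize, cSrc, cSize);
    if (ZSTD_isError(dSize) || dSize != contentSize) { free(dst); return NULL; }

    *outSize = dSize;
    return dst;
}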
<h3>Helper functions</h3><pre></pre><b><pre>int ZSTD_maxCLevel(void); </b>/*!< maximum compression level available */<b>
@ -114,20 +111,26 @@ const char* ZSTD_getErrorName(size_t code); </b>/*!< provides readable strin
ZSTD_CCtx* ZSTD_createCCtx(void);
size_t ZSTD_freeCCtx(ZSTD_CCtx* cctx);
</pre></b><BR>
<pre><b>size_t ZSTD_compressCCtx(ZSTD_CCtx* ctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize, int compressionLevel);
<pre><b>size_t ZSTD_compressCCtx(ZSTD_CCtx* ctx,
void* dst, size_t dstCapacity,
const void* src, size_t srcSize,
int compressionLevel);
</b><p> Same as ZSTD_compress(), requires an allocated ZSTD_CCtx (see ZSTD_createCCtx()).
</p></pre><BR>
<h3>Decompression context</h3><pre> When decompressing many times,
it is recommended to allocate a context just once, and re-use it for each successive compression operation.
it is recommended to allocate a context only once,
and re-use it for each successive compression operation.
This will make workload friendlier for system's memory.
Use one context per thread for parallel execution in multi-threaded environments.
Use one context per thread for parallel execution.
</pre><b><pre>typedef struct ZSTD_DCtx_s ZSTD_DCtx;
ZSTD_DCtx* ZSTD_createDCtx(void);
size_t ZSTD_freeDCtx(ZSTD_DCtx* dctx);
</pre></b><BR>
<pre><b>size_t ZSTD_decompressDCtx(ZSTD_DCtx* ctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize);
</b><p> Same as ZSTD_decompress(), requires an allocated ZSTD_DCtx (see ZSTD_createDCtx()).
<pre><b>size_t ZSTD_decompressDCtx(ZSTD_DCtx* ctx,
void* dst, size_t dstCapacity,
const void* src, size_t srcSize);
</b><p> Same as ZSTD_decompress(), requires an allocated ZSTD_DCtx (see ZSTD_createDCtx())
</p></pre><BR>
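A sketch of the re-use pattern recommended above, compressing several independent buffers with one context (compress_many and its arguments are illustrative placeholders):

#include <stdlib.h>
#include <zstd.h>

/* Compress nbBuffers independent inputs with one reused ZSTD_CCtx. */
static void compress_many(const void** inputs, const size_t* inputSizes, size_t nbBuffers)
{
    ZSTD_CCtx* const cctx = ZSTD_createCCtx();   /* allocated once ... */
    for (size_t i = 0; i < nbBuffers; i++) {
        size_t const bound = ZSTD_compressBound(inputSizes[i]);
        void* const dst = malloc(bound);
        /* ... and reused for every call; each call starts a fresh frame */
        size_t const cSize = ZSTD_compressCCtx(cctx, dst, bound,
                                               inputs[i], inputSizes[i], 3);
        if (!ZSTD_isError(cSize)) { /* store or write dst[0..cSize) somewhere */ }
        free(dst);
    }
    ZSTD_freeCCtx(cctx);                          /* released once at the end */
}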
<a name="Chapter5"></a><h2>Simple dictionary API</h2><pre></pre>
@ -137,32 +140,33 @@ size_t ZSTD_freeDCtx(ZSTD_DCtx* dctx);
const void* src, size_t srcSize,
const void* dict,size_t dictSize,
int compressionLevel);
</b><p> Compression using a predefined Dictionary (see dictBuilder/zdict.h).
Note : This function loads the dictionary, resulting in significant startup delay.
Note : When `dict == NULL || dictSize < 8` no dictionary is used.
</b><p> Compression using a predefined Dictionary (see dictBuilder/zdict.h).
Note : This function loads the dictionary, resulting in significant startup delay.
Note : When `dict == NULL || dictSize < 8` no dictionary is used.
</p></pre><BR>
<pre><b>size_t ZSTD_decompress_usingDict(ZSTD_DCtx* dctx,
void* dst, size_t dstCapacity,
const void* src, size_t srcSize,
const void* dict,size_t dictSize);
</b><p> Decompression using a predefined Dictionary (see dictBuilder/zdict.h).
Dictionary must be identical to the one used during compression.
Note : This function loads the dictionary, resulting in significant startup delay.
Note : When `dict == NULL || dictSize < 8` no dictionary is used.
</b><p> Decompression using a predefined Dictionary (see dictBuilder/zdict.h).
Dictionary must be identical to the one used during compression.
Note : This function loads the dictionary, resulting in significant startup delay.
Note : When `dict == NULL || dictSize < 8` no dictionary is used.
</p></pre><BR>
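A possible round trip through the two dictionary calls above (roundtrip_with_dict is a hypothetical helper; the dictionary buffer is assumed to come from training, e.g. a file produced by the CLI's --train mode):

#include <stdlib.h>
#include <zstd.h>

/* One-shot round trip with a raw dictionary buffer.
 * Both sides must use the exact same dictionary. */
static size_t roundtrip_with_dict(void* dst, size_t dstCapacity,
                                  void* tmp, size_t tmpCapacity,
                                  const void* src, size_t srcSize,
                                  const void* dict, size_t dictSize)
{
    ZSTD_CCtx* const cctx = ZSTD_createCCtx();
    ZSTD_DCtx* const dctx = ZSTD_createDCtx();

    size_t const cSize = ZSTD_compress_usingDict(cctx, tmp, tmpCapacity,
                                                 src, srcSize, dict, dictSize, 3);
    size_t result = cSize;
    if (!ZSTD_isError(cSize))
        result = ZSTD_decompress_usingDict(dctx, dst, dstCapacity,
                                           tmp, cSize, dict, dictSize);

    ZSTD_freeCCtx(cctx);
    ZSTD_freeDCtx(dctx);
    return result;   /* regenerated size, or an error code testable with ZSTD_isError() */
}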
<a name="Chapter6"></a><h2>Fast dictionary API</h2><pre></pre>
<a name="Chapter6"></a><h2>Bulk processing dictionary API</h2><pre></pre>
<pre><b>ZSTD_CDict* ZSTD_createCDict(const void* dictBuffer, size_t dictSize, int compressionLevel);
</b><p> When compressing multiple messages / blocks with the same dictionary, it's recommended to load it just once.
ZSTD_createCDict() will create a digested dictionary, ready to start future compression operations without startup delay.
ZSTD_CDict can be created once and used by multiple threads concurrently, as its usage is read-only.
`dictBuffer` can be released after ZSTD_CDict creation, as its content is copied within CDict
<pre><b>ZSTD_CDict* ZSTD_createCDict(const void* dictBuffer, size_t dictSize,
int compressionLevel);
</b><p> When compressing multiple messages / blocks with the same dictionary, it's recommended to load it just once.
ZSTD_createCDict() will create a digested dictionary, ready to start future compression operations without startup delay.
ZSTD_CDict can be created once and shared by multiple threads concurrently, since its usage is read-only.
`dictBuffer` can be released after ZSTD_CDict creation, since its content is copied within CDict
</p></pre><BR>
<pre><b>size_t ZSTD_freeCDict(ZSTD_CDict* CDict);
</b><p> Function frees memory allocated by ZSTD_createCDict().
</b><p> Function frees memory allocated by ZSTD_createCDict().
</p></pre><BR>
<pre><b>size_t ZSTD_compress_usingCDict(ZSTD_CCtx* cctx,
@ -176,20 +180,20 @@ size_t ZSTD_freeDCtx(ZSTD_DCtx* dctx);
</p></pre><BR>
<pre><b>ZSTD_DDict* ZSTD_createDDict(const void* dictBuffer, size_t dictSize);
</b><p> Create a digested dictionary, ready to start decompression operation without startup delay.
dictBuffer can be released after DDict creation, as its content is copied inside DDict
</b><p> Create a digested dictionary, ready to start decompression operation without startup delay.
dictBuffer can be released after DDict creation, as its content is copied inside DDict
</p></pre><BR>
<pre><b>size_t ZSTD_freeDDict(ZSTD_DDict* ddict);
</b><p> Function frees memory allocated with ZSTD_createDDict()
</b><p> Function frees memory allocated with ZSTD_createDDict()
</p></pre><BR>
<pre><b>size_t ZSTD_decompress_usingDDict(ZSTD_DCtx* dctx,
void* dst, size_t dstCapacity,
const void* src, size_t srcSize,
const ZSTD_DDict* ddict);
</b><p> Decompression using a digested Dictionary.
Faster startup than ZSTD_decompress_usingDict(), recommended when same dictionary is used multiple times.
</b><p> Decompression using a digested Dictionary.
Faster startup than ZSTD_decompress_usingDict(), recommended when same dictionary is used multiple times.
</p></pre><BR>
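Digesting the dictionary once and then sharing it across many small messages could look like this sketch (single-threaded here, though the CDict itself may be shared across threads; names are illustrative):

#include <stdlib.h>
#include <zstd.h>

static void compress_batch_with_cdict(const void* dict, size_t dictSize,
                                      const void** msgs, const size_t* msgSizes, size_t nbMsgs)
{
    /* dictionary is digested once; the dict buffer could be freed right after this call */
    ZSTD_CDict* const cdict = ZSTD_createCDict(dict, dictSize, 3 /* level */);
    ZSTD_CCtx*  const cctx  = ZSTD_createCCtx();

    for (size_t i = 0; i < nbMsgs; i++) {
        size_t const bound = ZSTD_compressBound(msgSizes[i]);
        void* const dst = malloc(bound);
        size_t const cSize = ZSTD_compress_usingCDict(cctx, dst, bound,
                                                      msgs[i], msgSizes[i], cdict);
        if (!ZSTD_isError(cSize)) { /* consume dst[0..cSize) */ }
        free(dst);
    }
    ZSTD_freeCCtx(cctx);
    ZSTD_freeCDict(cdict);
    /* the decompression side would mirror this with ZSTD_createDDict() +
     * ZSTD_decompress_usingDDict() on a per-thread ZSTD_DCtx */
}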
<a name="Chapter7"></a><h2>Streaming</h2><pre></pre>
@ -236,14 +240,18 @@ size_t ZSTD_freeDCtx(ZSTD_DCtx* dctx);
ZSTD_endStream() instructs to finish a frame.
It will perform a flush and write frame epilogue.
The epilogue is required for decoders to consider a frame completed.
Similar to ZSTD_flushStream(), it may not be able to flush the full content if `output->size` is too small.
ZSTD_endStream() may not be able to flush full data if `output->size` is too small.
In which case, call again ZSTD_endStream() to complete the flush.
@return : nb of bytes still present within internal buffer (0 if it's empty, hence compression completed)
@return : 0 if frame fully completed and fully flushed,
or >0 if some data is still present within internal buffer
(value is minimum size estimation for remaining data to flush, but it could be more)
or an error code, which can be tested using ZSTD_isError().
<BR></pre>
<pre><b>typedef ZSTD_CCtx ZSTD_CStream; </b>/**< CCtx and CStream are now effectively same object (>= v1.3.0) */<b>
</b></pre><BR>
<h3>ZSTD_CStream management functions</h3><pre></pre><b><pre>ZSTD_CStream* ZSTD_createCStream(void);
size_t ZSTD_freeCStream(ZSTD_CStream* zcs);
</pre></b><BR>
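The flush semantics above are easiest to see in a file-to-file loop; a sketch built on the regular streaming entry points (compress_file is a hypothetical helper, and cleanup on error paths is omitted for brevity):

#include <stdio.h>
#include <stdlib.h>
#include <zstd.h>

static int compress_file(FILE* fin, FILE* fout, int level)
{
    size_t const inSize  = ZSTD_CStreamInSize();
    size_t const outSize = ZSTD_CStreamOutSize();
    void* const inBuf  = malloc(inSize);
    void* const outBuf = malloc(outSize);

    ZSTD_CStream* const zcs = ZSTD_createCStream();
    ZSTD_initCStream(zcs, level);

    size_t read;
    while ((read = fread(inBuf, 1, inSize, fin)) > 0) {
        ZSTD_inBuffer input = { inBuf, read, 0 };
        while (input.pos < input.size) {                 /* push until input fully consumed */
            ZSTD_outBuffer output = { outBuf, outSize, 0 };
            size_t const ret = ZSTD_compressStream(zcs, &output, &input);
            if (ZSTD_isError(ret)) return 1;
            fwrite(outBuf, 1, output.pos, fout);
        }
    }
    /* write the epilogue : loop until ZSTD_endStream() reports a fully flushed frame (0) */
    size_t remaining;
    do {
        ZSTD_outBuffer output = { outBuf, outSize, 0 };
        remaining = ZSTD_endStream(zcs, &output);
        if (ZSTD_isError(remaining)) return 1;
        fwrite(outBuf, 1, output.pos, fout);
    } while (remaining != 0);

    ZSTD_freeCStream(zcs);
    free(inBuf); free(outBuf);
    return 0;
}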
@ -277,6 +285,8 @@ size_t ZSTD_endStream(ZSTD_CStream* zcs, ZSTD_outBuffer* output);
<BR></pre>
<pre><b>typedef ZSTD_DCtx ZSTD_DStream; </b>/**< DCtx and DStream are now effectively same object (>= v1.3.0) */<b>
</b></pre><BR>
<h3>ZSTD_DStream management functions</h3><pre></pre><b><pre>ZSTD_DStream* ZSTD_createDStream(void);
size_t ZSTD_freeDStream(ZSTD_DStream* zds);
</pre></b><BR>
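The matching decompression loop, again as a sketch with a hypothetical decompress_file helper:

#include <stdio.h>
#include <stdlib.h>
#include <zstd.h>

static int decompress_file(FILE* fin, FILE* fout)
{
    size_t const inSize  = ZSTD_DStreamInSize();
    size_t const outSize = ZSTD_DStreamOutSize();
    void* const inBuf  = malloc(inSize);
    void* const outBuf = malloc(outSize);

    ZSTD_DStream* const zds = ZSTD_createDStream();
    ZSTD_initDStream(zds);

    size_t read;
    while ((read = fread(inBuf, 1, inSize, fin)) > 0) {
        ZSTD_inBuffer input = { inBuf, read, 0 };
        while (input.pos < input.size) {
            ZSTD_outBuffer output = { outBuf, outSize, 0 };
            /* return is 0 when a frame is fully decoded, or a hint of bytes still expected */
            size_t const ret = ZSTD_decompressStream(zds, &output, &input);
            if (ZSTD_isError(ret)) return 1;
            fwrite(outBuf, 1, output.pos, fout);
        }
    }
    ZSTD_freeDStream(zds);
    free(inBuf); free(outBuf);
    return 0;
}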
@ -296,7 +306,8 @@ size_t ZSTD_decompressStream(ZSTD_DStream* zds, ZSTD_outBuffer* output, ZSTD_inB
<a name="Chapter11"></a><h2>Advanced types</h2><pre></pre>
<pre><b>typedef enum { ZSTD_fast, ZSTD_dfast, ZSTD_greedy, ZSTD_lazy, ZSTD_lazy2, ZSTD_btlazy2, ZSTD_btopt, ZSTD_btopt2 } ZSTD_strategy; </b>/* from faster to stronger */<b>
<pre><b>typedef enum { ZSTD_fast=1, ZSTD_dfast, ZSTD_greedy, ZSTD_lazy, ZSTD_lazy2,
ZSTD_btlazy2, ZSTD_btopt, ZSTD_btultra } ZSTD_strategy; </b>/* from faster to stronger */<b>
</b></pre><BR>
<pre><b>typedef struct {
unsigned windowLog; </b>/**< largest match distance : larger == more compression, more memory needed during decompression */<b>
@ -319,68 +330,141 @@ size_t ZSTD_decompressStream(ZSTD_DStream* zds, ZSTD_outBuffer* output, ZSTD_inB
ZSTD_frameParameters fParams;
} ZSTD_parameters;
</b></pre><BR>
<pre><b>typedef struct {
unsigned long long frameContentSize;
size_t windowSize;
unsigned dictID;
unsigned checksumFlag;
} ZSTD_frameHeader;
</b></pre><BR>
<h3>Custom memory allocation functions</h3><pre></pre><b><pre>typedef void* (*ZSTD_allocFunction) (void* opaque, size_t size);
typedef void (*ZSTD_freeFunction) (void* opaque, void* address);
typedef struct { ZSTD_allocFunction customAlloc; ZSTD_freeFunction customFree; void* opaque; } ZSTD_customMem;
</b>/* use this constant to defer to stdlib's functions */<b>
static const ZSTD_customMem ZSTD_defaultCMem = { NULL, NULL, NULL };
</pre></b><BR>
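A small sketch of plugging in a custom allocator via ZSTD_customMem together with ZSTD_createCCtx_advanced() (documented further below); the counting allocator itself is purely illustrative:

#include <stdio.h>
#include <stdlib.h>
#define ZSTD_STATIC_LINKING_ONLY   /* ZSTD_customMem / ZSTD_createCCtx_advanced are advanced APIs */
#include <zstd.h>

/* A toy allocator that counts bytes requested; `opaque` carries the counter. */
static void* counting_alloc(void* opaque, size_t size)
{
    *(size_t*)opaque += size;
    return malloc(size);
}
static void counting_free(void* opaque, void* address)
{
    (void)opaque;
    free(address);
}

int main(void)
{
    size_t allocated = 0;
    ZSTD_customMem const cmem = { counting_alloc, counting_free, &allocated };
    ZSTD_CCtx* const cctx = ZSTD_createCCtx_advanced(cmem);
    printf("CCtx creation requested %zu bytes from the custom allocator\n", allocated);
    ZSTD_freeCCtx(cctx);
    return 0;
}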
<a name="Chapter12"></a><h2>Compressed size functions</h2><pre></pre>
<a name="Chapter12"></a><h2>Frame size functions</h2><pre></pre>
<pre><b>size_t ZSTD_findFrameCompressedSize(const void* src, size_t srcSize);
</b><p> `src` should point to the start of a ZSTD encoded frame or skippable frame
`srcSize` must be at least as large as the frame
@return : the compressed size of the frame pointed to by `src`, suitable to pass to
`ZSTD_decompress` or similar, or an error code if given invalid input.
@return : the compressed size of the frame pointed to by `src`,
suitable to pass to `ZSTD_decompress` or similar,
or an error code if given invalid input.
</p></pre><BR>
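Since the returned size covers exactly one frame, it can also be used to walk a buffer holding several concatenated frames; a sketch (count_frames is a hypothetical helper that only counts them):

#include <zstd.h>

/* Visit every frame (zstd or skippable) concatenated in src[0..srcSize). */
static size_t count_frames(const void* src, size_t srcSize)
{
    const char* ptr = (const char*)src;
    size_t remaining = srcSize;
    size_t nbFrames = 0;
    while (remaining > 0) {
        size_t const frameSize = ZSTD_findFrameCompressedSize(ptr, remaining);
        if (ZSTD_isError(frameSize)) return nbFrames;   /* invalid input : stop here */
        /* ptr[0..frameSize) is one complete frame, suitable for ZSTD_decompress() */
        ptr += frameSize;
        remaining -= frameSize;
        nbFrames++;
    }
    return nbFrames;
}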
<a name="Chapter13"></a><h2>Decompressed size functions</h2><pre></pre>
<pre><b>unsigned long long ZSTD_getFrameContentSize(const void *src, size_t srcSize);
</b><p> `src` should point to the start of a ZSTD encoded frame
`srcSize` must be at least as large as the frame header. A value greater than or equal
to `ZSTD_frameHeaderSize_max` is guaranteed to be large enough in all cases.
@return : decompressed size of the frame pointed to be `src` if known, otherwise
- ZSTD_CONTENTSIZE_UNKNOWN if the size cannot be determined
- ZSTD_CONTENTSIZE_ERROR if an error occurred (e.g. invalid magic number, srcSize too small)
<pre><b>#define ZSTD_CONTENTSIZE_UNKNOWN (0ULL - 1)
#define ZSTD_CONTENTSIZE_ERROR (0ULL - 2)
unsigned long long ZSTD_getFrameContentSize(const void *src, size_t srcSize);
</b><p> `src` should point to the start of a ZSTD encoded frame.
`srcSize` must be at least as large as the frame header.
A value >= `ZSTD_frameHeaderSize_max` is guaranteed to be large enough.
@return : - decompressed size of the frame pointed to by `src` if known
- ZSTD_CONTENTSIZE_UNKNOWN if the size cannot be determined
- ZSTD_CONTENTSIZE_ERROR if an error occurred (e.g. invalid magic number, srcSize too small)
</p></pre><BR>
<pre><b>unsigned long long ZSTD_findDecompressedSize(const void* src, size_t srcSize);
</b><p> `src` should point the start of a series of ZSTD encoded and/or skippable frames
`srcSize` must be the _exact_ size of this series
</b><p> `src` should point to the start of a series of ZSTD encoded and/or skippable frames
`srcSize` must be the _exact_ size of this series
(i.e. there should be a frame boundary exactly `srcSize` bytes after `src`)
@return : the decompressed size of all data in the contained frames, as a 64-bit value _if known_
- if the decompressed size cannot be determined: ZSTD_CONTENTSIZE_UNKNOWN
- if an error occurred: ZSTD_CONTENTSIZE_ERROR
@return : - decompressed size of all data in all successive frames
- if the decompressed size cannot be determined: ZSTD_CONTENTSIZE_UNKNOWN
- if an error occurred: ZSTD_CONTENTSIZE_ERROR
note 1 : decompressed size is an optional field, that may not be present, especially in streaming mode.
When `return==ZSTD_CONTENTSIZE_UNKNOWN`, data to decompress could be any size.
In which case, it's necessary to use streaming mode to decompress data.
Optionally, application can still use ZSTD_decompress() while relying on implied limits.
(For example, data may be necessarily cut into blocks <= 16 KB).
note 2 : decompressed size is always present when compression is done with ZSTD_compress()
note 3 : decompressed size can be very large (64-bits value),
potentially larger than what local system can handle as a single memory segment.
In which case, it's necessary to use streaming mode to decompress data.
note 4 : If source is untrusted, decompressed size could be wrong or intentionally modified.
Always ensure result fits within application's authorized limits.
Each application can set its own limits.
note 5 : ZSTD_findDecompressedSize handles multiple frames, and so it must traverse the input to
read each contained frame header. This is efficient as most of the data is skipped,
however it does mean that all frame data must be present and valid.
note 1 : decompressed size is an optional field, that may not be present, especially in streaming mode.
When `return==ZSTD_CONTENTSIZE_UNKNOWN`, data to decompress could be any size.
In which case, it's necessary to use streaming mode to decompress data.
Optionally, application can still use ZSTD_decompress() while relying on implied limits.
(For example, data may be necessarily cut into blocks <= 16 KB).
note 2 : decompressed size is always present when compression is done with ZSTD_compress()
note 3 : decompressed size can be very large (64-bits value),
potentially larger than what local system can handle as a single memory segment.
In which case, it's necessary to use streaming mode to decompress data.
note 4 : If source is untrusted, decompressed size could be wrong or intentionally modified.
Always ensure result fits within application's authorized limits.
Each application can set its own limits.
note 5 : ZSTD_findDecompressedSize handles multiple frames, and so it must traverse the input to
read each contained frame header. This is efficient as most of the data is skipped,
however it does mean that all frame data must be present and valid.
</p></pre><BR>
<pre><b>size_t ZSTD_frameHeaderSize(const void* src, size_t srcSize);
</b><p> `src` should point to the start of a ZSTD frame
`srcSize` must be >= ZSTD_frameHeaderSize_prefix.
@return : size of the Frame Header
</p></pre><BR>
<a name="Chapter13"></a><h2>Context memory usage</h2><pre></pre>
<pre><b>size_t ZSTD_sizeof_CCtx(const ZSTD_CCtx* cctx);
size_t ZSTD_sizeof_DCtx(const ZSTD_DCtx* dctx);
size_t ZSTD_sizeof_CStream(const ZSTD_CStream* zcs);
size_t ZSTD_sizeof_DStream(const ZSTD_DStream* zds);
size_t ZSTD_sizeof_CDict(const ZSTD_CDict* cdict);
size_t ZSTD_sizeof_DDict(const ZSTD_DDict* ddict);
</b><p> These functions give the current memory usage of selected object.
Object memory usage can evolve if it's re-used multiple times.
</p></pre><BR>
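For illustration, a trivial program querying these functions on freshly created contexts (the values reported are implementation-dependent and grow after first use):

#include <stdio.h>
#define ZSTD_STATIC_LINKING_ONLY   /* ZSTD_sizeof_*() live in the advanced section */
#include <zstd.h>

int main(void)
{
    ZSTD_CCtx* const cctx = ZSTD_createCCtx();
    ZSTD_DCtx* const dctx = ZSTD_createDCtx();
    /* values evolve once the contexts have actually been used for (de)compression */
    printf("fresh CCtx : %zu bytes, fresh DCtx : %zu bytes\n",
           ZSTD_sizeof_CCtx(cctx), ZSTD_sizeof_DCtx(dctx));
    ZSTD_freeCCtx(cctx);
    ZSTD_freeDCtx(dctx);
    return 0;
}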
<pre><b>size_t ZSTD_estimateCCtxSize(int compressionLevel);
size_t ZSTD_estimateCCtxSize_advanced(ZSTD_compressionParameters cParams);
size_t ZSTD_estimateDCtxSize(void);
</b><p> These functions make it possible to estimate memory usage
of a future {D,C}Ctx, before its creation.
ZSTD_estimateCCtxSize() will provide a budget large enough for any compression level up to selected one.
It will also consider src size to be arbitrarily "large", which is worst case.
If srcSize is known to always be small, ZSTD_estimateCCtxSize_advanced() can provide a tighter estimation.
ZSTD_estimateCCtxSize_advanced() can be used in tandem with ZSTD_getCParams() to create cParams from compressionLevel.
Note : CCtx estimation is only correct for single-threaded compression
</p></pre><BR>
<pre><b>size_t ZSTD_estimateCStreamSize(int compressionLevel);
size_t ZSTD_estimateCStreamSize_advanced(ZSTD_compressionParameters cParams);
size_t ZSTD_estimateDStreamSize(size_t windowSize);
size_t ZSTD_estimateDStreamSize_fromFrame(const void* src, size_t srcSize);
</b><p> ZSTD_estimateCStreamSize() will provide a budget large enough for any compression level up to selected one.
It will also consider src size to be arbitrarily "large", which is worst case.
If srcSize is known to always be small, ZSTD_estimateCStreamSize_advanced() can provide a tighter estimation.
ZSTD_estimateCStreamSize_advanced() can be used in tandem with ZSTD_getCParams() to create cParams from compressionLevel.
Note : CStream estimation is only correct for single-threaded compression.
ZSTD_DStream memory budget depends on window Size.
This information can be passed manually, using ZSTD_estimateDStreamSize,
or deducted from a valid frame Header, using ZSTD_estimateDStreamSize_fromFrame();
Note : if streaming is init with function ZSTD_init?Stream_usingDict(),
an internal ?Dict will be created, whose additional size is not estimated here.
In this case, get total size by adding ZSTD_estimate?DictSize
</p></pre><BR>
<pre><b>size_t ZSTD_estimateCDictSize(size_t dictSize, int compressionLevel);
size_t ZSTD_estimateCDictSize_advanced(size_t dictSize, ZSTD_compressionParameters cParams, unsigned byReference);
size_t ZSTD_estimateDDictSize(size_t dictSize, unsigned byReference);
</b><p> ZSTD_estimateCDictSize() will bet that src size is relatively "small", and content is copied, like ZSTD_createCDict().
ZSTD_estimateCStreamSize_advanced() makes it possible to control precisely compression parameters, like ZSTD_createCDict_advanced().
Note : dictionaries created "byReference" are smaller
</p></pre><BR>
<a name="Chapter14"></a><h2>Advanced compression functions</h2><pre></pre>
<pre><b>size_t ZSTD_estimateCCtxSize(ZSTD_compressionParameters cParams);
</b><p> Gives the amount of memory allocated for a ZSTD_CCtx given a set of compression parameters.
`frameContentSize` is an optional parameter, provide `0` if unknown
</p></pre><BR>
<pre><b>ZSTD_CCtx* ZSTD_createCCtx_advanced(ZSTD_customMem customMem);
</b><p> Create a ZSTD compression context using external alloc and free functions
</p></pre><BR>
<pre><b>size_t ZSTD_sizeof_CCtx(const ZSTD_CCtx* cctx);
</b><p> Gives the amount of memory used by a given ZSTD_CCtx
<pre><b>ZSTD_CCtx* ZSTD_initStaticCCtx(void* workspace, size_t workspaceSize);
</b><p> workspace: The memory area to emplace the context into.
Provided pointer must be 8-bytes aligned.
It must outlive context usage.
workspaceSize: Use ZSTD_estimateCCtxSize() or ZSTD_estimateCStreamSize()
to determine how large workspace must be to support scenario.
@return : pointer to ZSTD_CCtx*, or NULL if error (size too small)
Note : zstd will never resize nor malloc() when using a static cctx.
If it needs more memory than available, it will simply error out.
Note 2 : there is no corresponding "free" function.
Since workspace was allocated externally, it must be freed externally too.
Limitation 1 : currently not compatible with internal CDict creation, such as
ZSTD_CCtx_loadDictionary() or ZSTD_initCStream_usingDict().
Limitation 2 : currently not compatible with multi-threading
</p></pre><BR>
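A sketch combining ZSTD_estimateCCtxSize() with ZSTD_initStaticCCtx(); the workspace comes from malloc() purely for brevity (any suitably aligned, externally managed region would do), and compress_with_static_cctx is a hypothetical helper:

#include <stdlib.h>
#define ZSTD_STATIC_LINKING_ONLY   /* static-allocation API is experimental at this version */
#include <zstd.h>

static size_t compress_with_static_cctx(void* dst, size_t dstCapacity,
                                        const void* src, size_t srcSize, int level)
{
    size_t const wkspSize = ZSTD_estimateCCtxSize(level);   /* budget for any level up to `level` */
    void* const wksp = malloc(wkspSize);                    /* must stay 8-bytes aligned and outlive cctx */
    ZSTD_CCtx* const cctx = ZSTD_initStaticCCtx(wksp, wkspSize);
    if (cctx == NULL) { free(wksp); return (size_t)-1; }    /* workspace too small : caller treats as failure */

    /* the static cctx will never malloc(); an over-budget request errors out instead */
    size_t const cSize = ZSTD_compressCCtx(cctx, dst, dstCapacity, src, srcSize, level);

    free(wksp);                                             /* no ZSTD_freeCCtx() counterpart for static cctx */
    return cSize;
}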
<pre><b>typedef enum {
@ -399,13 +483,34 @@ typedef struct { ZSTD_allocFunction customAlloc; ZSTD_freeFunction customFree; v
It is important that dictBuffer outlives CDict, it must remain read accessible throughout the lifetime of CDict
</p></pre><BR>
<pre><b>ZSTD_CDict* ZSTD_createCDict_advanced(const void* dict, size_t dictSize, unsigned byReference,
<pre><b>typedef enum { ZSTD_dm_auto=0, </b>/* dictionary is "full" if it starts with ZSTD_MAGIC_DICTIONARY, rawContent otherwise */<b>
ZSTD_dm_rawContent, </b>/* ensures dictionary is always loaded as rawContent, even if it starts with ZSTD_MAGIC_DICTIONARY */<b>
ZSTD_dm_fullDict </b>/* refuses to load a dictionary if it does not respect Zstandard's specification */<b>
} ZSTD_dictMode_e;
</b></pre><BR>
<pre><b>ZSTD_CDict* ZSTD_createCDict_advanced(const void* dict, size_t dictSize,
unsigned byReference, ZSTD_dictMode_e dictMode,
ZSTD_compressionParameters cParams, ZSTD_customMem customMem);
</b><p> Create a ZSTD_CDict using external alloc and free, and customized compression parameters
</p></pre><BR>
<pre><b>size_t ZSTD_sizeof_CDict(const ZSTD_CDict* cdict);
</b><p> Gives the amount of memory used by a given ZSTD_sizeof_CDict
<pre><b>ZSTD_CDict* ZSTD_initStaticCDict(
void* workspace, size_t workspaceSize,
const void* dict, size_t dictSize,
unsigned byReference, ZSTD_dictMode_e dictMode,
ZSTD_compressionParameters cParams);
</b><p> Generate a digested dictionary in provided memory area.
workspace: The memory area to emplace the dictionary into.
Provided pointer must be 8-bytes aligned.
It must outlive dictionary usage.
workspaceSize: Use ZSTD_estimateCDictSize()
to determine how large workspace must be.
cParams : use ZSTD_getCParams() to transform a compression level
into its relevant cParams.
@return : pointer to ZSTD_CDict*, or NULL if error (size too small)
Note : there is no corresponding "free" function.
Since workspace was allocated externally, it must be freed externally.
</p></pre><BR>
<pre><b>ZSTD_compressionParameters ZSTD_getCParams(int compressionLevel, unsigned long long estimatedSrcSize, size_t dictSize);
@ -423,8 +528,8 @@ typedef struct { ZSTD_allocFunction customAlloc; ZSTD_freeFunction customFree; v
</p></pre><BR>
<pre><b>ZSTD_compressionParameters ZSTD_adjustCParams(ZSTD_compressionParameters cPar, unsigned long long srcSize, size_t dictSize);
</b><p> optimize params for a given `srcSize` and `dictSize`.
both values are optional, select `0` if unknown.
</b><p> optimize params for a given `srcSize` and `dictSize`.
both values are optional, select `0` if unknown.
</p></pre><BR>
<pre><b>size_t ZSTD_compress_advanced (ZSTD_CCtx* cctx,
@ -451,22 +556,32 @@ typedef struct { ZSTD_allocFunction customAlloc; ZSTD_freeFunction customFree; v
Note 3 : Skippable Frame Identifiers are considered valid.
</p></pre><BR>
<pre><b>size_t ZSTD_estimateDCtxSize(void);
</b><p> Gives the potential amount of memory allocated to create a ZSTD_DCtx
</p></pre><BR>
<pre><b>ZSTD_DCtx* ZSTD_createDCtx_advanced(ZSTD_customMem customMem);
</b><p> Create a ZSTD decompression context using external alloc and free functions
</p></pre><BR>
<pre><b>size_t ZSTD_sizeof_DCtx(const ZSTD_DCtx* dctx);
</b><p> Gives the amount of memory used by a given ZSTD_DCtx
<pre><b>ZSTD_DCtx* ZSTD_initStaticDCtx(void* workspace, size_t workspaceSize);
</b><p> workspace: The memory area to emplace the context into.
Provided pointer must be 8-bytes aligned.
It must outlive context usage.
workspaceSize: Use ZSTD_estimateDCtxSize() or ZSTD_estimateDStreamSize()
to determine how large workspace must be to support scenario.
@return : pointer to ZSTD_DCtx*, or NULL if error (size too small)
Note : zstd will never resize nor malloc() when using a static dctx.
If it needs more memory than available, it will simply error out.
Note 2 : static dctx is incompatible with legacy support
Note 3 : there is no corresponding "free" function.
Since workspace was allocated externally, it must be freed externally.
Limitation : currently not compatible with internal DDict creation,
such as ZSTD_initDStream_usingDict().
</p></pre><BR>
<pre><b>ZSTD_DDict* ZSTD_createDDict_byReference(const void* dictBuffer, size_t dictSize);
</b><p> Create a digested dictionary, ready to start decompression operation without startup delay.
Dictionary content is simply referenced, and therefore stays in dictBuffer.
It is important that dictBuffer outlives DDict, it must remain read accessible throughout the lifetime of DDict
Dictionary content is referenced, and therefore stays in dictBuffer.
It is important that dictBuffer outlives DDict,
it must remain read accessible throughout the lifetime of DDict
</p></pre><BR>
<pre><b>ZSTD_DDict* ZSTD_createDDict_advanced(const void* dict, size_t dictSize,
@ -474,8 +589,19 @@ typedef struct { ZSTD_allocFunction customAlloc; ZSTD_freeFunction customFree; v
</b><p> Create a ZSTD_DDict using external alloc and free, optionally by reference
</p></pre><BR>
<pre><b>size_t ZSTD_sizeof_DDict(const ZSTD_DDict* ddict);
</b><p> Gives the amount of memory used by a given ZSTD_DDict
<pre><b>ZSTD_DDict* ZSTD_initStaticDDict(void* workspace, size_t workspaceSize,
const void* dict, size_t dictSize,
unsigned byReference);
</b><p> Generate a digested dictionary in provided memory area.
workspace: The memory area to emplace the dictionary into.
Provided pointer must be 8-bytes aligned.
It must outlive dictionary usage.
workspaceSize: Use ZSTD_estimateDDictSize()
to determine how large workspace must be.
@return : pointer to ZSTD_DDict*, or NULL if error (size too small)
Note : there is no corresponding "free" function.
Since workspace was allocated externally, it must be freed externally.
</p></pre><BR>
<pre><b>unsigned ZSTD_getDictID_fromDict(const void* dict, size_t dictSize);
@ -499,19 +625,19 @@ typedef struct { ZSTD_allocFunction customAlloc; ZSTD_freeFunction customFree; v
Note : this use case also happens when using a non-conformant dictionary.
- `srcSize` is too small, and as a result, the frame header could not be decoded (only possible if `srcSize < ZSTD_FRAMEHEADERSIZE_MAX`).
- This is not a Zstandard frame.
When identifying the exact failure cause, it's possible to use ZSTD_getFrameParams(), which will provide a more precise error code.
When identifying the exact failure cause, it's possible to use ZSTD_getFrameHeader(), which will provide a more precise error code.
</p></pre><BR>
<a name="Chapter16"></a><h2>Advanced streaming functions</h2><pre></pre>
<h3>Advanced Streaming compression functions</h3><pre></pre><b><pre>ZSTD_CStream* ZSTD_createCStream_advanced(ZSTD_customMem customMem);
size_t ZSTD_sizeof_CStream(const ZSTD_CStream* zcs); </b>/**< size of CStream is variable, depending primarily on compression level */<b>
ZSTD_CStream* ZSTD_initStaticCStream(void* workspace, size_t workspaceSize); </b>/**< same as ZSTD_initStaticCCtx() */<b>
size_t ZSTD_initCStream_srcSize(ZSTD_CStream* zcs, int compressionLevel, unsigned long long pledgedSrcSize); </b>/**< pledgedSrcSize must be correct, a size of 0 means unknown. for a frame size of 0 use initCStream_advanced */<b>
size_t ZSTD_initCStream_usingDict(ZSTD_CStream* zcs, const void* dict, size_t dictSize, int compressionLevel); </b>/**< note: a dict will not be used if dict == NULL or dictSize < 8 */<b>
size_t ZSTD_initCStream_usingDict(ZSTD_CStream* zcs, const void* dict, size_t dictSize, int compressionLevel); </b>/**< creates of an internal CDict (incompatible with static CCtx), except if dict == NULL or dictSize < 8, in which case no dict is used. */<b>
size_t ZSTD_initCStream_advanced(ZSTD_CStream* zcs, const void* dict, size_t dictSize,
ZSTD_parameters params, unsigned long long pledgedSrcSize); </b>/**< pledgedSrcSize is optional and can be 0 (meaning unknown). note: if the contentSizeFlag is set, pledgedSrcSize == 0 means the source size is actually 0 */<b>
size_t ZSTD_initCStream_usingCDict(ZSTD_CStream* zcs, const ZSTD_CDict* cdict); </b>/**< note : cdict will just be referenced, and must outlive compression session */<b>
size_t ZSTD_initCStream_usingCDict_advanced(ZSTD_CStream* zcs, const ZSTD_CDict* cdict, unsigned long long pledgedSrcSize, ZSTD_frameParameters fParams); </b>/**< same as ZSTD_initCStream_usingCDict(), with control over frame parameters */<b>
size_t ZSTD_initCStream_usingCDict_advanced(ZSTD_CStream* zcs, const ZSTD_CDict* cdict, ZSTD_frameParameters fParams, unsigned long long pledgedSrcSize); </b>/**< same as ZSTD_initCStream_usingCDict(), with control over frame parameters */<b>
</pre></b><BR>
<pre><b>size_t ZSTD_resetCStream(ZSTD_CStream* zcs, unsigned long long pledgedSrcSize);
</b><p> start a new compression job, using same parameters from previous job.
@ -524,11 +650,11 @@ size_t ZSTD_initCStream_usingCDict_advanced(ZSTD_CStream* zcs, const ZSTD_CDict*
<h3>Advanced Streaming decompression functions</h3><pre></pre><b><pre>typedef enum { DStream_p_maxWindowSize } ZSTD_DStreamParameter_e;
ZSTD_DStream* ZSTD_createDStream_advanced(ZSTD_customMem customMem);
size_t ZSTD_initDStream_usingDict(ZSTD_DStream* zds, const void* dict, size_t dictSize); </b>/**< note: a dict will not be used if dict == NULL or dictSize < 8 */<b>
ZSTD_DStream* ZSTD_initStaticDStream(void* workspace, size_t workspaceSize); </b>/**< same as ZSTD_initStaticDCtx() */<b>
size_t ZSTD_setDStreamParameter(ZSTD_DStream* zds, ZSTD_DStreamParameter_e paramType, unsigned paramValue);
size_t ZSTD_initDStream_usingDict(ZSTD_DStream* zds, const void* dict, size_t dictSize); </b>/**< note: a dict will not be used if dict == NULL or dictSize < 8 */<b>
size_t ZSTD_initDStream_usingDDict(ZSTD_DStream* zds, const ZSTD_DDict* ddict); </b>/**< note : ddict will just be referenced, and must outlive decompression session */<b>
size_t ZSTD_resetDStream(ZSTD_DStream* zds); </b>/**< re-use decompression parameters from previous init; saves dictionary loading */<b>
size_t ZSTD_sizeof_DStream(const ZSTD_DStream* zds);
</pre></b><BR>
<a name="Chapter17"></a><h2>Buffer-less and synchronous inner streaming functions</h2><pre>
This is an advanced API, giving full control over buffer management, for users which need direct control over memory.
@ -578,21 +704,24 @@ size_t ZSTD_copyCCtx(ZSTD_CCtx* cctx, const ZSTD_CCtx* preparedCCtx, unsigned lo
Use ZSTD_createDCtx() / ZSTD_freeDCtx() to manage it.
A ZSTD_DCtx object can be re-used multiple times.
First typical operation is to retrieve frame parameters, using ZSTD_getFrameParams().
It fills a ZSTD_frameParams structure which provide important information to correctly decode the frame,
such as the minimum rolling buffer size to allocate to decompress data (`windowSize`),
and the dictionary ID used.
First typical operation is to retrieve frame parameters, using ZSTD_getFrameHeader().
It fills a ZSTD_frameHeader structure with important information to correctly decode the frame,
such as minimum rolling buffer size to allocate to decompress data (`windowSize`),
and the dictionary ID in use.
(Note : content size is optional, it may not be present. 0 means : content size unknown).
Note that these values could be wrong, either because of data malformation, or because an attacker is spoofing deliberate false information.
As a consequence, check that values remain within valid application range, especially `windowSize`, before allocation.
Each application can set its own limit, depending on local restrictions. For extended interoperability, it is recommended to support at least 8 MB.
Frame parameters are extracted from the beginning of the compressed frame.
Data fragment must be large enough to ensure successful decoding, typically `ZSTD_frameHeaderSize_max` bytes.
@result : 0 : successful decoding, the `ZSTD_frameParams` structure is correctly filled.
Each application can set its own limit, depending on local restrictions.
For extended interoperability, it is recommended to support windowSize of at least 8 MB.
Frame header is extracted from the beginning of compressed frame, so providing only the frame's beginning is enough.
Data fragment must be large enough to ensure successful decoding.
`ZSTD_frameHeaderSize_max` bytes is guaranteed to always be large enough.
@result : 0 : successful decoding, the `ZSTD_frameHeader` structure is correctly filled.
>0 : `srcSize` is too small, please provide at least @result bytes on next attempt.
errorCode, which can be tested using ZSTD_isError().
Start decompression, with ZSTD_decompressBegin() or ZSTD_decompressBegin_usingDict().
Start decompression, with ZSTD_decompressBegin().
If decompression requires a dictionary, use ZSTD_decompressBegin_usingDict() or ZSTD_decompressBegin_usingDDict().
Alternatively, you can copy a prepared context, using ZSTD_copyDCtx().
Then use ZSTD_nextSrcSizeToDecompress() and ZSTD_decompressContinue() alternatively.
@ -624,29 +753,196 @@ size_t ZSTD_copyCCtx(ZSTD_CCtx* cctx, const ZSTD_CCtx* preparedCCtx, unsigned lo
b) Frame Size - 4 Bytes, Little endian format, unsigned 32-bits
c) Frame Content - any content (User Data) of length equal to Frame Size
For skippable frames ZSTD_decompressContinue() always returns 0.
For skippable frames ZSTD_getFrameParams() returns fparamsPtr->windowLog==0 what means that a frame is skippable.
For skippable frames ZSTD_getFrameHeader() returns fparamsPtr->windowLog==0, which means that the frame is skippable.
Note : If fparamsPtr->frameContentSize==0, it is ambiguous: the frame might actually be a Zstd encoded frame with no content.
For purposes of decompression, it is valid in both cases to skip the frame using
ZSTD_findFrameCompressedSize to find its size in bytes.
It also returns Frame Size as fparamsPtr->frameContentSize.
<BR></pre>
<pre><b>typedef struct {
unsigned long long frameContentSize;
unsigned windowSize;
unsigned dictID;
unsigned checksumFlag;
} ZSTD_frameParams;
</b></pre><BR>
<h3>Buffer-less streaming decompression functions</h3><pre></pre><b><pre>size_t ZSTD_getFrameParams(ZSTD_frameParams* fparamsPtr, const void* src, size_t srcSize); </b>/**< doesn't consume input, see details below */<b>
<h3>Buffer-less streaming decompression functions</h3><pre></pre><b><pre>size_t ZSTD_getFrameHeader(ZSTD_frameHeader* zfhPtr, const void* src, size_t srcSize); </b>/**< doesn't consume input */<b>
size_t ZSTD_decompressBegin(ZSTD_DCtx* dctx);
size_t ZSTD_decompressBegin_usingDict(ZSTD_DCtx* dctx, const void* dict, size_t dictSize);
size_t ZSTD_decompressBegin_usingDDict(ZSTD_DCtx* dctx, const ZSTD_DDict* ddict);
void ZSTD_copyDCtx(ZSTD_DCtx* dctx, const ZSTD_DCtx* preparedDCtx);
size_t ZSTD_nextSrcSizeToDecompress(ZSTD_DCtx* dctx);
size_t ZSTD_decompressContinue(ZSTD_DCtx* dctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize);
typedef enum { ZSTDnit_frameHeader, ZSTDnit_blockHeader, ZSTDnit_block, ZSTDnit_lastBlock, ZSTDnit_checksum, ZSTDnit_skippableFrame } ZSTD_nextInputType_e;
ZSTD_nextInputType_e ZSTD_nextInputType(ZSTD_DCtx* dctx);
</pre></b><BR>
<pre><b>typedef enum { ZSTDnit_frameHeader, ZSTDnit_blockHeader, ZSTDnit_block, ZSTDnit_lastBlock, ZSTDnit_checksum, ZSTDnit_skippableFrame } ZSTD_nextInputType_e;
</b></pre><BR>
<h3>New advanced API (experimental, and compression only)</h3><pre></pre><b><pre></pre></b><BR>
<pre><b>typedef enum {
</b>/* compression parameters */<b>
ZSTD_p_compressionLevel=100, </b>/* Update all compression parameters according to pre-defined cLevel table<b>
* Default level is ZSTD_CLEVEL_DEFAULT==3.
* Special: value 0 means "do not change cLevel". */
ZSTD_p_windowLog, </b>/* Maximum allowed back-reference distance, expressed as power of 2.<b>
* Must be clamped between ZSTD_WINDOWLOG_MIN and ZSTD_WINDOWLOG_MAX.
* Special: value 0 means "do not change windowLog". */
ZSTD_p_hashLog, </b>/* Size of the probe table, as a power of 2.<b>
* Resulting table size is (1 << (hashLog+2)).
* Must be clamped between ZSTD_HASHLOG_MIN and ZSTD_HASHLOG_MAX.
* Larger tables improve compression ratio of strategies <= dFast,
* and improve speed of strategies > dFast.
* Special: value 0 means "do not change hashLog". */
ZSTD_p_chainLog, </b>/* Size of the full-search table, as a power of 2.<b>
* Resulting table size is (1 << (chainLog+2)).
* Larger tables result in better and slower compression.
* This parameter is useless when using "fast" strategy.
* Special: value 0 means "do not change chainLog". */
ZSTD_p_searchLog, </b>/* Number of search attempts, as a power of 2.<b>
* More attempts result in better and slower compression.
* This parameter is useless when using "fast" and "dFast" strategies.
* Special: value 0 means "do not change searchLog". */
ZSTD_p_minMatch, </b>/* Minimum size of searched matches (note : repCode matches can be smaller).<b>
* Larger values make faster compression and decompression, but decrease ratio.
* Must be clamped between ZSTD_SEARCHLENGTH_MIN and ZSTD_SEARCHLENGTH_MAX.
* Note that currently, for all strategies < btopt, effective minimum is 4.
* Note that currently, for all strategies > fast, effective maximum is 6.
* Special: value 0 means "do not change minMatchLength". */
ZSTD_p_targetLength, </b>/* Only useful for strategies >= btopt.<b>
* Length of Match considered "good enough" to stop search.
* Larger values make compression stronger and slower.
* Special: value 0 means "do not change targetLength". */
ZSTD_p_compressionStrategy, </b>/* See ZSTD_strategy enum definition.<b>
* Cast selected strategy as unsigned for ZSTD_CCtx_setParameter() compatibility.
* The higher the value of selected strategy, the more complex it is,
* resulting in stronger and slower compression.
* Special: value 0 means "do not change strategy". */
</b>/* frame parameters */<b>
ZSTD_p_contentSizeFlag=200, </b>/* Content size is written into frame header _whenever known_ (default:1) */<b>
ZSTD_p_checksumFlag, </b>/* A 32-bits checksum of content is written at end of frame (default:0) */<b>
ZSTD_p_dictIDFlag, </b>/* When applicable, dictID of dictionary is provided in frame header (default:1) */<b>
</b>/* dictionary parameters (must be set before ZSTD_CCtx_loadDictionary) */<b>
ZSTD_p_dictMode=300, </b>/* Select how dictionary content must be interpreted. Value must be from type ZSTD_dictMode_e.<b>
* default : 0==auto : dictionary will be "full" if it respects specification, otherwise it will be "rawContent" */
ZSTD_p_refDictContent, </b>/* Dictionary content will be referenced, instead of copied (default:0==byCopy).<b>
* It requires that dictionary buffer outlives its users */
</b>/* multi-threading parameters */<b>
ZSTD_p_nbThreads=400, </b>/* Select how many threads a compression job can spawn (default:1)<b>
* More threads improve speed, but also increase memory usage.
* Can only receive a value > 1 if ZSTD_MULTITHREAD is enabled.
* Special: value 0 means "do not change nbThreads" */
ZSTD_p_jobSize, </b>/* Size of a compression job. Each compression job is completed in parallel.<b>
* 0 means default, which is dynamically determined based on compression parameters.
* Job size must be a minimum of overlapSize, or 1 KB, whichever is largest
* The minimum size is automatically and transparently enforced */
ZSTD_p_overlapSizeLog, </b>/* Size of previous input reloaded at the beginning of each job.<b>
* 0 => no overlap, 6(default) => use 1/8th of windowSize, >=9 => use full windowSize */
</b>/* advanced parameters - may not remain available after API update */<b>
ZSTD_p_forceMaxWindow=1100, </b>/* Force back-reference distances to remain < windowSize,<b>
* even when referencing into Dictionary content (default:0) */
} ZSTD_cParameter;
</b></pre><BR>
<pre><b>size_t ZSTD_CCtx_setParameter(ZSTD_CCtx* cctx, ZSTD_cParameter param, unsigned value);
</b><p> Set one compression parameter, selected by enum ZSTD_cParameter.
Note : when `value` is an enum, cast it to unsigned for proper type checking.
@result : 0, or an error code (which can be tested with ZSTD_isError()).
</p></pre><BR>
<pre><b>size_t ZSTD_CCtx_setPledgedSrcSize(ZSTD_CCtx* cctx, unsigned long long pledgedSrcSize);
</b><p> Total input data size to be compressed as a single frame.
This value will be controlled at the end, and result in error if not respected.
@result : 0, or an error code (which can be tested with ZSTD_isError()).
Note 1 : 0 means zero, empty.
In order to mean "unknown content size", pass constant ZSTD_CONTENTSIZE_UNKNOWN.
Note that ZSTD_CONTENTSIZE_UNKNOWN is default value for new compression jobs.
Note 2 : If all data is provided and consumed in a single round,
this value is overridden by srcSize instead.
</p></pre><BR>
<pre><b>size_t ZSTD_CCtx_loadDictionary(ZSTD_CCtx* cctx, const void* dict, size_t dictSize);
</b><p> Create an internal CDict from dict buffer.
Decompression will have to use same buffer.
@result : 0, or an error code (which can be tested with ZSTD_isError()).
Special : Adding a NULL (or 0-size) dictionary invalidates any previous dictionary,
meaning "return to no-dictionary mode".
Note 1 : `dict` content will be copied internally,
except if ZSTD_p_refDictContent is set before loading.
Note 2 : Loading a dictionary involves building tables, which are dependent on compression parameters.
For this reason, compression parameters cannot be changed anymore after loading a dictionary.
It's also a CPU-heavy operation, with non-negligible impact on latency.
Note 3 : Dictionary will be used for all future compression jobs.
To return to "no-dictionary" situation, load a NULL dictionary
</p></pre><BR>
<pre><b>size_t ZSTD_CCtx_refCDict(ZSTD_CCtx* cctx, const ZSTD_CDict* cdict);
</b><p> Reference a prepared dictionary, to be used for all next compression jobs.
Note that compression parameters are enforced from within CDict,
and supersede any compression parameter previously set within CCtx.
The dictionary will remain valid for future compression jobs using same CCtx.
@result : 0, or an error code (which can be tested with ZSTD_isError()).
Special : adding a NULL CDict means "return to no-dictionary mode".
Note 1 : Currently, only one dictionary can be managed.
Adding a new dictionary effectively "discards" any previous one.
Note 2 : CDict is just referenced, its lifetime must outlive CCtx.
</p></pre><BR>
<pre><b>size_t ZSTD_CCtx_refPrefix(ZSTD_CCtx* cctx, const void* prefix, size_t prefixSize);
</b><p> Reference a prefix (single-usage dictionary) for next compression job.
Decompression needs the same prefix to properly regenerate data.
Prefix is **only used once**. Tables are discarded at end of compression job.
Subsequent compression jobs will be done without prefix (if none is explicitly referenced).
  If there is a need to use the same prefix multiple times, consider embedding it into a ZSTD_CDict instead.
@result : 0, or an error code (which can be tested with ZSTD_isError()).
Special : Adding any prefix (including NULL) invalidates any previous prefix or dictionary
Note 1 : Prefix buffer is referenced. It must outlive compression job.
Note 2 : Referencing a prefix involves building tables, which are dependent on compression parameters.
It's a CPU-heavy operation, with non-negligible impact on latency.
Note 3 : it's possible to alter ZSTD_p_dictMode using ZSTD_CCtx_setParameter()
</p></pre><BR>
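  Sketch : using previously transmitted plain data as a prefix for the next message of a protocol stream
  (prevMsg/prevSize are assumptions ; the buffer must stay alive until the job completes) :
<pre>
    size_t const err = ZSTD_CCtx_refPrefix(cctx, prevMsg, prevSize);
    if (ZSTD_isError(err)) { /* handle error */ }
    /* compress the next message ; the decoder must be given the same prefix to regenerate it */
</pre>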
<pre><b>typedef enum {
ZSTD_e_continue=0, </b>/* collect more data, encoder transparently decides when to output result, for optimal conditions */<b>
ZSTD_e_flush, </b>/* flush any data provided so far - frame will continue, future data can still reference previous data for better compression */<b>
ZSTD_e_end </b>/* flush any remaining data and ends current frame. Any future compression starts a new frame. */<b>
} ZSTD_EndDirective;
</b></pre><BR>
<pre><b>size_t ZSTD_compress_generic (ZSTD_CCtx* cctx,
ZSTD_outBuffer* output,
ZSTD_inBuffer* input,
ZSTD_EndDirective endOp);
</b><p>  Behaves about the same as ZSTD_compressStream(). Points to note :
  - Compression parameters are pushed into the CCtx before starting compression, using ZSTD_CCtx_setParameter()
  - Compression parameters cannot be changed once compression is started.
  - *dstPos must be <= dstCapacity, *srcPos must be <= srcSize
  - *dstPos and *srcPos will be updated. They are guaranteed to remain below their respective limit.
- @return provides the minimum amount of data still to flush from internal buffers
or an error code, which can be tested using ZSTD_isError().
if @return != 0, flush is not fully completed, there is some data left within internal buffers.
- after a ZSTD_e_end directive, if internal buffer is not fully flushed,
only ZSTD_e_end or ZSTD_e_flush operations are allowed.
It is necessary to fully flush internal buffers
before starting a new compression job, or changing compression parameters.
</p></pre><BR>
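  A compact single-shot sketch, assuming the whole input is in memory and dst was sized with ZSTD_compressBound() :
<pre>
    ZSTD_inBuffer  input  = { src, srcSize, 0 };
    ZSTD_outBuffer output = { dst, dstCapacity, 0 };
    size_t const remaining = ZSTD_compress_generic(cctx, &output, &input, ZSTD_e_end);
    if (ZSTD_isError(remaining)) { /* handle error */ }
    /* remaining==0 means the frame is fully flushed ; output.pos is the compressed size */
</pre>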
<pre><b>void ZSTD_CCtx_reset(ZSTD_CCtx* cctx); </b>/* Not ready yet ! */<b>
</b><p>  Return a CCtx to a clean state.
Useful after an error, or to interrupt an ongoing compression job and start a new one.
Any internal data not yet flushed is cancelled.
Dictionary (if any) is dropped.
It's possible to modify compression parameters after a reset.
</p></pre><BR>
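  Purely illustrative sketch of the intended usage (the declaration above is still flagged "Not ready yet !") ;
  ZSTD_p_compressionLevel is assumed from the full parameter list :
<pre>
    ZSTD_CCtx_reset(cctx);                                        /* drops unflushed data and any dictionary */
    ZSTD_CCtx_setParameter(cctx, ZSTD_p_compressionLevel, 3);     /* parameters can be changed again */
    /* ... start a new compression job ... */
</pre>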
<pre><b>size_t ZSTD_compress_generic_simpleArgs (
ZSTD_CCtx* cctx,
void* dst, size_t dstCapacity, size_t* dstPos,
const void* src, size_t srcSize, size_t* srcPos,
ZSTD_EndDirective endOp);
</b><p> Same as ZSTD_compress_generic(),
but using only integral types as arguments.
Argument list is larger and less expressive than ZSTD_{in,out}Buffer,
but can be helpful for binders from dynamic languages
  which have trouble handling structures containing memory pointers.
</p></pre><BR>
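  Sketch : the same single-shot compression as above, expressed with the integral-type variant :
<pre>
    size_t dstPos = 0, srcPos = 0;
    size_t const remaining = ZSTD_compress_generic_simpleArgs(cctx,
                                    dst, dstCapacity, &dstPos,
                                    src, srcSize, &srcPos,
                                    ZSTD_e_end);
    if (ZSTD_isError(remaining)) { /* handle error */ }
    /* dstPos now holds the number of compressed bytes written into dst */
</pre>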
<a name="Chapter20"></a><h2>Block functions</h2><pre>
Block functions produce and decode raw zstd blocks, without frame metadata.
Frame metadata cost is typically ~18 bytes, which can be non-negligible for very small blocks (< 100 bytes).
@ -659,7 +955,7 @@ ZSTD_nextInputType_e ZSTD_nextInputType(ZSTD_DCtx* dctx);
+ compression : any ZSTD_compressBegin*() variant, including with dictionary
+ decompression : any ZSTD_decompressBegin*() variant, including with dictionary
+ copyCCtx() and copyDCtx() can be used too
- Block size is limited, it must be <= ZSTD_getBlockSizeMax() <= ZSTD_BLOCKSIZE_ABSOLUTEMAX
- Block size is limited, it must be <= ZSTD_getBlockSize() <= ZSTD_BLOCKSIZE_MAX
+ If input is larger than a block size, it's necessary to split input data into multiple blocks
+ For inputs larger than a single block size, consider using the regular ZSTD_compress() instead.
Frame metadata is not that costly, and quickly becomes negligible as source size grows larger.
@ -672,7 +968,7 @@ ZSTD_nextInputType_e ZSTD_nextInputType(ZSTD_DCtx* dctx);
Use ZSTD_insertBlock() for such a case.
<BR></pre>
<h3>Raw zstd block functions</h3><pre></pre><b><pre>size_t ZSTD_getBlockSizeMax(ZSTD_CCtx* cctx);
<h3>Raw zstd block functions</h3><pre></pre><b><pre>size_t ZSTD_getBlockSize (const ZSTD_CCtx* cctx);
size_t ZSTD_compressBlock (ZSTD_CCtx* cctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize);
size_t ZSTD_decompressBlock(ZSTD_DCtx* dctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize);
size_t ZSTD_insertBlock(ZSTD_DCtx* dctx, const void* blockStart, size_t blockSize); </b>/**< insert block into `dctx` history. Useful for uncompressed blocks */<b>

View File

@ -46,7 +46,7 @@ clean:
simple_compression simple_decompression \
dictionary_compression dictionary_decompression \
streaming_compression streaming_decompression \
multiple_streaming_compression
multiple_streaming_compression
@echo Cleaning completed
test: all

View File

@ -22,9 +22,11 @@ VERSION?= $(LIBVER)
CPPFLAGS+= -I. -I./common -DXXH_NAMESPACE=ZSTD_
CFLAGS ?= -O3
DEBUGFLAGS = -g -Wall -Wextra -Wcast-qual -Wcast-align -Wshadow \
-Wstrict-aliasing=1 -Wswitch-enum -Wdeclaration-after-statement \
-Wstrict-prototypes -Wundef -Wpointer-arith -Wformat-security
DEBUGFLAGS = -Wall -Wextra -Wcast-qual -Wcast-align -Wshadow \
-Wstrict-aliasing=1 -Wswitch-enum -Wdeclaration-after-statement \
-Wstrict-prototypes -Wundef -Wpointer-arith -Wformat-security \
-Wvla -Wformat=2 -Winit-self -Wfloat-equal -Wwrite-strings \
-Wredundant-decls
CFLAGS += $(DEBUGFLAGS) $(MOREFLAGS)
FLAGS = $(CPPFLAGS) $(CFLAGS)
@ -71,7 +73,7 @@ libzstd.a: $(ZSTD_OBJ)
@echo compiling static library
@$(AR) $(ARFLAGS) $@ $^
libzstd.a-mt: CPPFLAGS += -DZSTD_MULTHREAD
libzstd.a-mt: CPPFLAGS += -DZSTD_MULTITHREAD
libzstd.a-mt: libzstd.a
$(LIBZSTD): LDFLAGS += -shared -fPIC -fvisibility=hidden
@ -147,7 +149,7 @@ install: libzstd.a libzstd libzstd.pc
@$(INSTALL) -d -m 755 $(DESTDIR)$(PKGCONFIGDIR)/ $(DESTDIR)$(INCLUDEDIR)/
@$(INSTALL_DATA) libzstd.pc $(DESTDIR)$(PKGCONFIGDIR)/
@echo Installing libraries
@$(INSTALL_LIB) libzstd.a $(DESTDIR)$(LIBDIR)
@$(INSTALL_DATA) libzstd.a $(DESTDIR)$(LIBDIR)
@$(INSTALL_LIB) libzstd.$(SHARED_EXT_VER) $(DESTDIR)$(LIBDIR)
@ln -sf libzstd.$(SHARED_EXT_VER) $(DESTDIR)$(LIBDIR)/libzstd.$(SHARED_EXT_MAJOR)
@ln -sf libzstd.$(SHARED_EXT_VER) $(DESTDIR)$(LIBDIR)/libzstd.$(SHARED_EXT)

View File

@ -39,7 +39,6 @@
extern "C" {
#endif
/*
* This API consists of small unitary functions, which must be inlined for best performance.
* Since link-time-optimization is not available for all compilers,
@ -59,7 +58,9 @@ extern "C" {
#if defined(BIT_DEBUG) && (BIT_DEBUG>=1)
# include <assert.h>
#else
# define assert(condition) ((void)0)
# ifndef assert
# define assert(condition) ((void)0)
# endif
#endif
@ -74,6 +75,7 @@ extern "C" {
#define STREAM_ACCUMULATOR_MIN_64 57
#define STREAM_ACCUMULATOR_MIN ((U32)(MEM_32bits() ? STREAM_ACCUMULATOR_MIN_32 : STREAM_ACCUMULATOR_MIN_64))
/*-******************************************
* bitStream encoding API (write forward)
********************************************/
@ -303,13 +305,25 @@ MEM_STATIC size_t BIT_initDStream(BIT_DStream_t* bitD, const void* srcBuffer, si
bitD->bitContainer = *(const BYTE*)(bitD->start);
switch(srcSize)
{
case 7: bitD->bitContainer += (size_t)(((const BYTE*)(srcBuffer))[6]) << (sizeof(bitD->bitContainer)*8 - 16);
case 6: bitD->bitContainer += (size_t)(((const BYTE*)(srcBuffer))[5]) << (sizeof(bitD->bitContainer)*8 - 24);
case 5: bitD->bitContainer += (size_t)(((const BYTE*)(srcBuffer))[4]) << (sizeof(bitD->bitContainer)*8 - 32);
case 4: bitD->bitContainer += (size_t)(((const BYTE*)(srcBuffer))[3]) << 24;
case 3: bitD->bitContainer += (size_t)(((const BYTE*)(srcBuffer))[2]) << 16;
case 2: bitD->bitContainer += (size_t)(((const BYTE*)(srcBuffer))[1]) << 8;
default:;
case 7: bitD->bitContainer += (size_t)(((const BYTE*)(srcBuffer))[6]) << (sizeof(bitD->bitContainer)*8 - 16);
/* fall-through */
case 6: bitD->bitContainer += (size_t)(((const BYTE*)(srcBuffer))[5]) << (sizeof(bitD->bitContainer)*8 - 24);
/* fall-through */
case 5: bitD->bitContainer += (size_t)(((const BYTE*)(srcBuffer))[4]) << (sizeof(bitD->bitContainer)*8 - 32);
/* fall-through */
case 4: bitD->bitContainer += (size_t)(((const BYTE*)(srcBuffer))[3]) << 24;
/* fall-through */
case 3: bitD->bitContainer += (size_t)(((const BYTE*)(srcBuffer))[2]) << 16;
/* fall-through */
case 2: bitD->bitContainer += (size_t)(((const BYTE*)(srcBuffer))[1]) << 8;
/* fall-through */
default: break;
}
{ BYTE const lastByte = ((const BYTE*)srcBuffer)[srcSize-1];
bitD->bitsConsumed = lastByte ? 8 - BIT_highbit32(lastByte) : 0;

View File

@ -24,7 +24,8 @@ const char* ERR_getErrorString(ERR_enum code)
case PREFIX(frameParameter_unsupported): return "Unsupported frame parameter";
case PREFIX(frameParameter_unsupportedBy32bits): return "Frame parameter unsupported in 32-bits mode";
case PREFIX(frameParameter_windowTooLarge): return "Frame requires too much memory for decoding";
case PREFIX(compressionParameter_unsupported): return "Compression parameter is out of bound";
case PREFIX(compressionParameter_unsupported): return "Compression parameter is not supported";
case PREFIX(compressionParameter_outOfBound): return "Compression parameter is out of bound";
case PREFIX(init_missing): return "Context should be init first";
case PREFIX(memory_allocation): return "Allocation error : not enough memory";
case PREFIX(stage_wrong): return "Operation not authorized at current processing stage";
@ -38,6 +39,8 @@ const char* ERR_getErrorString(ERR_enum code)
case PREFIX(dictionary_corrupted): return "Dictionary is corrupted";
case PREFIX(dictionary_wrong): return "Dictionary mismatch";
case PREFIX(dictionaryCreation_failed): return "Cannot create Dictionary from provided samples";
case PREFIX(frameIndex_tooLarge): return "Frame index is too large";
case PREFIX(seekableIO): return "An I/O error occurred when reading/seeking";
case PREFIX(maxCode):
default: return notErrorCode;
}

View File

@ -111,6 +111,18 @@ HUF_PUBLIC_API size_t HUF_compress2 (void* dst, size_t dstCapacity, const void*
#define HUF_WORKSPACE_SIZE_U32 (HUF_WORKSPACE_SIZE / sizeof(U32))
HUF_PUBLIC_API size_t HUF_compress4X_wksp (void* dst, size_t dstCapacity, const void* src, size_t srcSize, unsigned maxSymbolValue, unsigned tableLog, void* workSpace, size_t wkspSize);
/**
* The minimum workspace size for the `workSpace` used in
* HUF_readDTableX2_wksp() and HUF_readDTableX4_wksp().
*
* The space used depends on HUF_TABLELOG_MAX, ranging from ~1500 bytes when
* HUF_TABLELOG_MAX=12 to ~1850 bytes when HUF_TABLELOG_MAX=15.
* Buffer overflow errors may potentially occur if code modifications result in
* a required workspace size greater than that specified in the following
* macro.
*/
#define HUF_DECOMPRESS_WORKSPACE_SIZE (2 << 10)
#define HUF_DECOMPRESS_WORKSPACE_SIZE_U32 (HUF_DECOMPRESS_WORKSPACE_SIZE / sizeof(U32))
/* ******************************************************************
@ -170,8 +182,11 @@ size_t HUF_decompress4X4 (void* dst, size_t dstSize, const void* cSrc, size_t cS
size_t HUF_decompress4X_DCtx (HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize); /**< decodes RLE and uncompressed */
size_t HUF_decompress4X_hufOnly(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize); /**< considers RLE and uncompressed as errors */
size_t HUF_decompress4X_hufOnly_wksp(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize, void* workSpace, size_t wkspSize); /**< considers RLE and uncompressed as errors */
size_t HUF_decompress4X2_DCtx(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize); /**< single-symbol decoder */
size_t HUF_decompress4X2_DCtx_wksp(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize, void* workSpace, size_t wkspSize); /**< single-symbol decoder */
size_t HUF_decompress4X4_DCtx(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize); /**< double-symbols decoder */
size_t HUF_decompress4X4_DCtx_wksp(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize, void* workSpace, size_t wkspSize); /**< double-symbols decoder */
/* ****************************************
@ -243,7 +258,9 @@ HUF_decompress() does the following:
U32 HUF_selectDecoder (size_t dstSize, size_t cSrcSize);
size_t HUF_readDTableX2 (HUF_DTable* DTable, const void* src, size_t srcSize);
size_t HUF_readDTableX2_wksp (HUF_DTable* DTable, const void* src, size_t srcSize, void* workSpace, size_t wkspSize);
size_t HUF_readDTableX4 (HUF_DTable* DTable, const void* src, size_t srcSize);
size_t HUF_readDTableX4_wksp (HUF_DTable* DTable, const void* src, size_t srcSize, void* workSpace, size_t wkspSize);
size_t HUF_decompress4X_usingDTable(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize, const HUF_DTable* DTable);
size_t HUF_decompress4X2_usingDTable(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize, const HUF_DTable* DTable);
@ -266,8 +283,11 @@ size_t HUF_decompress1X2 (void* dst, size_t dstSize, const void* cSrc, size_t cS
size_t HUF_decompress1X4 (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize); /* double-symbol decoder */
size_t HUF_decompress1X_DCtx (HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize);
size_t HUF_decompress1X_DCtx_wksp (HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize, void* workSpace, size_t wkspSize);
size_t HUF_decompress1X2_DCtx(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize); /**< single-symbol decoder */
size_t HUF_decompress1X2_DCtx_wksp(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize, void* workSpace, size_t wkspSize); /**< single-symbol decoder */
size_t HUF_decompress1X4_DCtx(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize); /**< double-symbols decoder */
size_t HUF_decompress1X4_DCtx_wksp(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize, void* workSpace, size_t wkspSize); /**< double-symbols decoder */
size_t HUF_decompress1X_usingDTable(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize, const HUF_DTable* DTable); /**< automatic selection of single or double symbol decoder, based on DTable */
size_t HUF_decompress1X2_usingDTable(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize, const HUF_DTable* DTable);

View File

@ -352,20 +352,6 @@ MEM_STATIC void MEM_writeBEST(void* memPtr, size_t val)
}
/* function safe only for comparisons */
MEM_STATIC U32 MEM_readMINMATCH(const void* memPtr, U32 length)
{
switch (length)
{
default :
case 4 : return MEM_read32(memPtr);
case 3 : if (MEM_isLittleEndian())
return MEM_read32(memPtr)<<8;
else
return MEM_read32(memPtr)>>8;
}
}
#if defined (__cplusplus)
}
#endif

View File

@ -146,6 +146,13 @@ void POOL_free(POOL_ctx *ctx) {
free(ctx);
}
size_t POOL_sizeof(POOL_ctx *ctx) {
if (ctx==NULL) return 0; /* supports sizeof NULL */
return sizeof(*ctx)
+ ctx->queueSize * sizeof(POOL_job)
+ ctx->numThreads * sizeof(pthread_t);
}
void POOL_add(void *ctxVoid, POOL_function function, void *opaque) {
POOL_ctx *ctx = (POOL_ctx *)ctxVoid;
if (!ctx) { return; }
@ -191,4 +198,9 @@ void POOL_add(void *ctx, POOL_function function, void *opaque) {
function(opaque);
}
size_t POOL_sizeof(POOL_ctx *ctx) {
if (ctx==NULL) return 0; /* supports sizeof NULL */
return sizeof(*ctx);
}
#endif /* ZSTD_MULTITHREAD */

View File

@ -32,6 +32,11 @@ POOL_ctx *POOL_create(size_t numThreads, size_t queueSize);
*/
void POOL_free(POOL_ctx *ctx);
/*! POOL_sizeof() :
return memory usage of pool returned by POOL_create().
*/
size_t POOL_sizeof(POOL_ctx *ctx);
/*! POOL_function :
The function type that can be added to a thread pool.
*/

View File

@ -1,4 +1,3 @@
/**
* Copyright (c) 2016 Tino Reichardt
* All rights reserved.

View File

@ -12,16 +12,19 @@
/*-*************************************
* Dependencies
***************************************/
#include <stdlib.h> /* malloc */
#include <stdlib.h> /* malloc, calloc, free */
#include <string.h> /* memset */
#include "error_private.h"
#define ZSTD_STATIC_LINKING_ONLY
#include "zstd.h" /* declaration of ZSTD_isError, ZSTD_getErrorName, ZSTD_getErrorCode, ZSTD_getErrorString, ZSTD_versionNumber */
#include "zstd.h"
/*-****************************************
* Version
******************************************/
unsigned ZSTD_versionNumber (void) { return ZSTD_VERSION_NUMBER; }
unsigned ZSTD_versionNumber(void) { return ZSTD_VERSION_NUMBER; }
const char* ZSTD_versionString(void) { return ZSTD_VERSION_STRING; }
/*-****************************************
@ -47,27 +50,31 @@ const char* ZSTD_getErrorString(ZSTD_ErrorCode code) { return ERR_getErrorString
/*=**************************************************************
* Custom allocator
****************************************************************/
/* default uses stdlib */
void* ZSTD_defaultAllocFunction(void* opaque, size_t size)
{
void* address = malloc(size);
(void)opaque;
return address;
}
void ZSTD_defaultFreeFunction(void* opaque, void* address)
{
(void)opaque;
free(address);
}
void* ZSTD_malloc(size_t size, ZSTD_customMem customMem)
{
return customMem.customAlloc(customMem.opaque, size);
if (customMem.customAlloc)
return customMem.customAlloc(customMem.opaque, size);
return malloc(size);
}
void* ZSTD_calloc(size_t size, ZSTD_customMem customMem)
{
if (customMem.customAlloc) {
/* calloc implemented as malloc+memset;
* not as efficient as calloc, but next best guess for custom malloc */
void* const ptr = customMem.customAlloc(customMem.opaque, size);
memset(ptr, 0, size);
return ptr;
}
return calloc(1, size);
}
void ZSTD_free(void* ptr, ZSTD_customMem customMem)
{
if (ptr!=NULL)
customMem.customFree(customMem.opaque, ptr);
if (ptr!=NULL) {
if (customMem.customFree)
customMem.customFree(customMem.opaque, ptr);
else
free(ptr);
}
}

View File

@ -19,10 +19,12 @@ extern "C" {
/* ===== ZSTDERRORLIB_API : control library symbols visibility ===== */
#if defined(__GNUC__) && (__GNUC__ >= 4)
# define ZSTDERRORLIB_VISIBILITY __attribute__ ((visibility ("default")))
#else
# define ZSTDERRORLIB_VISIBILITY
#ifndef ZSTDERRORLIB_VISIBILITY
# if defined(__GNUC__) && (__GNUC__ >= 4)
# define ZSTDERRORLIB_VISIBILITY __attribute__ ((visibility ("default")))
# else
# define ZSTDERRORLIB_VISIBILITY
# endif
#endif
#if defined(ZSTD_DLL_EXPORT) && (ZSTD_DLL_EXPORT==1)
# define ZSTDERRORLIB_API __declspec(dllexport) ZSTDERRORLIB_VISIBILITY
@ -33,8 +35,11 @@ extern "C" {
#endif
/*-****************************************
* error codes list
******************************************/
* error codes list
* note : this API is still considered unstable
* it should not be used with a dynamic library
* only static linking is allowed
******************************************/
typedef enum {
ZSTD_error_no_error,
ZSTD_error_GENERIC,
@ -45,6 +50,7 @@ typedef enum {
ZSTD_error_frameParameter_unsupportedBy32bits,
ZSTD_error_frameParameter_windowTooLarge,
ZSTD_error_compressionParameter_unsupported,
ZSTD_error_compressionParameter_outOfBound,
ZSTD_error_init_missing,
ZSTD_error_memory_allocation,
ZSTD_error_stage_wrong,
@ -58,12 +64,14 @@ typedef enum {
ZSTD_error_dictionary_corrupted,
ZSTD_error_dictionary_wrong,
ZSTD_error_dictionaryCreation_failed,
ZSTD_error_frameIndex_tooLarge,
ZSTD_error_seekableIO,
ZSTD_error_maxCode
} ZSTD_ErrorCode;
/*! ZSTD_getErrorCode() :
convert a `size_t` function result into a `ZSTD_ErrorCode` enum type,
which can be used to compare directly with enum list published into "error_public.h" */
which can be used to compare with enum list published above */
ZSTDERRORLIB_API ZSTD_ErrorCode ZSTD_getErrorCode(size_t functionResult);
ZSTDERRORLIB_API const char* ZSTD_getErrorString(ZSTD_ErrorCode code);

View File

@ -18,6 +18,7 @@
# include <intrin.h> /* For Visual 2005 */
# pragma warning(disable : 4100) /* disable: C4100: unreferenced formal parameter */
# pragma warning(disable : 4127) /* disable: C4127: conditional expression is constant */
# pragma warning(disable : 4204) /* disable: C4204: non-constant aggregate initializer */
# pragma warning(disable : 4324) /* disable: C4324: padded structure */
#else
# if defined (__cplusplus) || defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L /* C99 */
@ -50,9 +51,42 @@
#define ZSTD_STATIC_LINKING_ONLY
#include "zstd.h"
#ifndef XXH_STATIC_LINKING_ONLY
# define XXH_STATIC_LINKING_ONLY /* XXH64_state_t */
# define XXH_STATIC_LINKING_ONLY /* XXH64_state_t */
#endif
#include "xxhash.h" /* XXH_reset, update, digest */
/*-*************************************
* Debug
***************************************/
#if defined(ZSTD_DEBUG) && (ZSTD_DEBUG>=1)
# include <assert.h>
#else
# ifndef assert
# define assert(condition) ((void)0)
# endif
#endif
#define ZSTD_STATIC_ASSERT(c) { enum { ZSTD_static_assert = 1/(int)(!!(c)) }; }
#if defined(ZSTD_DEBUG) && (ZSTD_DEBUG>=2)
# include <stdio.h>
/* recommended values for ZSTD_DEBUG display levels :
* 1 : no display, enables assert() only
* 2 : reserved for currently active debugging path
* 3 : events once per object lifetime (CCtx, CDict)
* 4 : events once per frame
* 5 : events once per block
* 6 : events once per sequence (*very* verbose) */
# define DEBUGLOG(l, ...) { \
if (l<=ZSTD_DEBUG) { \
fprintf(stderr, __FILE__ ": "); \
fprintf(stderr, __VA_ARGS__); \
fprintf(stderr, " \n"); \
} }
#else
# define DEBUGLOG(l, ...) {} /* disabled */
#endif
#include "xxhash.h" /* XXH_reset, update, digest */
/*-*************************************
@ -70,7 +104,6 @@
* Common constants
***************************************/
#define ZSTD_OPT_NUM (1<<12)
#define ZSTD_DICT_MAGIC 0xEC30A437 /* v0.7+ */
#define ZSTD_REP_NUM 3 /* number of repcodes */
#define ZSTD_REP_CHECK (ZSTD_REP_NUM) /* number of repcodes to check by the optimal parser */
@ -235,15 +268,10 @@ typedef struct {
const seqStore_t* ZSTD_getSeqStore(const ZSTD_CCtx* ctx);
void ZSTD_seqToCodes(const seqStore_t* seqStorePtr);
int ZSTD_isSkipFrame(ZSTD_DCtx* dctx);
/* custom memory allocation functions */
void* ZSTD_defaultAllocFunction(void* opaque, size_t size);
void ZSTD_defaultFreeFunction(void* opaque, void* address);
#ifndef ZSTD_DLL_IMPORT
static const ZSTD_customMem defaultCustomMem = { ZSTD_defaultAllocFunction, ZSTD_defaultFreeFunction, NULL };
#endif
void* ZSTD_malloc(size_t size, ZSTD_customMem customMem);
void* ZSTD_calloc(size_t size, ZSTD_customMem customMem);
void ZSTD_free(void* ptr, ZSTD_customMem customMem);
@ -281,4 +309,26 @@ MEM_STATIC U32 ZSTD_highbit32(U32 val)
void ZSTD_invalidateRepCodes(ZSTD_CCtx* cctx);
/*! ZSTD_initCStream_internal() :
* Private use only. Init streaming operation.
* expects params to be valid.
* must receive dict, or cdict, or none, but not both.
* @return : 0, or an error code */
size_t ZSTD_initCStream_internal(ZSTD_CStream* zcs,
const void* dict, size_t dictSize,
const ZSTD_CDict* cdict,
ZSTD_parameters params, unsigned long long pledgedSrcSize);
/*! ZSTD_compressStream_generic() :
* Private use only. To be called from zstdmt_compress.c in single-thread mode. */
size_t ZSTD_compressStream_generic(ZSTD_CStream* zcs,
ZSTD_outBuffer* output,
ZSTD_inBuffer* input,
ZSTD_EndDirective const flushMode);
/*! ZSTD_getParamsFromCDict() :
* as the name implies */
ZSTD_parameters ZSTD_getParamsFromCDict(const ZSTD_CDict* cdict);
#endif /* ZSTD_CCOMMON_H_MODULE */

View File

@ -266,7 +266,8 @@ static U32 HUF_setMaxHeight(nodeElt* huffNode, U32 lastNonNull, U32 maxNbBits)
if (highTotal <= lowTotal) break;
} }
/* only triggered when no more rank 1 symbol left => find closest one (note : there is necessarily at least one !) */
while ((nBitsToDecrease<=HUF_TABLELOG_MAX) && (rankLast[nBitsToDecrease] == noSymbol)) /* HUF_MAX_TABLELOG test just to please gcc 5+; but it should not be necessary */
/* HUF_MAX_TABLELOG test just to please gcc 5+; but it should not be necessary */
while ((nBitsToDecrease<=HUF_TABLELOG_MAX) && (rankLast[nBitsToDecrease] == noSymbol))
nBitsToDecrease ++;
totalCost -= 1 << (nBitsToDecrease-1);
if (rankLast[nBitsToDecrease-1] == noSymbol)
@ -463,12 +464,15 @@ size_t HUF_compress1X_usingCTable(void* dst, size_t dstSize, const void* src, si
{
case 3 : HUF_encodeSymbol(&bitC, ip[n+ 2], CTable);
HUF_FLUSHBITS_2(&bitC);
/* fall-through */
case 2 : HUF_encodeSymbol(&bitC, ip[n+ 1], CTable);
HUF_FLUSHBITS_1(&bitC);
/* fall-through */
case 1 : HUF_encodeSymbol(&bitC, ip[n+ 0], CTable);
HUF_FLUSHBITS(&bitC);
case 0 :
default: ;
/* fall-through */
case 0 : /* fall-through */
default: break;
}
for (; n>0; n-=4) { /* note : n&3==0 at this stage */

File diff suppressed because it is too large

View File

@ -43,6 +43,7 @@ MEM_STATIC void ZSTD_rescaleFreqs(seqStore_t* ssPtr, const BYTE* src, size_t src
if (ssPtr->litLengthSum == 0) {
if (srcSize <= 1024) ssPtr->staticPrices = 1;
assert(ssPtr->litFreq!=NULL);
for (u=0; u<=MaxLit; u++)
ssPtr->litFreq[u] = 0;
for (u=0; u<srcSize; u++)
@ -201,6 +202,20 @@ MEM_STATIC void ZSTD_updatePrice(seqStore_t* seqStorePtr, U32 litLength, const B
}
/* function safe only for comparisons */
MEM_STATIC U32 ZSTD_readMINMATCH(const void* memPtr, U32 length)
{
switch (length)
{
default :
case 4 : return MEM_read32(memPtr);
case 3 : if (MEM_isLittleEndian())
return MEM_read32(memPtr)<<8;
else
return MEM_read32(memPtr)>>8;
}
}
/* Update hashTable3 up to ip (excluded)
Assumption : always within prefix (i.e. not within extDict) */
@ -234,12 +249,12 @@ static U32 ZSTD_insertBtAndGetAllMatches (
{
const BYTE* const base = zc->base;
const U32 current = (U32)(ip-base);
const U32 hashLog = zc->params.cParams.hashLog;
const U32 hashLog = zc->appliedParams.cParams.hashLog;
const size_t h = ZSTD_hashPtr(ip, hashLog, mls);
U32* const hashTable = zc->hashTable;
U32 matchIndex = hashTable[h];
U32* const bt = zc->chainTable;
const U32 btLog = zc->params.cParams.chainLog - 1;
const U32 btLog = zc->appliedParams.cParams.chainLog - 1;
const U32 btMask= (1U << btLog) - 1;
size_t commonLengthSmaller=0, commonLengthLarger=0;
const BYTE* const dictBase = zc->dictBase;
@ -267,7 +282,7 @@ static U32 ZSTD_insertBtAndGetAllMatches (
if (match[bestLength] == ip[bestLength]) currentMl = ZSTD_count(ip, match, iLimit);
} else {
match = dictBase + matchIndex3;
if (MEM_readMINMATCH(match, MINMATCH) == MEM_readMINMATCH(ip, MINMATCH)) /* assumption : matchIndex3 <= dictLimit-4 (by table construction) */
if (ZSTD_readMINMATCH(match, MINMATCH) == ZSTD_readMINMATCH(ip, MINMATCH)) /* assumption : matchIndex3 <= dictLimit-4 (by table construction) */
currentMl = ZSTD_count_2segments(ip+MINMATCH, match+MINMATCH, iLimit, dictEnd, prefixStart) + MINMATCH;
}
@ -410,10 +425,10 @@ void ZSTD_compressBlock_opt_generic(ZSTD_CCtx* ctx,
const BYTE* const base = ctx->base;
const BYTE* const prefixStart = base + ctx->dictLimit;
const U32 maxSearches = 1U << ctx->params.cParams.searchLog;
const U32 sufficient_len = ctx->params.cParams.targetLength;
const U32 mls = ctx->params.cParams.searchLength;
const U32 minMatch = (ctx->params.cParams.searchLength == 3) ? 3 : 4;
const U32 maxSearches = 1U << ctx->appliedParams.cParams.searchLog;
const U32 sufficient_len = ctx->appliedParams.cParams.targetLength;
const U32 mls = ctx->appliedParams.cParams.searchLength;
const U32 minMatch = (ctx->appliedParams.cParams.searchLength == 3) ? 3 : 4;
ZSTD_optimal_t* opt = seqStorePtr->priceTable;
ZSTD_match_t* matches = seqStorePtr->matchTable;
@ -439,7 +454,7 @@ void ZSTD_compressBlock_opt_generic(ZSTD_CCtx* ctx,
for (i=(ip == anchor); i<last_i; i++) {
const S32 repCur = (i==ZSTD_REP_MOVE_OPT) ? (rep[0] - 1) : rep[i];
if ( (repCur > 0) && (repCur < (S32)(ip-prefixStart))
&& (MEM_readMINMATCH(ip, minMatch) == MEM_readMINMATCH(ip - repCur, minMatch))) {
&& (ZSTD_readMINMATCH(ip, minMatch) == ZSTD_readMINMATCH(ip - repCur, minMatch))) {
mlen = (U32)ZSTD_count(ip+minMatch, ip+minMatch-repCur, iend) + minMatch;
if (mlen > sufficient_len || mlen >= ZSTD_OPT_NUM) {
best_mlen = mlen; best_off = i; cur = 0; last_pos = 1;
@ -524,7 +539,7 @@ void ZSTD_compressBlock_opt_generic(ZSTD_CCtx* ctx,
for (i=(opt[cur].mlen != 1); i<last_i; i++) { /* check rep */
const S32 repCur = (i==ZSTD_REP_MOVE_OPT) ? (opt[cur].rep[0] - 1) : opt[cur].rep[i];
if ( (repCur > 0) && (repCur < (S32)(inr-prefixStart))
&& (MEM_readMINMATCH(inr, minMatch) == MEM_readMINMATCH(inr - repCur, minMatch))) {
&& (ZSTD_readMINMATCH(inr, minMatch) == ZSTD_readMINMATCH(inr - repCur, minMatch))) {
mlen = (U32)ZSTD_count(inr+minMatch, inr+minMatch - repCur, iend) + minMatch;
if (mlen > sufficient_len || cur + mlen >= ZSTD_OPT_NUM) {
@ -663,10 +678,10 @@ void ZSTD_compressBlock_opt_extDict_generic(ZSTD_CCtx* ctx,
const BYTE* const dictBase = ctx->dictBase;
const BYTE* const dictEnd = dictBase + dictLimit;
const U32 maxSearches = 1U << ctx->params.cParams.searchLog;
const U32 sufficient_len = ctx->params.cParams.targetLength;
const U32 mls = ctx->params.cParams.searchLength;
const U32 minMatch = (ctx->params.cParams.searchLength == 3) ? 3 : 4;
const U32 maxSearches = 1U << ctx->appliedParams.cParams.searchLog;
const U32 sufficient_len = ctx->appliedParams.cParams.targetLength;
const U32 mls = ctx->appliedParams.cParams.searchLength;
const U32 minMatch = (ctx->appliedParams.cParams.searchLength == 3) ? 3 : 4;
ZSTD_optimal_t* opt = seqStorePtr->priceTable;
ZSTD_match_t* matches = seqStorePtr->matchTable;
@ -698,7 +713,7 @@ void ZSTD_compressBlock_opt_extDict_generic(ZSTD_CCtx* ctx,
const BYTE* const repMatch = repBase + repIndex;
if ( (repCur > 0 && repCur <= (S32)current)
&& (((U32)((dictLimit-1) - repIndex) >= 3) & (repIndex>lowestIndex)) /* intentional overflow */
&& (MEM_readMINMATCH(ip, minMatch) == MEM_readMINMATCH(repMatch, minMatch)) ) {
&& (ZSTD_readMINMATCH(ip, minMatch) == ZSTD_readMINMATCH(repMatch, minMatch)) ) {
/* repcode detected we should take it */
const BYTE* const repEnd = repIndex < dictLimit ? dictEnd : iend;
mlen = (U32)ZSTD_count_2segments(ip+minMatch, repMatch+minMatch, iend, repEnd, prefixStart) + minMatch;
@ -794,7 +809,7 @@ void ZSTD_compressBlock_opt_extDict_generic(ZSTD_CCtx* ctx,
const BYTE* const repMatch = repBase + repIndex;
if ( (repCur > 0 && repCur <= (S32)(current+cur))
&& (((U32)((dictLimit-1) - repIndex) >= 3) & (repIndex>lowestIndex)) /* intentional overflow */
&& (MEM_readMINMATCH(inr, minMatch) == MEM_readMINMATCH(repMatch, minMatch)) ) {
&& (ZSTD_readMINMATCH(inr, minMatch) == ZSTD_readMINMATCH(repMatch, minMatch)) ) {
/* repcode detected */
const BYTE* const repEnd = repIndex < dictLimit ? dictEnd : iend;
mlen = (U32)ZSTD_count_2segments(inr+minMatch, repMatch+minMatch, iend, repEnd, prefixStart) + minMatch;

View File

@ -14,34 +14,31 @@
/* ====== Compiler specifics ====== */
#if defined(_MSC_VER)
# pragma warning(disable : 4204) /* disable: C4204: non-constant aggregate initializer */
# pragma warning(disable : 4204) /* disable: C4204: non-constant aggregate initializer */
#endif
/* ====== Dependencies ====== */
#include <stdlib.h> /* malloc */
#include <string.h> /* memcpy */
#include "pool.h" /* threadpool */
#include "threading.h" /* mutex */
#include "zstd_internal.h" /* MIN, ERROR, ZSTD_*, ZSTD_highbit32 */
#include <string.h> /* memcpy, memset */
#include "pool.h" /* threadpool */
#include "threading.h" /* mutex */
#include "zstd_internal.h" /* MIN, ERROR, ZSTD_*, ZSTD_highbit32 */
#include "zstdmt_compress.h"
/* ====== Debug ====== */
#if 0
#if defined(ZSTD_DEBUG) && (ZSTD_DEBUG>=2)
# include <stdio.h>
# include <unistd.h>
# include <sys/times.h>
static unsigned g_debugLevel = 5;
# define DEBUGLOGRAW(l, ...) if (l<=g_debugLevel) { fprintf(stderr, __VA_ARGS__); }
# define DEBUGLOG(l, ...) if (l<=g_debugLevel) { fprintf(stderr, __FILE__ ": "); fprintf(stderr, __VA_ARGS__); fprintf(stderr, " \n"); }
# define DEBUGLOGRAW(l, ...) if (l<=ZSTD_DEBUG) { fprintf(stderr, __VA_ARGS__); }
# define DEBUG_PRINTHEX(l,p,n) { \
unsigned debug_u; \
for (debug_u=0; debug_u<(n); debug_u++) \
# define DEBUG_PRINTHEX(l,p,n) { \
unsigned debug_u; \
for (debug_u=0; debug_u<(n); debug_u++) \
DEBUGLOGRAW(l, "%02X ", ((const unsigned char*)(p))[debug_u]); \
DEBUGLOGRAW(l, " \n"); \
DEBUGLOGRAW(l, " \n"); \
}
static unsigned long long GetCurrentClockTimeMicroseconds(void)
@ -53,22 +50,22 @@ static unsigned long long GetCurrentClockTimeMicroseconds(void)
return ((((unsigned long long)newTicks)*(1000000))/_ticksPerSecond); }
}
#define MUTEX_WAIT_TIME_DLEVEL 5
#define PTHREAD_MUTEX_LOCK(mutex) \
if (g_debugLevel>=MUTEX_WAIT_TIME_DLEVEL) { \
unsigned long long const beforeTime = GetCurrentClockTimeMicroseconds(); \
pthread_mutex_lock(mutex); \
{ unsigned long long const afterTime = GetCurrentClockTimeMicroseconds(); \
unsigned long long const elapsedTime = (afterTime-beforeTime); \
if (elapsedTime > 1000) { /* or whatever threshold you like; I'm using 1 millisecond here */ \
DEBUGLOG(MUTEX_WAIT_TIME_DLEVEL, "Thread took %llu microseconds to acquire mutex %s \n", \
elapsedTime, #mutex); \
} } \
} else pthread_mutex_lock(mutex);
#define MUTEX_WAIT_TIME_DLEVEL 6
#define PTHREAD_MUTEX_LOCK(mutex) { \
if (ZSTD_DEBUG>=MUTEX_WAIT_TIME_DLEVEL) { \
unsigned long long const beforeTime = GetCurrentClockTimeMicroseconds(); \
pthread_mutex_lock(mutex); \
{ unsigned long long const afterTime = GetCurrentClockTimeMicroseconds(); \
unsigned long long const elapsedTime = (afterTime-beforeTime); \
if (elapsedTime > 1000) { /* or whatever threshold you like; I'm using 1 millisecond here */ \
DEBUGLOG(MUTEX_WAIT_TIME_DLEVEL, "Thread took %llu microseconds to acquire mutex %s \n", \
elapsedTime, #mutex); \
} } \
} else pthread_mutex_lock(mutex); \
}
#else
# define DEBUGLOG(l, ...) {} /* disabled */
# define PTHREAD_MUTEX_LOCK(m) pthread_mutex_lock(m)
# define DEBUG_PRINTHEX(l,p,n) {}
@ -87,16 +84,19 @@ static const buffer_t g_nullBuffer = { NULL, 0 };
typedef struct ZSTDMT_bufferPool_s {
unsigned totalBuffers;
unsigned nbBuffers;
ZSTD_customMem cMem;
buffer_t bTable[1]; /* variable size */
} ZSTDMT_bufferPool;
static ZSTDMT_bufferPool* ZSTDMT_createBufferPool(unsigned nbThreads)
static ZSTDMT_bufferPool* ZSTDMT_createBufferPool(unsigned nbThreads, ZSTD_customMem cMem)
{
unsigned const maxNbBuffers = 2*nbThreads + 2;
ZSTDMT_bufferPool* const bufPool = (ZSTDMT_bufferPool*)calloc(1, sizeof(ZSTDMT_bufferPool) + (maxNbBuffers-1) * sizeof(buffer_t));
ZSTDMT_bufferPool* const bufPool = (ZSTDMT_bufferPool*)ZSTD_calloc(
sizeof(ZSTDMT_bufferPool) + (maxNbBuffers-1) * sizeof(buffer_t), cMem);
if (bufPool==NULL) return NULL;
bufPool->totalBuffers = maxNbBuffers;
bufPool->nbBuffers = 0;
bufPool->cMem = cMem;
return bufPool;
}
@ -105,23 +105,39 @@ static void ZSTDMT_freeBufferPool(ZSTDMT_bufferPool* bufPool)
unsigned u;
if (!bufPool) return; /* compatibility with free on NULL */
for (u=0; u<bufPool->totalBuffers; u++)
free(bufPool->bTable[u].start);
free(bufPool);
ZSTD_free(bufPool->bTable[u].start, bufPool->cMem);
ZSTD_free(bufPool, bufPool->cMem);
}
/* assumption : invocation from main thread only ! */
/* only works at initialization, not during compression */
static size_t ZSTDMT_sizeof_bufferPool(ZSTDMT_bufferPool* bufPool)
{
size_t const poolSize = sizeof(*bufPool)
+ (bufPool->totalBuffers - 1) * sizeof(buffer_t);
unsigned u;
size_t totalBufferSize = 0;
for (u=0; u<bufPool->totalBuffers; u++)
totalBufferSize += bufPool->bTable[u].size;
return poolSize + totalBufferSize;
}
/** ZSTDMT_getBuffer() :
* assumption : invocation from main thread only ! */
static buffer_t ZSTDMT_getBuffer(ZSTDMT_bufferPool* pool, size_t bSize)
{
if (pool->nbBuffers) { /* try to use an existing buffer */
buffer_t const buf = pool->bTable[--(pool->nbBuffers)];
size_t const availBufferSize = buf.size;
if ((availBufferSize >= bSize) & (availBufferSize <= 10*bSize)) /* large enough, but not too much */
if ((availBufferSize >= bSize) & (availBufferSize <= 10*bSize))
/* large enough, but not too much */
return buf;
free(buf.start); /* size conditions not respected : scratch this buffer and create a new one */
/* size conditions not respected : scratch this buffer, create new one */
ZSTD_free(buf.start, pool->cMem);
}
/* create new buffer */
{ buffer_t buffer;
void* const start = malloc(bSize);
void* const start = ZSTD_malloc(bSize, pool->cMem);
if (start==NULL) bSize = 0;
buffer.start = start; /* note : start can be NULL if malloc fails ! */
buffer.size = bSize;
@ -138,7 +154,7 @@ static void ZSTDMT_releaseBuffer(ZSTDMT_bufferPool* pool, buffer_t buf)
return;
}
/* Reached bufferPool capacity (should not happen) */
free(buf.start);
ZSTD_free(buf.start, pool->cMem);
}
@ -147,6 +163,7 @@ static void ZSTDMT_releaseBuffer(ZSTDMT_bufferPool* pool, buffer_t buf)
typedef struct {
unsigned totalCCtx;
unsigned availCCtx;
ZSTD_customMem cMem;
ZSTD_CCtx* cctx[1]; /* variable size */
} ZSTDMT_CCtxPool;
@ -158,23 +175,40 @@ static void ZSTDMT_freeCCtxPool(ZSTDMT_CCtxPool* pool)
unsigned u;
for (u=0; u<pool->totalCCtx; u++)
ZSTD_freeCCtx(pool->cctx[u]); /* note : compatible with free on NULL */
free(pool);
ZSTD_free(pool, pool->cMem);
}
/* ZSTDMT_createCCtxPool() :
* implies nbThreads >= 1 , checked by caller ZSTDMT_createCCtx() */
static ZSTDMT_CCtxPool* ZSTDMT_createCCtxPool(unsigned nbThreads)
static ZSTDMT_CCtxPool* ZSTDMT_createCCtxPool(unsigned nbThreads,
ZSTD_customMem cMem)
{
ZSTDMT_CCtxPool* const cctxPool = (ZSTDMT_CCtxPool*) calloc(1, sizeof(ZSTDMT_CCtxPool) + (nbThreads-1)*sizeof(ZSTD_CCtx*));
ZSTDMT_CCtxPool* const cctxPool = (ZSTDMT_CCtxPool*) ZSTD_calloc(
sizeof(ZSTDMT_CCtxPool) + (nbThreads-1)*sizeof(ZSTD_CCtx*), cMem);
if (!cctxPool) return NULL;
cctxPool->cMem = cMem;
cctxPool->totalCCtx = nbThreads;
cctxPool->availCCtx = 1; /* at least one cctx for single-thread mode */
cctxPool->cctx[0] = ZSTD_createCCtx();
cctxPool->cctx[0] = ZSTD_createCCtx_advanced(cMem);
if (!cctxPool->cctx[0]) { ZSTDMT_freeCCtxPool(cctxPool); return NULL; }
DEBUGLOG(1, "cctxPool created, with %u threads", nbThreads);
DEBUGLOG(3, "cctxPool created, with %u threads", nbThreads);
return cctxPool;
}
/* only works during initialization phase, not during compression */
static size_t ZSTDMT_sizeof_CCtxPool(ZSTDMT_CCtxPool* cctxPool)
{
unsigned const nbThreads = cctxPool->totalCCtx;
size_t const poolSize = sizeof(*cctxPool)
+ (nbThreads-1)*sizeof(ZSTD_CCtx*);
unsigned u;
size_t totalCCtxSize = 0;
for (u=0; u<nbThreads; u++)
totalCCtxSize += ZSTD_sizeof_CCtx(cctxPool->cctx[u]);
return poolSize + totalCCtxSize;
}
static ZSTD_CCtx* ZSTDMT_getCCtx(ZSTDMT_CCtxPool* pool)
{
if (pool->availCCtx) {
@ -218,7 +252,7 @@ typedef struct {
pthread_mutex_t* jobCompleted_mutex;
pthread_cond_t* jobCompleted_cond;
ZSTD_parameters params;
ZSTD_CDict* cdict;
const ZSTD_CDict* cdict;
unsigned long long fullFrameSize;
} ZSTDMT_jobDescription;
@ -228,11 +262,11 @@ void ZSTDMT_compressChunk(void* jobDescription)
ZSTDMT_jobDescription* const job = (ZSTDMT_jobDescription*)jobDescription;
const void* const src = (const char*)job->srcStart + job->dictSize;
buffer_t const dstBuff = job->dstBuff;
DEBUGLOG(3, "job (first:%u) (last:%u) : dictSize %u, srcSize %u",
DEBUGLOG(5, "job (first:%u) (last:%u) : dictSize %u, srcSize %u",
job->firstChunk, job->lastChunk, (U32)job->dictSize, (U32)job->srcSize);
if (job->cdict) { /* should only happen for first segment */
size_t const initError = ZSTD_compressBegin_usingCDict_advanced(job->cctx, job->cdict, job->params.fParams, job->fullFrameSize);
if (job->cdict) DEBUGLOG(3, "using CDict ");
DEBUGLOG(5, "using CDict");
if (ZSTD_isError(initError)) { job->cSize = initError; goto _endJob; }
} else { /* srcStart points at reloaded section */
if (!job->firstChunk) job->params.fParams.contentSizeFlag = 0; /* ensure no srcSize control */
@ -247,12 +281,12 @@ void ZSTDMT_compressChunk(void* jobDescription)
ZSTD_invalidateRepCodes(job->cctx);
}
DEBUGLOG(4, "Compressing : ");
DEBUGLOG(5, "Compressing : ");
DEBUG_PRINTHEX(4, job->srcStart, 12);
job->cSize = (job->lastChunk) ?
ZSTD_compressEnd (job->cctx, dstBuff.start, dstBuff.size, src, job->srcSize) :
ZSTD_compressContinue(job->cctx, dstBuff.start, dstBuff.size, src, job->srcSize);
DEBUGLOG(3, "compressed %u bytes into %u bytes (first:%u) (last:%u)",
DEBUGLOG(5, "compressed %u bytes into %u bytes (first:%u) (last:%u)",
(unsigned)job->srcSize, (unsigned)job->cSize, job->firstChunk, job->lastChunk);
DEBUGLOG(5, "dstBuff.size : %u ; => %s", (U32)dstBuff.size, ZSTD_getErrorName(job->cSize));
@ -271,6 +305,7 @@ void ZSTDMT_compressChunk(void* jobDescription)
struct ZSTDMT_CCtx_s {
POOL_ctx* factory;
ZSTDMT_jobDescription* jobs;
ZSTDMT_bufferPool* buffPool;
ZSTDMT_CCtxPool* cctxPool;
pthread_mutex_t jobCompleted_mutex;
@ -292,50 +327,64 @@ struct ZSTDMT_CCtx_s {
unsigned overlapRLog;
unsigned long long frameContentSize;
size_t sectionSize;
ZSTD_CDict* cdict;
ZSTD_CStream* cstream;
ZSTDMT_jobDescription jobs[1]; /* variable size (must lies at the end) */
ZSTD_customMem cMem;
ZSTD_CDict* cdictLocal;
const ZSTD_CDict* cdict;
};
ZSTDMT_CCtx *ZSTDMT_createCCtx(unsigned nbThreads)
static ZSTDMT_jobDescription* ZSTDMT_allocJobsTable(U32* nbJobsPtr, ZSTD_customMem cMem)
{
ZSTDMT_CCtx* cctx;
U32 const minNbJobs = nbThreads + 2;
U32 const nbJobsLog2 = ZSTD_highbit32(minNbJobs) + 1;
U32 const nbJobsLog2 = ZSTD_highbit32(*nbJobsPtr) + 1;
U32 const nbJobs = 1 << nbJobsLog2;
DEBUGLOG(5, "nbThreads : %u ; minNbJobs : %u ; nbJobsLog2 : %u ; nbJobs : %u \n",
nbThreads, minNbJobs, nbJobsLog2, nbJobs);
*nbJobsPtr = nbJobs;
return (ZSTDMT_jobDescription*) ZSTD_calloc(
nbJobs * sizeof(ZSTDMT_jobDescription), cMem);
}
ZSTDMT_CCtx* ZSTDMT_createCCtx_advanced(unsigned nbThreads, ZSTD_customMem cMem)
{
ZSTDMT_CCtx* mtctx;
U32 nbJobs = nbThreads + 2;
DEBUGLOG(3, "ZSTDMT_createCCtx_advanced");
if ((nbThreads < 1) | (nbThreads > ZSTDMT_NBTHREADS_MAX)) return NULL;
cctx = (ZSTDMT_CCtx*) calloc(1, sizeof(ZSTDMT_CCtx) + nbJobs*sizeof(ZSTDMT_jobDescription));
if (!cctx) return NULL;
cctx->nbThreads = nbThreads;
cctx->jobIDMask = nbJobs - 1;
cctx->allJobsCompleted = 1;
cctx->sectionSize = 0;
cctx->overlapRLog = 3;
cctx->factory = POOL_create(nbThreads, 1);
cctx->buffPool = ZSTDMT_createBufferPool(nbThreads);
cctx->cctxPool = ZSTDMT_createCCtxPool(nbThreads);
if (!cctx->factory | !cctx->buffPool | !cctx->cctxPool) { /* one object was not created */
ZSTDMT_freeCCtx(cctx);
if ((cMem.customAlloc!=NULL) ^ (cMem.customFree!=NULL))
/* invalid custom allocator */
return NULL;
mtctx = (ZSTDMT_CCtx*) ZSTD_calloc(sizeof(ZSTDMT_CCtx), cMem);
if (!mtctx) return NULL;
mtctx->cMem = cMem;
mtctx->nbThreads = nbThreads;
mtctx->allJobsCompleted = 1;
mtctx->sectionSize = 0;
mtctx->overlapRLog = 3;
mtctx->factory = POOL_create(nbThreads, 1);
mtctx->jobs = ZSTDMT_allocJobsTable(&nbJobs, cMem);
mtctx->jobIDMask = nbJobs - 1;
mtctx->buffPool = ZSTDMT_createBufferPool(nbThreads, cMem);
mtctx->cctxPool = ZSTDMT_createCCtxPool(nbThreads, cMem);
if (!mtctx->factory | !mtctx->jobs | !mtctx->buffPool | !mtctx->cctxPool) {
ZSTDMT_freeCCtx(mtctx);
return NULL;
}
if (nbThreads==1) {
cctx->cstream = ZSTD_createCStream();
if (!cctx->cstream) {
ZSTDMT_freeCCtx(cctx); return NULL;
} }
pthread_mutex_init(&cctx->jobCompleted_mutex, NULL); /* Todo : check init function return */
pthread_cond_init(&cctx->jobCompleted_cond, NULL);
DEBUGLOG(4, "mt_cctx created, for %u threads \n", nbThreads);
return cctx;
pthread_mutex_init(&mtctx->jobCompleted_mutex, NULL); /* Todo : check init function return */
pthread_cond_init(&mtctx->jobCompleted_cond, NULL);
DEBUGLOG(3, "mt_cctx created, for %u threads", nbThreads);
return mtctx;
}
ZSTDMT_CCtx* ZSTDMT_createCCtx(unsigned nbThreads)
{
return ZSTDMT_createCCtx_advanced(nbThreads, ZSTD_defaultCMem);
}
/* ZSTDMT_releaseAllJobResources() :
* Ensure all workers are killed first. */
* note : ensure all workers are killed first ! */
static void ZSTDMT_releaseAllJobResources(ZSTDMT_CCtx* mtctx)
{
unsigned jobID;
DEBUGLOG(3, "ZSTDMT_releaseAllJobResources");
for (jobID=0; jobID <= mtctx->jobIDMask; jobID++) {
ZSTDMT_releaseBuffer(mtctx->buffPool, mtctx->jobs[jobID].dstBuff);
mtctx->jobs[jobID].dstBuff = g_nullBuffer;
@ -356,15 +405,26 @@ size_t ZSTDMT_freeCCtx(ZSTDMT_CCtx* mtctx)
POOL_free(mtctx->factory);
if (!mtctx->allJobsCompleted) ZSTDMT_releaseAllJobResources(mtctx); /* stop workers first */
ZSTDMT_freeBufferPool(mtctx->buffPool); /* release job resources into pools first */
ZSTD_free(mtctx->jobs, mtctx->cMem);
ZSTDMT_freeCCtxPool(mtctx->cctxPool);
ZSTD_freeCDict(mtctx->cdict);
ZSTD_freeCStream(mtctx->cstream);
ZSTD_freeCDict(mtctx->cdictLocal);
pthread_mutex_destroy(&mtctx->jobCompleted_mutex);
pthread_cond_destroy(&mtctx->jobCompleted_cond);
free(mtctx);
ZSTD_free(mtctx, mtctx->cMem);
return 0;
}
size_t ZSTDMT_sizeof_CCtx(ZSTDMT_CCtx* mtctx)
{
if (mtctx == NULL) return 0; /* supports sizeof NULL */
return sizeof(*mtctx)
+ POOL_sizeof(mtctx->factory)
+ ZSTDMT_sizeof_bufferPool(mtctx->buffPool)
+ (mtctx->jobIDMask+1) * sizeof(ZSTDMT_jobDescription)
+ ZSTDMT_sizeof_CCtxPool(mtctx->cctxPool)
+ ZSTD_sizeof_CDict(mtctx->cdictLocal);
}
size_t ZSTDMT_setMTCtxParameter(ZSTDMT_CCtx* mtctx, ZSDTMT_parameter parameter, unsigned value)
{
switch(parameter)
@ -373,7 +433,7 @@ size_t ZSTDMT_setMTCtxParameter(ZSTDMT_CCtx* mtctx, ZSDTMT_parameter parameter,
mtctx->sectionSize = value;
return 0;
case ZSTDMT_p_overlapSectionLog :
DEBUGLOG(4, "ZSTDMT_p_overlapSectionLog : %u", value);
DEBUGLOG(5, "ZSTDMT_p_overlapSectionLog : %u", value);
mtctx->overlapRLog = (value >= 9) ? 0 : 9 - value;
return 0;
default :
@ -386,31 +446,49 @@ size_t ZSTDMT_setMTCtxParameter(ZSTDMT_CCtx* mtctx, ZSDTMT_parameter parameter,
/* ===== Multi-threaded compression ===== */
/* ------------------------------------------ */
size_t ZSTDMT_compressCCtx(ZSTDMT_CCtx* mtctx,
static unsigned computeNbChunks(size_t srcSize, unsigned windowLog, unsigned nbThreads) {
size_t const chunkSizeTarget = (size_t)1 << (windowLog + 2);
size_t const chunkMaxSize = chunkSizeTarget << 2;
size_t const passSizeMax = chunkMaxSize * nbThreads;
unsigned const multiplier = (unsigned)(srcSize / passSizeMax) + 1;
unsigned const nbChunksLarge = multiplier * nbThreads;
unsigned const nbChunksMax = (unsigned)(srcSize / chunkSizeTarget) + 1;
unsigned const nbChunksSmall = MIN(nbChunksMax, nbThreads);
return (multiplier>1) ? nbChunksLarge : nbChunksSmall;
}
size_t ZSTDMT_compress_advanced(ZSTDMT_CCtx* mtctx,
void* dst, size_t dstCapacity,
const void* src, size_t srcSize,
int compressionLevel)
const ZSTD_CDict* cdict,
ZSTD_parameters const params,
unsigned overlapRLog)
{
ZSTD_parameters params = ZSTD_getParams(compressionLevel, srcSize, 0);
U32 const overlapLog = (compressionLevel >= ZSTD_maxCLevel()) ? 0 : 3;
size_t const overlapSize = (size_t)1 << (params.cParams.windowLog - overlapLog);
size_t const chunkTargetSize = (size_t)1 << (params.cParams.windowLog + 2);
unsigned const nbChunksMax = (unsigned)(srcSize / chunkTargetSize) + 1;
unsigned nbChunks = MIN(nbChunksMax, mtctx->nbThreads);
size_t const overlapSize = (overlapRLog>=9) ? 0 : (size_t)1 << (params.cParams.windowLog - overlapRLog);
unsigned nbChunks = computeNbChunks(srcSize, params.cParams.windowLog, mtctx->nbThreads);
size_t const proposedChunkSize = (srcSize + (nbChunks-1)) / nbChunks;
size_t const avgChunkSize = ((proposedChunkSize & 0x1FFFF) < 0xFFFF) ? proposedChunkSize + 0xFFFF : proposedChunkSize; /* avoid too small last block */
size_t remainingSrcSize = srcSize;
size_t const avgChunkSize = ((proposedChunkSize & 0x1FFFF) < 0x7FFF) ? proposedChunkSize + 0xFFFF : proposedChunkSize; /* avoid too small last block */
const char* const srcStart = (const char*)src;
size_t remainingSrcSize = srcSize;
unsigned const compressWithinDst = (dstCapacity >= ZSTD_compressBound(srcSize)) ? nbChunks : (unsigned)(dstCapacity / ZSTD_compressBound(avgChunkSize)); /* presumes avgChunkSize >= 256 KB, which should be the case */
size_t frameStartPos = 0, dstBufferPos = 0;
DEBUGLOG(3, "windowLog : %2u => chunkTargetSize : %u bytes ", params.cParams.windowLog, (U32)chunkTargetSize);
DEBUGLOG(2, "nbChunks : %2u (chunkSize : %u bytes) ", nbChunks, (U32)avgChunkSize);
params.fParams.contentSizeFlag = 1;
DEBUGLOG(4, "nbChunks : %2u (chunkSize : %u bytes) ", nbChunks, (U32)avgChunkSize);
if (nbChunks==1) { /* fallback to single-thread mode */
ZSTD_CCtx* const cctx = mtctx->cctxPool->cctx[0];
return ZSTD_compressCCtx(cctx, dst, dstCapacity, src, srcSize, compressionLevel);
if (cdict) return ZSTD_compress_usingCDict_advanced(cctx, dst, dstCapacity, src, srcSize, cdict, params.fParams);
return ZSTD_compress_advanced(cctx, dst, dstCapacity, src, srcSize, NULL, 0, params);
}
assert(avgChunkSize >= 256 KB); /* condition for ZSTD_compressBound(A) + ZSTD_compressBound(B) <= ZSTD_compressBound(A+B), which is useful to avoid allocating extra buffers */
if (nbChunks > mtctx->jobIDMask+1) { /* enlarge job table */
U32 nbJobs = nbChunks;
ZSTD_free(mtctx->jobs, mtctx->cMem);
mtctx->jobIDMask = 0;
mtctx->jobs = ZSTDMT_allocJobsTable(&nbJobs, mtctx->cMem);
if (mtctx->jobs==NULL) return ERROR(memory_allocation);
mtctx->jobIDMask = nbJobs - 1;
}
{ unsigned u;
@ -425,15 +503,18 @@ size_t ZSTDMT_compressCCtx(ZSTDMT_CCtx* mtctx,
if ((cctx==NULL) || (dstBuffer.start==NULL)) {
mtctx->jobs[u].cSize = ERROR(memory_allocation); /* job result */
mtctx->jobs[u].jobCompleted = 1;
nbChunks = u+1;
nbChunks = u+1; /* only wait and free u jobs, instead of initially expected nbChunks ones */
break; /* let's wait for previous jobs to complete, but don't start new ones */
}
mtctx->jobs[u].srcStart = srcStart + frameStartPos - dictSize;
mtctx->jobs[u].dictSize = dictSize;
mtctx->jobs[u].srcSize = chunkSize;
mtctx->jobs[u].cdict = mtctx->nextJobID==0 ? cdict : NULL;
mtctx->jobs[u].fullFrameSize = srcSize;
mtctx->jobs[u].params = params;
/* do not calculate checksum within sections, but write it in header for first section */
if (u!=0) mtctx->jobs[u].params.fParams.checksumFlag = 0;
mtctx->jobs[u].dstBuff = dstBuffer;
mtctx->jobs[u].cctx = cctx;
mtctx->jobs[u].firstChunk = (u==0);
@ -442,27 +523,27 @@ size_t ZSTDMT_compressCCtx(ZSTDMT_CCtx* mtctx,
mtctx->jobs[u].jobCompleted_mutex = &mtctx->jobCompleted_mutex;
mtctx->jobs[u].jobCompleted_cond = &mtctx->jobCompleted_cond;
DEBUGLOG(3, "posting job %u (%u bytes)", u, (U32)chunkSize);
DEBUG_PRINTHEX(3, mtctx->jobs[u].srcStart, 12);
DEBUGLOG(5, "posting job %u (%u bytes)", u, (U32)chunkSize);
DEBUG_PRINTHEX(6, mtctx->jobs[u].srcStart, 12);
POOL_add(mtctx->factory, ZSTDMT_compressChunk, &mtctx->jobs[u]);
frameStartPos += chunkSize;
dstBufferPos += dstBufferCapacity;
remainingSrcSize -= chunkSize;
} }
/* note : since nbChunks <= nbThreads, all jobs should be running immediately in parallel */
/* collect result */
{ unsigned chunkID;
size_t error = 0, dstPos = 0;
for (chunkID=0; chunkID<nbChunks; chunkID++) {
DEBUGLOG(3, "waiting for chunk %u ", chunkID);
DEBUGLOG(5, "waiting for chunk %u ", chunkID);
PTHREAD_MUTEX_LOCK(&mtctx->jobCompleted_mutex);
while (mtctx->jobs[chunkID].jobCompleted==0) {
DEBUGLOG(4, "waiting for jobCompleted signal from chunk %u", chunkID);
DEBUGLOG(5, "waiting for jobCompleted signal from chunk %u", chunkID);
pthread_cond_wait(&mtctx->jobCompleted_cond, &mtctx->jobCompleted_mutex);
}
pthread_mutex_unlock(&mtctx->jobCompleted_mutex);
DEBUGLOG(3, "ready to write chunk %u ", chunkID);
DEBUGLOG(5, "ready to write chunk %u ", chunkID);
ZSTDMT_releaseCCtx(mtctx->cctxPool, mtctx->jobs[chunkID].cctx);
mtctx->jobs[chunkID].cctx = NULL;
@ -470,20 +551,33 @@ size_t ZSTDMT_compressCCtx(ZSTDMT_CCtx* mtctx,
{ size_t const cSize = mtctx->jobs[chunkID].cSize;
if (ZSTD_isError(cSize)) error = cSize;
if ((!error) && (dstPos + cSize > dstCapacity)) error = ERROR(dstSize_tooSmall);
if (chunkID) { /* note : chunk 0 is already written directly into dst */
if (chunkID) { /* note : chunk 0 is written directly at dst, which is correct position */
if (!error)
memmove((char*)dst + dstPos, mtctx->jobs[chunkID].dstBuff.start, cSize); /* may overlap if chunk decompressed within dst */
if (chunkID >= compressWithinDst) /* otherwise, it decompresses within dst */
memmove((char*)dst + dstPos, mtctx->jobs[chunkID].dstBuff.start, cSize); /* may overlap when chunk compressed within dst */
if (chunkID >= compressWithinDst) { /* chunk compressed into its own buffer, which must be released */
DEBUGLOG(5, "releasing buffer %u>=%u", chunkID, compressWithinDst);
ZSTDMT_releaseBuffer(mtctx->buffPool, mtctx->jobs[chunkID].dstBuff);
}
mtctx->jobs[chunkID].dstBuff = g_nullBuffer;
}
dstPos += cSize ;
}
}
if (!error) DEBUGLOG(3, "compressed size : %u ", (U32)dstPos);
if (!error) DEBUGLOG(4, "compressed size : %u ", (U32)dstPos);
return error ? error : dstPos;
}
}
size_t ZSTDMT_compressCCtx(ZSTDMT_CCtx* mtctx,
void* dst, size_t dstCapacity,
const void* src, size_t srcSize,
int compressionLevel)
{
U32 const overlapRLog = (compressionLevel >= ZSTD_maxCLevel()) ? 0 : 3;
ZSTD_parameters params = ZSTD_getParams(compressionLevel, srcSize, 0);
params.fParams.contentSizeFlag = 1;
return ZSTDMT_compress_advanced(mtctx, dst, dstCapacity, src, srcSize, NULL, params, overlapRLog);
}
@ -491,12 +585,14 @@ size_t ZSTDMT_compressCCtx(ZSTDMT_CCtx* mtctx,
/* ======= Streaming API ======= */
/* ====================================== */
static void ZSTDMT_waitForAllJobsCompleted(ZSTDMT_CCtx* zcs) {
static void ZSTDMT_waitForAllJobsCompleted(ZSTDMT_CCtx* zcs)
{
DEBUGLOG(4, "ZSTDMT_waitForAllJobsCompleted");
while (zcs->doneJobID < zcs->nextJobID) {
unsigned const jobID = zcs->doneJobID & zcs->jobIDMask;
PTHREAD_MUTEX_LOCK(&zcs->jobCompleted_mutex);
while (zcs->jobs[jobID].jobCompleted==0) {
DEBUGLOG(4, "waiting for jobCompleted signal from chunk %u", zcs->doneJobID); /* we want to block when waiting for data to flush */
DEBUGLOG(5, "waiting for jobCompleted signal from chunk %u", zcs->doneJobID); /* we want to block when waiting for data to flush */
pthread_cond_wait(&zcs->jobCompleted_cond, &zcs->jobCompleted_mutex);
}
pthread_mutex_unlock(&zcs->jobCompleted_mutex);
@ -505,33 +601,54 @@ static void ZSTDMT_waitForAllJobsCompleted(ZSTDMT_CCtx* zcs) {
}
static size_t ZSTDMT_initCStream_internal(ZSTDMT_CCtx* zcs,
const void* dict, size_t dictSize, unsigned updateDict,
ZSTD_parameters params, unsigned long long pledgedSrcSize)
/** ZSTDMT_initCStream_internal() :
* internal usage only */
size_t ZSTDMT_initCStream_internal(ZSTDMT_CCtx* zcs,
const void* dict, size_t dictSize, const ZSTD_CDict* cdict,
ZSTD_parameters params, unsigned long long pledgedSrcSize)
{
ZSTD_customMem const cmem = { NULL, NULL, NULL };
DEBUGLOG(3, "Started new compression, with windowLog : %u", params.cParams.windowLog);
if (zcs->nbThreads==1) return ZSTD_initCStream_advanced(zcs->cstream, dict, dictSize, params, pledgedSrcSize);
if (zcs->allJobsCompleted == 0) { /* previous job not correctly finished */
DEBUGLOG(4, "ZSTDMT_initCStream_internal");
/* params are supposed to be fully validated at this point */
assert(!ZSTD_isError(ZSTD_checkCParams(params.cParams)));
assert(!((dict) && (cdict))); /* either dict or cdict, not both */
if (zcs->nbThreads==1) {
DEBUGLOG(4, "single thread mode");
return ZSTD_initCStream_internal(zcs->cctxPool->cctx[0],
dict, dictSize, cdict,
params, pledgedSrcSize);
}
if (zcs->allJobsCompleted == 0) { /* previous compression not correctly finished */
ZSTDMT_waitForAllJobsCompleted(zcs);
ZSTDMT_releaseAllJobResources(zcs);
zcs->allJobsCompleted = 1;
}
zcs->params = params;
if (updateDict) {
ZSTD_freeCDict(zcs->cdict); zcs->cdict = NULL;
if (dict && dictSize) {
zcs->cdict = ZSTD_createCDict_advanced(dict, dictSize, 0, params.cParams, cmem);
if (zcs->cdict == NULL) return ERROR(memory_allocation);
} }
zcs->frameContentSize = pledgedSrcSize;
if (dict) {
DEBUGLOG(4,"cdictLocal: %08X", (U32)(size_t)zcs->cdictLocal);
ZSTD_freeCDict(zcs->cdictLocal);
zcs->cdictLocal = ZSTD_createCDict_advanced(dict, dictSize,
0 /* byRef */, ZSTD_dm_auto, /* note : a loadPrefix becomes an internal CDict */
params.cParams, zcs->cMem);
zcs->cdict = zcs->cdictLocal;
if (zcs->cdictLocal == NULL) return ERROR(memory_allocation);
} else {
DEBUGLOG(4,"cdictLocal: %08X", (U32)(size_t)zcs->cdictLocal);
ZSTD_freeCDict(zcs->cdictLocal);
zcs->cdictLocal = NULL;
zcs->cdict = cdict;
}
zcs->targetDictSize = (zcs->overlapRLog>=9) ? 0 : (size_t)1 << (zcs->params.cParams.windowLog - zcs->overlapRLog);
DEBUGLOG(4, "overlapRLog : %u ", zcs->overlapRLog);
DEBUGLOG(3, "overlap Size : %u KB", (U32)(zcs->targetDictSize>>10));
DEBUGLOG(4, "overlap Size : %u KB", (U32)(zcs->targetDictSize>>10));
zcs->targetSectionSize = zcs->sectionSize ? zcs->sectionSize : (size_t)1 << (zcs->params.cParams.windowLog + 2);
zcs->targetSectionSize = MAX(ZSTDMT_SECTION_SIZE_MIN, zcs->targetSectionSize);
zcs->targetSectionSize = MAX(zcs->targetDictSize, zcs->targetSectionSize);
DEBUGLOG(3, "Section Size : %u KB", (U32)(zcs->targetSectionSize>>10));
DEBUGLOG(4, "Section Size : %u KB", (U32)(zcs->targetSectionSize>>10));
zcs->marginSize = zcs->targetSectionSize >> 2;
zcs->inBuffSize = zcs->targetDictSize + zcs->targetSectionSize + zcs->marginSize;
zcs->inBuff.buffer = ZSTDMT_getBuffer(zcs->buffPool, zcs->inBuffSize);
@ -546,24 +663,39 @@ static size_t ZSTDMT_initCStream_internal(ZSTDMT_CCtx* zcs,
return 0;
}
size_t ZSTDMT_initCStream_advanced(ZSTDMT_CCtx* zcs,
size_t ZSTDMT_initCStream_advanced(ZSTDMT_CCtx* mtctx,
const void* dict, size_t dictSize,
ZSTD_parameters params, unsigned long long pledgedSrcSize)
{
return ZSTDMT_initCStream_internal(zcs, dict, dictSize, 1, params, pledgedSrcSize);
DEBUGLOG(5, "ZSTDMT_initCStream_advanced");
return ZSTDMT_initCStream_internal(mtctx, dict, dictSize, NULL, params, pledgedSrcSize);
}
size_t ZSTDMT_initCStream_usingCDict(ZSTDMT_CCtx* mtctx,
const ZSTD_CDict* cdict,
ZSTD_frameParameters fParams,
unsigned long long pledgedSrcSize)
{
ZSTD_parameters params = ZSTD_getParamsFromCDict(cdict);
if (cdict==NULL) return ERROR(dictionary_wrong); /* method incompatible with NULL cdict */
params.fParams = fParams;
return ZSTDMT_initCStream_internal(mtctx, NULL, 0 /*dictSize*/, cdict,
params, pledgedSrcSize);
}
/* ZSTDMT_resetCStream() :
* pledgedSrcSize is optional and can be zero == unknown */
size_t ZSTDMT_resetCStream(ZSTDMT_CCtx* zcs, unsigned long long pledgedSrcSize)
{
if (zcs->nbThreads==1) return ZSTD_resetCStream(zcs->cstream, pledgedSrcSize);
if (zcs->nbThreads==1)
return ZSTD_resetCStream(zcs->cctxPool->cctx[0], pledgedSrcSize);
return ZSTDMT_initCStream_internal(zcs, NULL, 0, 0, zcs->params, pledgedSrcSize);
}
size_t ZSTDMT_initCStream(ZSTDMT_CCtx* zcs, int compressionLevel) {
ZSTD_parameters const params = ZSTD_getParams(compressionLevel, 0, 0);
return ZSTDMT_initCStream_internal(zcs, NULL, 0, 1, params, 0);
return ZSTDMT_initCStream_internal(zcs, NULL, 0, NULL, params, 0);
}
@ -582,13 +714,16 @@ static size_t ZSTDMT_createCompressionJob(ZSTDMT_CCtx* zcs, size_t srcSize, unsi
return ERROR(memory_allocation);
}
DEBUGLOG(4, "preparing job %u to compress %u bytes with %u preload ", zcs->nextJobID, (U32)srcSize, (U32)zcs->dictSize);
DEBUGLOG(4, "preparing job %u to compress %u bytes with %u preload ",
zcs->nextJobID, (U32)srcSize, (U32)zcs->dictSize);
zcs->jobs[jobID].src = zcs->inBuff.buffer;
zcs->jobs[jobID].srcStart = zcs->inBuff.buffer.start;
zcs->jobs[jobID].srcSize = srcSize;
zcs->jobs[jobID].dictSize = zcs->dictSize; /* note : zcs->inBuff.filled is presumed >= srcSize + dictSize */
zcs->jobs[jobID].dictSize = zcs->dictSize;
assert(zcs->inBuff.filled >= srcSize + zcs->dictSize);
zcs->jobs[jobID].params = zcs->params;
if (zcs->nextJobID) zcs->jobs[jobID].params.fParams.checksumFlag = 0; /* do not calculate checksum within sections, just keep it in header for first section */
/* do not calculate checksum within sections, but write it in header for first section */
if (zcs->nextJobID) zcs->jobs[jobID].params.fParams.checksumFlag = 0;
zcs->jobs[jobID].cdict = zcs->nextJobID==0 ? zcs->cdict : NULL;
zcs->jobs[jobID].fullFrameSize = zcs->frameContentSize;
zcs->jobs[jobID].dstBuff = dstBuffer;
@ -603,6 +738,7 @@ static size_t ZSTDMT_createCompressionJob(ZSTDMT_CCtx* zcs, size_t srcSize, unsi
/* get a new buffer for next input */
if (!endFrame) {
size_t const newDictSize = MIN(srcSize + zcs->dictSize, zcs->targetDictSize);
DEBUGLOG(5, "ZSTDMT_createCompressionJob::endFrame = %u", endFrame);
zcs->inBuff.buffer = ZSTDMT_getBuffer(zcs->buffPool, zcs->inBuffSize);
if (zcs->inBuff.buffer.start == NULL) { /* not enough memory to allocate next input buffer */
zcs->jobs[jobID].jobCompleted = 1;
@ -611,22 +747,33 @@ static size_t ZSTDMT_createCompressionJob(ZSTDMT_CCtx* zcs, size_t srcSize, unsi
ZSTDMT_releaseAllJobResources(zcs);
return ERROR(memory_allocation);
}
DEBUGLOG(5, "inBuff filled to %u", (U32)zcs->inBuff.filled);
DEBUGLOG(5, "inBuff currently filled to %u", (U32)zcs->inBuff.filled);
zcs->inBuff.filled -= srcSize + zcs->dictSize - newDictSize;
DEBUGLOG(5, "new job : filled to %u, with %u dict and %u src", (U32)zcs->inBuff.filled, (U32)newDictSize, (U32)(zcs->inBuff.filled - newDictSize));
memmove(zcs->inBuff.buffer.start, (const char*)zcs->jobs[jobID].srcStart + zcs->dictSize + srcSize - newDictSize, zcs->inBuff.filled);
DEBUGLOG(5, "new job : inBuff filled to %u, with %u dict and %u src",
(U32)zcs->inBuff.filled, (U32)newDictSize,
(U32)(zcs->inBuff.filled - newDictSize));
memmove(zcs->inBuff.buffer.start,
(const char*)zcs->jobs[jobID].srcStart + zcs->dictSize + srcSize - newDictSize,
zcs->inBuff.filled);
DEBUGLOG(5, "new inBuff pre-filled");
zcs->dictSize = newDictSize;
} else {
} else { /* if (endFrame==1) */
DEBUGLOG(5, "ZSTDMT_createCompressionJob::endFrame = %u", endFrame);
zcs->inBuff.buffer = g_nullBuffer;
zcs->inBuff.filled = 0;
zcs->dictSize = 0;
zcs->frameEnded = 1;
if (zcs->nextJobID == 0)
zcs->params.fParams.checksumFlag = 0; /* single chunk : checksum is calculated directly within worker thread */
/* single chunk exception : checksum is calculated directly within worker thread */
zcs->params.fParams.checksumFlag = 0;
}
DEBUGLOG(3, "posting job %u : %u bytes (end:%u) (note : doneJob = %u=>%u)", zcs->nextJobID, (U32)zcs->jobs[jobID].srcSize, zcs->jobs[jobID].lastChunk, zcs->doneJobID, zcs->doneJobID & zcs->jobIDMask);
DEBUGLOG(4, "posting job %u : %u bytes (end:%u) (note : doneJob = %u=>%u)",
zcs->nextJobID,
(U32)zcs->jobs[jobID].srcSize,
zcs->jobs[jobID].lastChunk,
zcs->doneJobID,
zcs->doneJobID & zcs->jobIDMask);
POOL_add(zcs->factory, ZSTDMT_compressChunk, &zcs->jobs[jobID]); /* this call is blocking when thread worker pool is exhausted */
zcs->nextJobID++;
return 0;
@ -664,7 +811,7 @@ static size_t ZSTDMT_flushNextJob(ZSTDMT_CCtx* zcs, ZSTD_outBuffer* output, unsi
XXH64_update(&zcs->xxhState, (const char*)job.srcStart + job.dictSize, job.srcSize);
if (zcs->frameEnded && (zcs->doneJobID+1 == zcs->nextJobID)) { /* write checksum at end of last section */
U32 const checksum = (U32)XXH64_digest(&zcs->xxhState);
DEBUGLOG(4, "writing checksum : %08X \n", checksum);
DEBUGLOG(5, "writing checksum : %08X \n", checksum);
MEM_writeLE32((char*)job.dstBuff.start + job.cSize, checksum);
job.cSize += 4;
zcs->jobs[wJobID].cSize += 4;
@ -675,7 +822,7 @@ static size_t ZSTDMT_flushNextJob(ZSTDMT_CCtx* zcs, ZSTD_outBuffer* output, unsi
zcs->jobs[wJobID].jobScanned = 1;
}
{ size_t const toWrite = MIN(job.cSize - job.dstFlushed, output->size - output->pos);
DEBUGLOG(4, "Flushing %u bytes from job %u ", (U32)toWrite, zcs->doneJobID);
DEBUGLOG(5, "Flushing %u bytes from job %u ", (U32)toWrite, zcs->doneJobID);
memcpy((char*)output->dst + output->pos, (const char*)job.dstBuff.start + job.dstFlushed, toWrite);
output->pos += toWrite;
job.dstFlushed += toWrite;
@ -696,26 +843,81 @@ static size_t ZSTDMT_flushNextJob(ZSTDMT_CCtx* zcs, ZSTD_outBuffer* output, unsi
} }
size_t ZSTDMT_compressStream(ZSTDMT_CCtx* zcs, ZSTD_outBuffer* output, ZSTD_inBuffer* input)
/** ZSTDMT_compressStream_generic() :
* internal use only
* assumption : output and input are valid (pos <= size)
* @return : minimum amount of data remaining to flush, 0 if none */
size_t ZSTDMT_compressStream_generic(ZSTDMT_CCtx* mtctx,
ZSTD_outBuffer* output,
ZSTD_inBuffer* input,
ZSTD_EndDirective endOp)
{
size_t const newJobThreshold = zcs->dictSize + zcs->targetSectionSize + zcs->marginSize;
if (zcs->frameEnded) return ERROR(stage_wrong); /* current frame being ended. Only flush is allowed. Restart with init */
if (zcs->nbThreads==1) return ZSTD_compressStream(zcs->cstream, output, input);
size_t const newJobThreshold = mtctx->dictSize + mtctx->targetSectionSize + mtctx->marginSize;
assert(output->pos <= output->size);
assert(input->pos <= input->size);
if ((mtctx->frameEnded) && (endOp==ZSTD_e_continue)) {
/* current frame being ended. Only flush/end are allowed. Or start new frame with init */
return ERROR(stage_wrong);
}
if (mtctx->nbThreads==1) {
return ZSTD_compressStream_generic(mtctx->cctxPool->cctx[0], output, input, endOp);
}
/* single-pass shortcut (note : this is blocking-mode) */
if ( (mtctx->nextJobID==0) /* just started */
&& (mtctx->inBuff.filled==0) /* nothing buffered */
&& (endOp==ZSTD_e_end) /* end order */
&& (output->size - output->pos >= ZSTD_compressBound(input->size - input->pos)) ) { /* enough room */
size_t const cSize = ZSTDMT_compress_advanced(mtctx,
(char*)output->dst + output->pos, output->size - output->pos,
(const char*)input->src + input->pos, input->size - input->pos,
mtctx->cdict, mtctx->params, mtctx->overlapRLog);
if (ZSTD_isError(cSize)) return cSize;
input->pos = input->size;
output->pos += cSize;
ZSTDMT_releaseBuffer(mtctx->buffPool, mtctx->inBuff.buffer); /* was allocated in initStream */
mtctx->allJobsCompleted = 1;
mtctx->frameEnded = 1;
return 0;
}
/* fill input buffer */
{ size_t const toLoad = MIN(input->size - input->pos, zcs->inBuffSize - zcs->inBuff.filled);
memcpy((char*)zcs->inBuff.buffer.start + zcs->inBuff.filled, input->src, toLoad);
if ((input->src) && (mtctx->inBuff.buffer.start)) { /* support NULL input */
size_t const toLoad = MIN(input->size - input->pos, mtctx->inBuffSize - mtctx->inBuff.filled);
DEBUGLOG(2, "inBuff:%08X; inBuffSize=%u; ToCopy=%u", (U32)(size_t)mtctx->inBuff.buffer.start, (U32)mtctx->inBuffSize, (U32)toLoad);
memcpy((char*)mtctx->inBuff.buffer.start + mtctx->inBuff.filled, (const char*)input->src + input->pos, toLoad);
input->pos += toLoad;
zcs->inBuff.filled += toLoad;
mtctx->inBuff.filled += toLoad;
}
if ( (zcs->inBuff.filled >= newJobThreshold) /* filled enough : let's compress */
&& (zcs->nextJobID <= zcs->doneJobID + zcs->jobIDMask) ) { /* avoid overwriting job round buffer */
CHECK_F( ZSTDMT_createCompressionJob(zcs, zcs->targetSectionSize, 0) );
if ( (mtctx->inBuff.filled >= newJobThreshold) /* filled enough : let's compress */
&& (mtctx->nextJobID <= mtctx->doneJobID + mtctx->jobIDMask) ) { /* avoid overwriting job round buffer */
CHECK_F( ZSTDMT_createCompressionJob(mtctx, mtctx->targetSectionSize, 0 /* endFrame */) );
}
/* check for data to flush */
CHECK_F( ZSTDMT_flushNextJob(zcs, output, (zcs->inBuff.filled == zcs->inBuffSize)) ); /* block if it wasn't possible to create new job due to saturation */
/* check for potential compressed data ready to be flushed */
CHECK_F( ZSTDMT_flushNextJob(mtctx, output, (mtctx->inBuff.filled == mtctx->inBuffSize) /* blockToFlush */) ); /* block if it wasn't possible to create new job due to saturation */
if (input->pos < input->size) /* input not consumed : do not flush yet */
endOp = ZSTD_e_continue;
switch(endOp)
{
case ZSTD_e_flush:
return ZSTDMT_flushStream(mtctx, output);
case ZSTD_e_end:
return ZSTDMT_endStream(mtctx, output);
case ZSTD_e_continue:
return 1;
default:
return ERROR(GENERIC); /* invalid endDirective */
}
}
size_t ZSTDMT_compressStream(ZSTDMT_CCtx* zcs, ZSTD_outBuffer* output, ZSTD_inBuffer* input)
{
CHECK_F( ZSTDMT_compressStream_generic(zcs, output, input, ZSTD_e_continue) );
/* recommended next input size : fill current input buffer */
return zcs->inBuffSize - zcs->inBuff.filled; /* note : could be zero when input buffer is fully filled and no more availability to create new job */
@ -726,26 +928,28 @@ static size_t ZSTDMT_flushStream_internal(ZSTDMT_CCtx* zcs, ZSTD_outBuffer* outp
{
size_t const srcSize = zcs->inBuff.filled - zcs->dictSize;
if (srcSize) DEBUGLOG(4, "flushing : %u bytes left to compress", (U32)srcSize);
if ( ((srcSize > 0) || (endFrame && !zcs->frameEnded))
&& (zcs->nextJobID <= zcs->doneJobID + zcs->jobIDMask) ) {
CHECK_F( ZSTDMT_createCompressionJob(zcs, srcSize, endFrame) );
}
/* check if there is any data available to flush */
DEBUGLOG(5, "zcs->doneJobID : %u ; zcs->nextJobID : %u ", zcs->doneJobID, zcs->nextJobID);
return ZSTDMT_flushNextJob(zcs, output, 1);
return ZSTDMT_flushNextJob(zcs, output, 1 /* blockToFlush */);
}
size_t ZSTDMT_flushStream(ZSTDMT_CCtx* zcs, ZSTD_outBuffer* output)
{
if (zcs->nbThreads==1) return ZSTD_flushStream(zcs->cstream, output);
return ZSTDMT_flushStream_internal(zcs, output, 0);
DEBUGLOG(5, "ZSTDMT_flushStream");
if (zcs->nbThreads==1)
return ZSTD_flushStream(zcs->cctxPool->cctx[0], output);
return ZSTDMT_flushStream_internal(zcs, output, 0 /* endFrame */);
}
size_t ZSTDMT_endStream(ZSTDMT_CCtx* zcs, ZSTD_outBuffer* output)
{
if (zcs->nbThreads==1) return ZSTD_endStream(zcs->cstream, output);
return ZSTDMT_flushStream_internal(zcs, output, 1);
DEBUGLOG(4, "ZSTDMT_endStream");
if (zcs->nbThreads==1)
return ZSTD_endStream(zcs->cctxPool->cctx[0], output);
return ZSTDMT_flushStream_internal(zcs, output, 1 /* endFrame */);
}
@ -15,25 +15,34 @@
#endif
/* Note : All prototypes defined in this file shall be considered experimental.
* There is no guarantee of API continuity (yet) on any of these prototypes */
/* Note : All prototypes defined in this file are labelled experimental.
* No guarantee of API continuity is provided on any of them.
* In fact, the expectation is that these prototypes will be replaced
* by ZSTD_compress_generic() API in the near future */
/* === Dependencies === */
#include <stddef.h> /* size_t */
#include <stddef.h> /* size_t */
#define ZSTD_STATIC_LINKING_ONLY /* ZSTD_parameters */
#include "zstd.h" /* ZSTD_inBuffer, ZSTD_outBuffer, ZSTDLIB_API */
#include "zstd.h" /* ZSTD_inBuffer, ZSTD_outBuffer, ZSTDLIB_API */
/* === Simple one-pass functions === */
/* === Memory management === */
typedef struct ZSTDMT_CCtx_s ZSTDMT_CCtx;
ZSTDLIB_API ZSTDMT_CCtx* ZSTDMT_createCCtx(unsigned nbThreads);
ZSTDLIB_API size_t ZSTDMT_freeCCtx(ZSTDMT_CCtx* cctx);
ZSTDLIB_API ZSTDMT_CCtx* ZSTDMT_createCCtx_advanced(unsigned nbThreads,
ZSTD_customMem cMem);
ZSTDLIB_API size_t ZSTDMT_freeCCtx(ZSTDMT_CCtx* mtctx);
ZSTDLIB_API size_t ZSTDMT_sizeof_CCtx(ZSTDMT_CCtx* mtctx);
/* === Simple buffer-to-buffer one-pass function === */
ZSTDLIB_API size_t ZSTDMT_compressCCtx(ZSTDMT_CCtx* mtctx,
void* dst, size_t dstCapacity,
const void* src, size_t srcSize,
int compressionLevel);
ZSTDLIB_API size_t ZSTDMT_compressCCtx(ZSTDMT_CCtx* cctx,
void* dst, size_t dstCapacity,
const void* src, size_t srcSize,
int compressionLevel);
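As an aside (not part of this diff), a minimal one-shot sketch of the buffer-to-buffer entry point; the helper name, the 4-thread count and level 3 are arbitrary, and ERROR() assumes the library-internal error_private.h is visible:

/* Sketch : one-shot multi-threaded compression of an in-memory buffer. */
static size_t MT_compressOnce(void* dst, size_t dstCapacity,
                              const void* src, size_t srcSize)
{
    size_t cSize;
    ZSTDMT_CCtx* const mtctx = ZSTDMT_createCCtx(4 /* arbitrary worker count */);
    if (mtctx == NULL) return ERROR(memory_allocation);
    /* dstCapacity should be at least ZSTD_compressBound(srcSize) to guarantee success */
    cSize = ZSTDMT_compressCCtx(mtctx, dst, dstCapacity, src, srcSize, 3 /* arbitrary level */);
    ZSTDMT_freeCCtx(mtctx);
    return cSize;   /* check with ZSTD_isError() */
}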
/* === Streaming functions === */
@ -53,8 +62,22 @@ ZSTDLIB_API size_t ZSTDMT_endStream(ZSTDMT_CCtx* mtctx, ZSTD_outBuffer* output);
# define ZSTDMT_SECTION_SIZE_MIN (1U << 20) /* 1 MB - Minimum size of each compression job */
#endif
ZSTDLIB_API size_t ZSTDMT_initCStream_advanced(ZSTDMT_CCtx* mtctx, const void* dict, size_t dictSize, /**< dict can be released after init, a local copy is preserved within zcs */
ZSTD_parameters params, unsigned long long pledgedSrcSize); /**< pledgedSrcSize is optional and can be zero == unknown */
ZSTDLIB_API size_t ZSTDMT_compress_advanced(ZSTDMT_CCtx* mtctx,
void* dst, size_t dstCapacity,
const void* src, size_t srcSize,
const ZSTD_CDict* cdict,
ZSTD_parameters const params,
unsigned overlapRLog);
ZSTDLIB_API size_t ZSTDMT_initCStream_advanced(ZSTDMT_CCtx* mtctx,
const void* dict, size_t dictSize, /* dict can be released after init, a local copy is preserved within zcs */
ZSTD_parameters params,
unsigned long long pledgedSrcSize); /* pledgedSrcSize is optional and can be zero == unknown */
ZSTDLIB_API size_t ZSTDMT_initCStream_usingCDict(ZSTDMT_CCtx* mtctx,
const ZSTD_CDict* cdict,
ZSTD_frameParameters fparams,
unsigned long long pledgedSrcSize); /* note : zero means empty */
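A hedged sketch of the CDict path (not part of the diff): the helper and the flag choices are illustrative, and the CDict stays owned by the caller and must outlive the stream.

/* Sketch : start a multi-threaded stream that reuses a pre-digested dictionary. */
static size_t MT_initWithCDict(ZSTDMT_CCtx* mtctx, const ZSTD_CDict* cdict,
                               unsigned long long pledgedSrcSize)
{
    ZSTD_frameParameters fParams;
    fParams.contentSizeFlag = 1;   /* write pledgedSrcSize into the frame header */
    fParams.checksumFlag    = 0;
    fParams.noDictIDFlag    = 0;
    return ZSTDMT_initCStream_usingCDict(mtctx, cdict, fParams, pledgedSrcSize);
}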
/* ZSDTMT_parameter :
* List of parameters that can be set using ZSTDMT_setMTCtxParameter() */
@ -71,6 +94,19 @@ typedef enum {
ZSTDLIB_API size_t ZSTDMT_setMTCtxParameter(ZSTDMT_CCtx* mtctx, ZSDTMT_parameter parameter, unsigned value);
/*! ZSTDMT_compressStream_generic() :
* Combines ZSTDMT_compressStream() with ZSTDMT_flushStream() or ZSTDMT_endStream()
* depending on flush directive.
* @return : minimum amount of data still to be flushed
* 0 if fully flushed
* or an error code */
ZSTDLIB_API size_t ZSTDMT_compressStream_generic(ZSTDMT_CCtx* mtctx,
ZSTD_outBuffer* output,
ZSTD_inBuffer* input,
ZSTD_EndDirective endOp);
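A usage sketch of the generic entry point (illustrative only: the stdio plumbing, buffer sizes and helper name are not from the library, and error handling is minimal). The mtctx is assumed to have been initialized with one of the ZSTDMT_initCStream*() variants above.

#include <stdio.h>   /* FILE, fread, fwrite */

/* Sketch : pump a whole file through ZSTDMT_compressStream_generic(). */
static int MT_compressFile(ZSTDMT_CCtx* mtctx, FILE* fin, FILE* fout)
{
    char inBuf[1 << 17];
    char outBuf[1 << 17];
    size_t readSize;
    do {
        ZSTD_inBuffer input;
        ZSTD_EndDirective mode;
        readSize = fread(inBuf, 1, sizeof(inBuf), fin);
        input.src = inBuf; input.size = readSize; input.pos = 0;
        mode = (readSize < sizeof(inBuf)) ? ZSTD_e_end : ZSTD_e_continue;
        for (;;) {
            ZSTD_outBuffer output;
            size_t remaining;
            output.dst = outBuf; output.size = sizeof(outBuf); output.pos = 0;
            remaining = ZSTDMT_compressStream_generic(mtctx, &output, &input, mode);
            if (ZSTD_isError(remaining)) return 1;
            if (fwrite(outBuf, 1, output.pos, fout) != output.pos) return 1;
            if (mode == ZSTD_e_end) { if (remaining == 0) break; }   /* fully flushed */
            else if (input.pos == input.size) break;                 /* input consumed */
        }
    } while (readSize == sizeof(inBuf));
    return 0;
}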
#if defined (__cplusplus)
}
#endif

View File

@ -67,6 +67,12 @@
#define HUF_STATIC_ASSERT(c) { enum { HUF_static_assert = 1/(int)(!!(c)) }; } /* use only *after* variable declarations */
/* **************************************************************
* Byte alignment for workSpace management
****************************************************************/
#define HUF_ALIGN(x, a) HUF_ALIGN_MASK((x), (a) - 1)
#define HUF_ALIGN_MASK(x, mask) (((x) + (mask)) & ~(mask))
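A worked expansion may help (illustrative, not part of the patch):

/* Example : HUF_ALIGN(13, sizeof(U32)) == HUF_ALIGN_MASK(13, 3) == (13 + 3) & ~3 == 16,
 * i.e. a byte count rounded up to the next multiple of 4; dividing by 4 afterwards
 * (the `>> 2` used below) converts that byte count into a number of U32 workspace slots. */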
/*-***************************/
/* generic DTableDesc */
/*-***************************/
@ -87,16 +93,28 @@ static DTableDesc HUF_getDTableDesc(const HUF_DTable* table)
typedef struct { BYTE byte; BYTE nbBits; } HUF_DEltX2; /* single-symbol decoding */
size_t HUF_readDTableX2 (HUF_DTable* DTable, const void* src, size_t srcSize)
size_t HUF_readDTableX2_wksp(HUF_DTable* DTable, const void* src, size_t srcSize, void* workSpace, size_t wkspSize)
{
BYTE huffWeight[HUF_SYMBOLVALUE_MAX + 1];
U32 rankVal[HUF_TABLELOG_ABSOLUTEMAX + 1]; /* large enough for values from 0 to 16 */
U32 tableLog = 0;
U32 nbSymbols = 0;
size_t iSize;
void* const dtPtr = DTable + 1;
HUF_DEltX2* const dt = (HUF_DEltX2*)dtPtr;
U32* rankVal;
BYTE* huffWeight;
size_t spaceUsed32 = 0;
rankVal = (U32 *)workSpace + spaceUsed32;
spaceUsed32 += HUF_TABLELOG_ABSOLUTEMAX + 1;
huffWeight = (BYTE *)((U32 *)workSpace + spaceUsed32);
spaceUsed32 += HUF_ALIGN(HUF_SYMBOLVALUE_MAX + 1, sizeof(U32)) >> 2;
if ((spaceUsed32 << 2) > wkspSize)
return ERROR(tableLog_tooLarge);
workSpace = (U32 *)workSpace + spaceUsed32;
wkspSize -= (spaceUsed32 << 2);
HUF_STATIC_ASSERT(sizeof(DTableDesc) == sizeof(HUF_DTable));
/* memset(huffWeight, 0, sizeof(huffWeight)); */ /* is not necessary, even though some analyzers complain ... */
@ -135,6 +153,13 @@ size_t HUF_readDTableX2 (HUF_DTable* DTable, const void* src, size_t srcSize)
return iSize;
}
size_t HUF_readDTableX2(HUF_DTable* DTable, const void* src, size_t srcSize)
{
U32 workSpace[HUF_DECOMPRESS_WORKSPACE_SIZE_U32];
return HUF_readDTableX2_wksp(DTable, src, srcSize,
workSpace, sizeof(workSpace));
}
static BYTE HUF_decodeSymbolX2(BIT_DStream_t* Dstream, const HUF_DEltX2* dt, const U32 dtLog)
{
@ -212,11 +237,13 @@ size_t HUF_decompress1X2_usingDTable(
return HUF_decompress1X2_usingDTable_internal(dst, dstSize, cSrc, cSrcSize, DTable);
}
size_t HUF_decompress1X2_DCtx (HUF_DTable* DCtx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize)
size_t HUF_decompress1X2_DCtx_wksp(HUF_DTable* DCtx, void* dst, size_t dstSize,
const void* cSrc, size_t cSrcSize,
void* workSpace, size_t wkspSize)
{
const BYTE* ip = (const BYTE*) cSrc;
size_t const hSize = HUF_readDTableX2 (DCtx, cSrc, cSrcSize);
size_t const hSize = HUF_readDTableX2_wksp(DCtx, cSrc, cSrcSize, workSpace, wkspSize);
if (HUF_isError(hSize)) return hSize;
if (hSize >= cSrcSize) return ERROR(srcSize_wrong);
ip += hSize; cSrcSize -= hSize;
@ -224,6 +251,15 @@ size_t HUF_decompress1X2_DCtx (HUF_DTable* DCtx, void* dst, size_t dstSize, cons
return HUF_decompress1X2_usingDTable_internal (dst, dstSize, ip, cSrcSize, DCtx);
}
size_t HUF_decompress1X2_DCtx(HUF_DTable* DCtx, void* dst, size_t dstSize,
const void* cSrc, size_t cSrcSize)
{
U32 workSpace[HUF_DECOMPRESS_WORKSPACE_SIZE_U32];
return HUF_decompress1X2_DCtx_wksp(DCtx, dst, dstSize, cSrc, cSrcSize,
workSpace, sizeof(workSpace));
}
size_t HUF_decompress1X2 (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize)
{
HUF_CREATE_STATIC_DTABLEX2(DTable, HUF_TABLELOG_MAX);
@ -335,11 +371,14 @@ size_t HUF_decompress4X2_usingDTable(
}
size_t HUF_decompress4X2_DCtx (HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize)
size_t HUF_decompress4X2_DCtx_wksp(HUF_DTable* dctx, void* dst, size_t dstSize,
const void* cSrc, size_t cSrcSize,
void* workSpace, size_t wkspSize)
{
const BYTE* ip = (const BYTE*) cSrc;
size_t const hSize = HUF_readDTableX2 (dctx, cSrc, cSrcSize);
size_t const hSize = HUF_readDTableX2_wksp (dctx, cSrc, cSrcSize,
workSpace, wkspSize);
if (HUF_isError(hSize)) return hSize;
if (hSize >= cSrcSize) return ERROR(srcSize_wrong);
ip += hSize; cSrcSize -= hSize;
@ -347,6 +386,13 @@ size_t HUF_decompress4X2_DCtx (HUF_DTable* dctx, void* dst, size_t dstSize, cons
return HUF_decompress4X2_usingDTable_internal (dst, dstSize, ip, cSrcSize, dctx);
}
size_t HUF_decompress4X2_DCtx (HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize)
{
U32 workSpace[HUF_DECOMPRESS_WORKSPACE_SIZE_U32];
return HUF_decompress4X2_DCtx_wksp(dctx, dst, dstSize, cSrc, cSrcSize,
workSpace, sizeof(workSpace));
}
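Callers that cannot afford the stack array above can hand in their own scratch area instead; a hedged sketch (the heap workspace and helper name are illustrative, and ERROR() assumes the library-internal headers already included by this file):

#include <stdlib.h>  /* malloc, free */

/* Sketch : same 4-streams decode, but with a caller-owned heap workspace. */
static size_t HUF_decompress4X2_heapWksp(HUF_DTable* dctx,
                                         void* dst, size_t dstSize,
                                         const void* cSrc, size_t cSrcSize)
{
    size_t const wkspSize = HUF_DECOMPRESS_WORKSPACE_SIZE_U32 * sizeof(U32);
    void* const wksp = malloc(wkspSize);
    size_t result;
    if (wksp == NULL) return ERROR(memory_allocation);
    result = HUF_decompress4X2_DCtx_wksp(dctx, dst, dstSize, cSrc, cSrcSize,
                                         wksp, wkspSize);
    free(wksp);
    return result;
}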
size_t HUF_decompress4X2 (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize)
{
HUF_CREATE_STATIC_DTABLEX2(DTable, HUF_TABLELOG_MAX);
@ -403,7 +449,8 @@ static void HUF_fillDTableX4Level2(HUF_DEltX4* DTable, U32 sizeLog, const U32 co
} }
}
typedef U32 rankVal_t[HUF_TABLELOG_MAX][HUF_TABLELOG_MAX + 1];
typedef U32 rankValCol_t[HUF_TABLELOG_MAX + 1];
typedef rankValCol_t rankVal_t[HUF_TABLELOG_MAX];
static void HUF_fillDTableX4(HUF_DEltX4* DTable, const U32 targetLog,
const sortedSymbol_t* sortedList, const U32 sortedListSize,
@ -447,20 +494,43 @@ static void HUF_fillDTableX4(HUF_DEltX4* DTable, const U32 targetLog,
}
}
size_t HUF_readDTableX4 (HUF_DTable* DTable, const void* src, size_t srcSize)
size_t HUF_readDTableX4_wksp(HUF_DTable* DTable, const void* src,
size_t srcSize, void* workSpace,
size_t wkspSize)
{
BYTE weightList[HUF_SYMBOLVALUE_MAX + 1];
sortedSymbol_t sortedSymbol[HUF_SYMBOLVALUE_MAX + 1];
U32 rankStats[HUF_TABLELOG_MAX + 1] = { 0 };
U32 rankStart0[HUF_TABLELOG_MAX + 2] = { 0 };
U32* const rankStart = rankStart0+1;
rankVal_t rankVal;
U32 tableLog, maxW, sizeOfSort, nbSymbols;
DTableDesc dtd = HUF_getDTableDesc(DTable);
U32 const maxTableLog = dtd.maxTableLog;
size_t iSize;
void* dtPtr = DTable+1; /* force compiler to avoid strict-aliasing */
HUF_DEltX4* const dt = (HUF_DEltX4*)dtPtr;
U32 *rankStart;
rankValCol_t* rankVal;
U32* rankStats;
U32* rankStart0;
sortedSymbol_t* sortedSymbol;
BYTE* weightList;
size_t spaceUsed32 = 0;
rankVal = (rankValCol_t *)((U32 *)workSpace + spaceUsed32);
spaceUsed32 += (sizeof(rankValCol_t) * HUF_TABLELOG_MAX) >> 2;
rankStats = (U32 *)workSpace + spaceUsed32;
spaceUsed32 += HUF_TABLELOG_MAX + 1;
rankStart0 = (U32 *)workSpace + spaceUsed32;
spaceUsed32 += HUF_TABLELOG_MAX + 2;
sortedSymbol = (sortedSymbol_t *)workSpace + (spaceUsed32 * sizeof(U32)) / sizeof(sortedSymbol_t);
spaceUsed32 += HUF_ALIGN(sizeof(sortedSymbol_t) * (HUF_SYMBOLVALUE_MAX + 1), sizeof(U32)) >> 2;
weightList = (BYTE *)((U32 *)workSpace + spaceUsed32);
spaceUsed32 += HUF_ALIGN(HUF_SYMBOLVALUE_MAX + 1, sizeof(U32)) >> 2;
if ((spaceUsed32 << 2) > wkspSize)
return ERROR(tableLog_tooLarge);
workSpace = (U32 *)workSpace + spaceUsed32;
wkspSize -= (spaceUsed32 << 2);
rankStart = rankStart0 + 1;
memset(rankStats, 0, sizeof(U32) * (2 * HUF_TABLELOG_MAX + 2 + 1));
HUF_STATIC_ASSERT(sizeof(HUF_DEltX4) == sizeof(HUF_DTable)); /* if compiler fails here, assertion is wrong */
if (maxTableLog > HUF_TABLELOG_MAX) return ERROR(tableLog_tooLarge);
@ -527,6 +597,12 @@ size_t HUF_readDTableX4 (HUF_DTable* DTable, const void* src, size_t srcSize)
return iSize;
}
size_t HUF_readDTableX4(HUF_DTable* DTable, const void* src, size_t srcSize)
{
U32 workSpace[HUF_DECOMPRESS_WORKSPACE_SIZE_U32];
return HUF_readDTableX4_wksp(DTable, src, srcSize,
workSpace, sizeof(workSpace));
}
static U32 HUF_decodeSymbolX4(void* op, BIT_DStream_t* DStream, const HUF_DEltX4* dt, const U32 dtLog)
{
@ -545,7 +621,8 @@ static U32 HUF_decodeLastSymbolX4(void* op, BIT_DStream_t* DStream, const HUF_DE
if (DStream->bitsConsumed < (sizeof(DStream->bitContainer)*8)) {
BIT_skipBits(DStream, dt[val].nbBits);
if (DStream->bitsConsumed > (sizeof(DStream->bitContainer)*8))
DStream->bitsConsumed = (sizeof(DStream->bitContainer)*8); /* ugly hack; works only because it's the last symbol. Note : can't easily extract nbBits from just this symbol */
/* ugly hack; works only because it's the last symbol. Note : can't easily extract nbBits from just this symbol */
DStream->bitsConsumed = (sizeof(DStream->bitContainer)*8);
} }
return 1;
}
@ -626,11 +703,14 @@ size_t HUF_decompress1X4_usingDTable(
return HUF_decompress1X4_usingDTable_internal(dst, dstSize, cSrc, cSrcSize, DTable);
}
size_t HUF_decompress1X4_DCtx (HUF_DTable* DCtx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize)
size_t HUF_decompress1X4_DCtx_wksp(HUF_DTable* DCtx, void* dst, size_t dstSize,
const void* cSrc, size_t cSrcSize,
void* workSpace, size_t wkspSize)
{
const BYTE* ip = (const BYTE*) cSrc;
size_t const hSize = HUF_readDTableX4 (DCtx, cSrc, cSrcSize);
size_t const hSize = HUF_readDTableX4_wksp(DCtx, cSrc, cSrcSize,
workSpace, wkspSize);
if (HUF_isError(hSize)) return hSize;
if (hSize >= cSrcSize) return ERROR(srcSize_wrong);
ip += hSize; cSrcSize -= hSize;
@ -638,6 +718,15 @@ size_t HUF_decompress1X4_DCtx (HUF_DTable* DCtx, void* dst, size_t dstSize, cons
return HUF_decompress1X4_usingDTable_internal (dst, dstSize, ip, cSrcSize, DCtx);
}
size_t HUF_decompress1X4_DCtx(HUF_DTable* DCtx, void* dst, size_t dstSize,
const void* cSrc, size_t cSrcSize)
{
U32 workSpace[HUF_DECOMPRESS_WORKSPACE_SIZE_U32];
return HUF_decompress1X4_DCtx_wksp(DCtx, dst, dstSize, cSrc, cSrcSize,
workSpace, sizeof(workSpace));
}
size_t HUF_decompress1X4 (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize)
{
HUF_CREATE_STATIC_DTABLEX4(DTable, HUF_TABLELOG_MAX);
@ -748,11 +837,14 @@ size_t HUF_decompress4X4_usingDTable(
}
size_t HUF_decompress4X4_DCtx (HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize)
size_t HUF_decompress4X4_DCtx_wksp(HUF_DTable* dctx, void* dst, size_t dstSize,
const void* cSrc, size_t cSrcSize,
void* workSpace, size_t wkspSize)
{
const BYTE* ip = (const BYTE*) cSrc;
size_t hSize = HUF_readDTableX4 (dctx, cSrc, cSrcSize);
size_t hSize = HUF_readDTableX4_wksp(dctx, cSrc, cSrcSize,
workSpace, wkspSize);
if (HUF_isError(hSize)) return hSize;
if (hSize >= cSrcSize) return ERROR(srcSize_wrong);
ip += hSize; cSrcSize -= hSize;
@ -760,6 +852,15 @@ size_t HUF_decompress4X4_DCtx (HUF_DTable* dctx, void* dst, size_t dstSize, cons
return HUF_decompress4X4_usingDTable_internal(dst, dstSize, ip, cSrcSize, dctx);
}
size_t HUF_decompress4X4_DCtx(HUF_DTable* dctx, void* dst, size_t dstSize,
const void* cSrc, size_t cSrcSize)
{
U32 workSpace[HUF_DECOMPRESS_WORKSPACE_SIZE_U32];
return HUF_decompress4X4_DCtx_wksp(dctx, dst, dstSize, cSrc, cSrcSize,
workSpace, sizeof(workSpace));
}
size_t HUF_decompress4X4 (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize)
{
HUF_CREATE_STATIC_DTABLEX4(DTable, HUF_TABLELOG_MAX);
@ -861,19 +962,32 @@ size_t HUF_decompress4X_DCtx (HUF_DTable* dctx, void* dst, size_t dstSize, const
}
}
size_t HUF_decompress4X_hufOnly (HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize)
size_t HUF_decompress4X_hufOnly(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize)
{
U32 workSpace[HUF_DECOMPRESS_WORKSPACE_SIZE_U32];
return HUF_decompress4X_hufOnly_wksp(dctx, dst, dstSize, cSrc, cSrcSize,
workSpace, sizeof(workSpace));
}
size_t HUF_decompress4X_hufOnly_wksp(HUF_DTable* dctx, void* dst,
size_t dstSize, const void* cSrc,
size_t cSrcSize, void* workSpace,
size_t wkspSize)
{
/* validation checks */
if (dstSize == 0) return ERROR(dstSize_tooSmall);
if ((cSrcSize >= dstSize) || (cSrcSize <= 1)) return ERROR(corruption_detected); /* invalid */
{ U32 const algoNb = HUF_selectDecoder(dstSize, cSrcSize);
return algoNb ? HUF_decompress4X4_DCtx(dctx, dst, dstSize, cSrc, cSrcSize) :
HUF_decompress4X2_DCtx(dctx, dst, dstSize, cSrc, cSrcSize) ;
return algoNb ? HUF_decompress4X4_DCtx_wksp(dctx, dst, dstSize, cSrc, cSrcSize, workSpace, wkspSize):
HUF_decompress4X2_DCtx_wksp(dctx, dst, dstSize, cSrc, cSrcSize, workSpace, wkspSize);
}
}
size_t HUF_decompress1X_DCtx (HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize)
size_t HUF_decompress1X_DCtx_wksp(HUF_DTable* dctx, void* dst, size_t dstSize,
const void* cSrc, size_t cSrcSize,
void* workSpace, size_t wkspSize)
{
/* validation checks */
if (dstSize == 0) return ERROR(dstSize_tooSmall);
@ -882,7 +996,17 @@ size_t HUF_decompress1X_DCtx (HUF_DTable* dctx, void* dst, size_t dstSize, const
if (cSrcSize == 1) { memset(dst, *(const BYTE*)cSrc, dstSize); return dstSize; } /* RLE */
{ U32 const algoNb = HUF_selectDecoder(dstSize, cSrcSize);
return algoNb ? HUF_decompress1X4_DCtx(dctx, dst, dstSize, cSrc, cSrcSize) :
HUF_decompress1X2_DCtx(dctx, dst, dstSize, cSrc, cSrcSize) ;
return algoNb ? HUF_decompress1X4_DCtx_wksp(dctx, dst, dstSize, cSrc,
cSrcSize, workSpace, wkspSize):
HUF_decompress1X2_DCtx_wksp(dctx, dst, dstSize, cSrc,
cSrcSize, workSpace, wkspSize);
}
}
size_t HUF_decompress1X_DCtx(HUF_DTable* dctx, void* dst, size_t dstSize,
const void* cSrc, size_t cSrcSize)
{
U32 workSpace[HUF_DECOMPRESS_WORKSPACE_SIZE_U32];
return HUF_decompress1X_DCtx_wksp(dctx, dst, dstSize, cSrc, cSrcSize,
workSpace, sizeof(workSpace));
}
File diff suppressed because it is too large
@ -398,7 +398,8 @@ typedef struct {
*/
static COVER_segment_t COVER_selectSegment(const COVER_ctx_t *ctx, U32 *freqs,
COVER_map_t *activeDmers, U32 begin,
U32 end, COVER_params_t parameters) {
U32 end,
ZDICT_cover_params_t parameters) {
/* Constants */
const U32 k = parameters.k;
const U32 d = parameters.d;
@ -478,7 +479,7 @@ static COVER_segment_t COVER_selectSegment(const COVER_ctx_t *ctx, U32 *freqs,
* Check the validity of the parameters.
* Returns non-zero if the parameters are valid and 0 otherwise.
*/
static int COVER_checkParameters(COVER_params_t parameters) {
static int COVER_checkParameters(ZDICT_cover_params_t parameters) {
/* k and d are required parameters */
if (parameters.d == 0 || parameters.k == 0) {
return 0;
@ -600,7 +601,7 @@ static int COVER_ctx_init(COVER_ctx_t *ctx, const void *samplesBuffer,
static size_t COVER_buildDictionary(const COVER_ctx_t *ctx, U32 *freqs,
COVER_map_t *activeDmers, void *dictBuffer,
size_t dictBufferCapacity,
COVER_params_t parameters) {
ZDICT_cover_params_t parameters) {
BYTE *const dict = (BYTE *)dictBuffer;
size_t tail = dictBufferCapacity;
/* Divide the data up into epochs of equal size.
@ -639,22 +640,10 @@ static size_t COVER_buildDictionary(const COVER_ctx_t *ctx, U32 *freqs,
return tail;
}
/**
* Translate from COVER_params_t to ZDICT_params_t required for finalizing the
* dictionary.
*/
static ZDICT_params_t COVER_translateParams(COVER_params_t parameters) {
ZDICT_params_t zdictParams;
memset(&zdictParams, 0, sizeof(zdictParams));
zdictParams.notificationLevel = 1;
zdictParams.dictID = parameters.dictID;
zdictParams.compressionLevel = parameters.compressionLevel;
return zdictParams;
}
ZDICTLIB_API size_t COVER_trainFromBuffer(
ZDICTLIB_API size_t ZDICT_trainFromBuffer_cover(
void *dictBuffer, size_t dictBufferCapacity, const void *samplesBuffer,
const size_t *samplesSizes, unsigned nbSamples, COVER_params_t parameters) {
const size_t *samplesSizes, unsigned nbSamples,
ZDICT_cover_params_t parameters) {
BYTE *const dict = (BYTE *)dictBuffer;
COVER_ctx_t ctx;
COVER_map_t activeDmers;
@ -673,7 +662,7 @@ ZDICTLIB_API size_t COVER_trainFromBuffer(
return ERROR(dstSize_tooSmall);
}
/* Initialize global data */
g_displayLevel = parameters.notificationLevel;
g_displayLevel = parameters.zParams.notificationLevel;
/* Initialize context and activeDmers */
if (!COVER_ctx_init(&ctx, samplesBuffer, samplesSizes, nbSamples,
parameters.d)) {
@ -690,10 +679,9 @@ ZDICTLIB_API size_t COVER_trainFromBuffer(
const size_t tail =
COVER_buildDictionary(&ctx, ctx.freqs, &activeDmers, dictBuffer,
dictBufferCapacity, parameters);
ZDICT_params_t zdictParams = COVER_translateParams(parameters);
const size_t dictionarySize = ZDICT_finalizeDictionary(
dict, dictBufferCapacity, dict + tail, dictBufferCapacity - tail,
samplesBuffer, samplesSizes, nbSamples, zdictParams);
samplesBuffer, samplesSizes, nbSamples, parameters.zParams);
if (!ZSTD_isError(dictionarySize)) {
DISPLAYLEVEL(2, "Constructed dictionary of size %u\n",
(U32)dictionarySize);
@ -718,7 +706,7 @@ typedef struct COVER_best_s {
size_t liveJobs;
void *dict;
size_t dictSize;
COVER_params_t parameters;
ZDICT_cover_params_t parameters;
size_t compressedSize;
} COVER_best_t;
@ -786,7 +774,7 @@ static void COVER_best_start(COVER_best_t *best) {
* If this dictionary is the best so far save it and its parameters.
*/
static void COVER_best_finish(COVER_best_t *best, size_t compressedSize,
COVER_params_t parameters, void *dict,
ZDICT_cover_params_t parameters, void *dict,
size_t dictSize) {
if (!best) {
return;
@ -830,7 +818,7 @@ typedef struct COVER_tryParameters_data_s {
const COVER_ctx_t *ctx;
COVER_best_t *best;
size_t dictBufferCapacity;
COVER_params_t parameters;
ZDICT_cover_params_t parameters;
} COVER_tryParameters_data_t;
/**
@ -842,7 +830,7 @@ static void COVER_tryParameters(void *opaque) {
/* Save parameters as local variables */
COVER_tryParameters_data_t *const data = (COVER_tryParameters_data_t *)opaque;
const COVER_ctx_t *const ctx = data->ctx;
const COVER_params_t parameters = data->parameters;
const ZDICT_cover_params_t parameters = data->parameters;
size_t dictBufferCapacity = data->dictBufferCapacity;
size_t totalCompressedSize = ERROR(GENERIC);
/* Allocate space for hash table, dict, and freqs */
@ -863,10 +851,10 @@ static void COVER_tryParameters(void *opaque) {
{
const size_t tail = COVER_buildDictionary(ctx, freqs, &activeDmers, dict,
dictBufferCapacity, parameters);
const ZDICT_params_t zdictParams = COVER_translateParams(parameters);
dictBufferCapacity = ZDICT_finalizeDictionary(
dict, dictBufferCapacity, dict + tail, dictBufferCapacity - tail,
ctx->samples, ctx->samplesSizes, (unsigned)ctx->nbSamples, zdictParams);
ctx->samples, ctx->samplesSizes, (unsigned)ctx->nbSamples,
parameters.zParams);
if (ZDICT_isError(dictBufferCapacity)) {
DISPLAYLEVEL(1, "Failed to finalize dictionary\n");
goto _cleanup;
@ -892,8 +880,8 @@ static void COVER_tryParameters(void *opaque) {
}
/* Create the cctx and cdict */
cctx = ZSTD_createCCtx();
cdict =
ZSTD_createCDict(dict, dictBufferCapacity, parameters.compressionLevel);
cdict = ZSTD_createCDict(dict, dictBufferCapacity,
parameters.zParams.compressionLevel);
if (!dst || !cctx || !cdict) {
goto _compressCleanup;
}
@ -930,12 +918,10 @@ static void COVER_tryParameters(void *opaque) {
}
}
ZDICTLIB_API size_t COVER_optimizeTrainFromBuffer(void *dictBuffer,
size_t dictBufferCapacity,
const void *samplesBuffer,
const size_t *samplesSizes,
unsigned nbSamples,
COVER_params_t *parameters) {
ZDICTLIB_API size_t ZDICT_optimizeTrainFromBuffer_cover(
void *dictBuffer, size_t dictBufferCapacity, const void *samplesBuffer,
const size_t *samplesSizes, unsigned nbSamples,
ZDICT_cover_params_t *parameters) {
/* constants */
const unsigned nbThreads = parameters->nbThreads;
const unsigned kMinD = parameters->d == 0 ? 6 : parameters->d;
@ -947,7 +933,7 @@ ZDICTLIB_API size_t COVER_optimizeTrainFromBuffer(void *dictBuffer,
const unsigned kIterations =
(1 + (kMaxD - kMinD) / 2) * (1 + (kMaxK - kMinK) / kStepSize);
/* Local variables */
const int displayLevel = parameters->notificationLevel;
const int displayLevel = parameters->zParams.notificationLevel;
unsigned iteration = 1;
unsigned d;
unsigned k;
@ -976,7 +962,7 @@ ZDICTLIB_API size_t COVER_optimizeTrainFromBuffer(void *dictBuffer,
/* Initialization */
COVER_best_init(&best);
/* Turn down global display level to clean up display at level 2 and below */
g_displayLevel = parameters->notificationLevel - 1;
g_displayLevel = parameters->zParams.notificationLevel - 1;
/* Loop through d first because each new value needs a new context */
LOCALDISPLAYLEVEL(displayLevel, 2, "Trying %u different sets of parameters\n",
kIterations);
@ -94,7 +94,7 @@ const char* ZDICT_getErrorName(size_t errorCode) { return ERR_getErrorName(error
unsigned ZDICT_getDictID(const void* dictBuffer, size_t dictSize)
{
if (dictSize < 8) return 0;
if (MEM_readLE32(dictBuffer) != ZSTD_DICT_MAGIC) return 0;
if (MEM_readLE32(dictBuffer) != ZSTD_MAGIC_DICTIONARY) return 0;
return MEM_readLE32((const char*)dictBuffer + 4);
}
@ -487,7 +487,7 @@ static U32 ZDICT_dictSize(const dictItem* dictList)
}
static size_t ZDICT_trainBuffer(dictItem* dictList, U32 dictListSize,
static size_t ZDICT_trainBuffer_legacy(dictItem* dictList, U32 dictListSize,
const void* const buffer, size_t bufferSize, /* buffer must end with noisy guard band */
const size_t* fileSizes, unsigned nbFiles,
U32 minRatio, U32 notificationLevel)
@ -576,7 +576,7 @@ typedef struct
{
ZSTD_CCtx* ref;
ZSTD_CCtx* zc;
void* workPlace; /* must be ZSTD_BLOCKSIZE_ABSOLUTEMAX allocated */
void* workPlace; /* must be ZSTD_BLOCKSIZE_MAX allocated */
} EStats_ress_t;
#define MAXREPOFFSET 1024
@ -585,14 +585,14 @@ static void ZDICT_countEStats(EStats_ress_t esr, ZSTD_parameters params,
U32* countLit, U32* offsetcodeCount, U32* matchlengthCount, U32* litlengthCount, U32* repOffsets,
const void* src, size_t srcSize, U32 notificationLevel)
{
size_t const blockSizeMax = MIN (ZSTD_BLOCKSIZE_ABSOLUTEMAX, 1 << params.cParams.windowLog);
size_t const blockSizeMax = MIN (ZSTD_BLOCKSIZE_MAX, 1 << params.cParams.windowLog);
size_t cSize;
if (srcSize > blockSizeMax) srcSize = blockSizeMax; /* protection vs large samples */
{ size_t const errorCode = ZSTD_copyCCtx(esr.zc, esr.ref, 0);
if (ZSTD_isError(errorCode)) { DISPLAYLEVEL(1, "warning : ZSTD_copyCCtx failed \n"); return; }
}
cSize = ZSTD_compressBlock(esr.zc, esr.workPlace, ZSTD_BLOCKSIZE_ABSOLUTEMAX, src, srcSize);
cSize = ZSTD_compressBlock(esr.zc, esr.workPlace, ZSTD_BLOCKSIZE_MAX, src, srcSize);
if (ZSTD_isError(cSize)) { DISPLAYLEVEL(3, "warning : could not compress sample size %u \n", (U32)srcSize); return; }
if (cSize) { /* if == 0; block is not compressible */
@ -634,17 +634,6 @@ static void ZDICT_countEStats(EStats_ress_t esr, ZSTD_parameters params,
} } }
}
/*
static size_t ZDICT_maxSampleSize(const size_t* fileSizes, unsigned nbFiles)
{
unsigned u;
size_t max=0;
for (u=0; u<nbFiles; u++)
if (max < fileSizes[u]) max = fileSizes[u];
return max;
}
*/
static size_t ZDICT_totalSampleSize(const size_t* fileSizes, unsigned nbFiles)
{
size_t total=0;
@ -700,7 +689,7 @@ static size_t ZDICT_analyzeEntropy(void* dstBuffer, size_t maxDstSize,
/* init */
esr.ref = ZSTD_createCCtx();
esr.zc = ZSTD_createCCtx();
esr.workPlace = malloc(ZSTD_BLOCKSIZE_ABSOLUTEMAX);
esr.workPlace = malloc(ZSTD_BLOCKSIZE_MAX);
if (!esr.ref || !esr.zc || !esr.workPlace) {
eSize = ERROR(memory_allocation);
DISPLAYLEVEL(1, "Not enough memory \n");
@ -865,7 +854,7 @@ size_t ZDICT_finalizeDictionary(void* dictBuffer, size_t dictBufferCapacity,
if (dictBufferCapacity < ZDICT_DICTSIZE_MIN) return ERROR(dstSize_tooSmall);
/* dictionary header */
MEM_writeLE32(header, ZSTD_DICT_MAGIC);
MEM_writeLE32(header, ZSTD_MAGIC_DICTIONARY);
{ U64 const randomID = XXH64(customDictContent, dictContentSize, 0);
U32 const compliantID = (randomID % ((1U<<31)-32768)) + 32768;
U32 const dictID = params.dictID ? params.dictID : compliantID;
@ -917,7 +906,7 @@ size_t ZDICT_addEntropyTablesFromBuffer_advanced(void* dictBuffer, size_t dictCo
}
/* add dictionary header (after entropy tables) */
MEM_writeLE32(dictBuffer, ZSTD_DICT_MAGIC);
MEM_writeLE32(dictBuffer, ZSTD_MAGIC_DICTIONARY);
{ U64 const randomID = XXH64((char*)dictBuffer + dictBufferCapacity - dictContentSize, dictContentSize, 0);
U32 const compliantID = (randomID % ((1U<<31)-32768)) + 32768;
U32 const dictID = params.dictID ? params.dictID : compliantID;
@ -930,14 +919,14 @@ size_t ZDICT_addEntropyTablesFromBuffer_advanced(void* dictBuffer, size_t dictCo
}
/*! ZDICT_trainFromBuffer_unsafe() :
/*! ZDICT_trainFromBuffer_unsafe_legacy() :
* Warning : `samplesBuffer` must be followed by noisy guard band.
* @return : size of dictionary, or an error code which can be tested with ZDICT_isError()
*/
size_t ZDICT_trainFromBuffer_unsafe(
size_t ZDICT_trainFromBuffer_unsafe_legacy(
void* dictBuffer, size_t maxDictSize,
const void* samplesBuffer, const size_t* samplesSizes, unsigned nbSamples,
ZDICT_params_t params)
ZDICT_legacy_params_t params)
{
U32 const dictListSize = MAX(MAX(DICTLISTSIZE_DEFAULT, nbSamples), (U32)(maxDictSize/16));
dictItem* const dictList = (dictItem*)malloc(dictListSize * sizeof(*dictList));
@ -946,7 +935,7 @@ size_t ZDICT_trainFromBuffer_unsafe(
size_t const targetDictSize = maxDictSize;
size_t const samplesBuffSize = ZDICT_totalSampleSize(samplesSizes, nbSamples);
size_t dictSize = 0;
U32 const notificationLevel = params.notificationLevel;
U32 const notificationLevel = params.zParams.notificationLevel;
/* checks */
if (!dictList) return ERROR(memory_allocation);
@ -957,13 +946,13 @@ size_t ZDICT_trainFromBuffer_unsafe(
ZDICT_initDictItem(dictList);
/* build dictionary */
ZDICT_trainBuffer(dictList, dictListSize,
samplesBuffer, samplesBuffSize,
samplesSizes, nbSamples,
minRep, notificationLevel);
ZDICT_trainBuffer_legacy(dictList, dictListSize,
samplesBuffer, samplesBuffSize,
samplesSizes, nbSamples,
minRep, notificationLevel);
/* display best matches */
if (params.notificationLevel>= 3) {
if (params.zParams.notificationLevel>= 3) {
U32 const nb = MIN(25, dictList[0].pos);
U32 const dictContentSize = ZDICT_dictSize(dictList);
U32 u;
@ -1026,7 +1015,7 @@ size_t ZDICT_trainFromBuffer_unsafe(
dictSize = ZDICT_addEntropyTablesFromBuffer_advanced(dictBuffer, dictContentSize, maxDictSize,
samplesBuffer, samplesSizes, nbSamples,
params);
params.zParams);
}
/* clean up */
@ -1037,9 +1026,9 @@ size_t ZDICT_trainFromBuffer_unsafe(
/* issue : samplesBuffer need to be followed by a noisy guard band.
* work around : duplicate the buffer, and add the noise */
size_t ZDICT_trainFromBuffer_advanced(void* dictBuffer, size_t dictBufferCapacity,
const void* samplesBuffer, const size_t* samplesSizes, unsigned nbSamples,
ZDICT_params_t params)
size_t ZDICT_trainFromBuffer_legacy(void* dictBuffer, size_t dictBufferCapacity,
const void* samplesBuffer, const size_t* samplesSizes, unsigned nbSamples,
ZDICT_legacy_params_t params)
{
size_t result;
void* newBuff;
@ -1052,10 +1041,9 @@ size_t ZDICT_trainFromBuffer_advanced(void* dictBuffer, size_t dictBufferCapacit
memcpy(newBuff, samplesBuffer, sBuffSize);
ZDICT_fillNoise((char*)newBuff + sBuffSize, NOISELENGTH); /* guard band, for end of buffer condition */
result = ZDICT_trainFromBuffer_unsafe(
dictBuffer, dictBufferCapacity,
newBuff, samplesSizes, nbSamples,
params);
result =
ZDICT_trainFromBuffer_unsafe_legacy(dictBuffer, dictBufferCapacity, newBuff,
samplesSizes, nbSamples, params);
free(newBuff);
return result;
}
@ -1064,11 +1052,13 @@ size_t ZDICT_trainFromBuffer_advanced(void* dictBuffer, size_t dictBufferCapacit
size_t ZDICT_trainFromBuffer(void* dictBuffer, size_t dictBufferCapacity,
const void* samplesBuffer, const size_t* samplesSizes, unsigned nbSamples)
{
ZDICT_params_t params;
ZDICT_cover_params_t params;
memset(&params, 0, sizeof(params));
return ZDICT_trainFromBuffer_advanced(dictBuffer, dictBufferCapacity,
samplesBuffer, samplesSizes, nbSamples,
params);
params.d = 8;
params.steps = 4;
return ZDICT_optimizeTrainFromBuffer_cover(dictBuffer, dictBufferCapacity,
samplesBuffer, samplesSizes,
nbSamples, &params);
}
size_t ZDICT_addEntropyTablesFromBuffer(void* dictBuffer, size_t dictContentSize, size_t dictBufferCapacity,
@ -20,10 +20,12 @@ extern "C" {
/* ===== ZDICTLIB_API : control library symbols visibility ===== */
#if defined(__GNUC__) && (__GNUC__ >= 4)
# define ZDICTLIB_VISIBILITY __attribute__ ((visibility ("default")))
#else
# define ZDICTLIB_VISIBILITY
#ifndef ZDICTLIB_VISIBILITY
# if defined(__GNUC__) && (__GNUC__ >= 4)
# define ZDICTLIB_VISIBILITY __attribute__ ((visibility ("default")))
# else
# define ZDICTLIB_VISIBILITY
# endif
#endif
#if defined(ZSTD_DLL_EXPORT) && (ZSTD_DLL_EXPORT==1)
# define ZDICTLIB_API __declspec(dllexport) ZDICTLIB_VISIBILITY
@ -34,18 +36,20 @@ extern "C" {
#endif
/*! ZDICT_trainFromBuffer() :
Train a dictionary from an array of samples.
Samples must be stored concatenated in a single flat buffer `samplesBuffer`,
supplied with an array of sizes `samplesSizes`, providing the size of each sample, in order.
The resulting dictionary will be saved into `dictBuffer`.
@return : size of dictionary stored into `dictBuffer` (<= `dictBufferCapacity`)
or an error code, which can be tested with ZDICT_isError().
Tips : In general, a reasonable dictionary has a size of ~ 100 KB.
It's obviously possible to target smaller or larger ones, just by specifying different `dictBufferCapacity`.
In general, it's recommended to provide a few thousands samples, but this can vary a lot.
It's recommended that total size of all samples be about ~x100 times the target size of dictionary.
*/
/*! ZDICT_trainFromBuffer():
* Train a dictionary from an array of samples.
* Uses ZDICT_optimizeTrainFromBuffer_cover() single-threaded, with d=8 and steps=4.
* Samples must be stored concatenated in a single flat buffer `samplesBuffer`,
* supplied with an array of sizes `samplesSizes`, providing the size of each sample, in order.
* The resulting dictionary will be saved into `dictBuffer`.
* @return: size of dictionary stored into `dictBuffer` (<= `dictBufferCapacity`)
* or an error code, which can be tested with ZDICT_isError().
* Note: ZDICT_trainFromBuffer() requires about 9 bytes of memory for each input byte.
* Tips: In general, a reasonable dictionary has a size of ~ 100 KB.
* It's obviously possible to target smaller or larger ones, just by specifying different `dictBufferCapacity`.
* In general, it's recommended to provide a few thousands samples, but this can vary a lot.
* It's recommended that total size of all samples be about ~x100 times the target size of dictionary.
*/
ZDICTLIB_API size_t ZDICT_trainFromBuffer(void* dictBuffer, size_t dictBufferCapacity,
const void* samplesBuffer, const size_t* samplesSizes, unsigned nbSamples);
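A minimal calling sketch (not part of the diff; the ~100 KB capacity and the helper name are illustrative):

#include <stdlib.h>  /* malloc, free */

/* Sketch : train a dictionary from samples already concatenated in `samples`. */
static void* trainDict(const void* samples, const size_t* sampleSizes,
                       unsigned nbSamples, size_t* dictSizePtr)
{
    size_t const capacity = 100 * 1024;     /* ~100 KB, per the tip above */
    void* const dictBuffer = malloc(capacity);
    if (dictBuffer == NULL) return NULL;
    *dictSizePtr = ZDICT_trainFromBuffer(dictBuffer, capacity,
                                         samples, sampleSizes, nbSamples);
    if (ZDICT_isError(*dictSizePtr)) { free(dictBuffer); return NULL; }
    return dictBuffer;                      /* caller owns and frees */
}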
@ -67,94 +71,78 @@ ZDICTLIB_API const char* ZDICT_getErrorName(size_t errorCode);
* ==================================================================================== */
typedef struct {
unsigned selectivityLevel; /* 0 means default; larger => select more => larger dictionary */
int compressionLevel; /* 0 means default; target a specific zstd compression level */
unsigned notificationLevel; /* Write to stderr; 0 = none (default); 1 = errors; 2 = progression; 3 = details; 4 = debug; */
unsigned dictID; /* 0 means auto mode (32-bits random value); other : force dictID value */
unsigned reserved[2]; /* reserved space for future parameters */
} ZDICT_params_t;
/*! ZDICT_trainFromBuffer_advanced() :
Same as ZDICT_trainFromBuffer() with control over more parameters.
`parameters` is optional and can be provided with values set to 0 to mean "default".
@return : size of dictionary stored into `dictBuffer` (<= `dictBufferSize`),
or an error code, which can be tested by ZDICT_isError().
note : ZDICT_trainFromBuffer_advanced() will send notifications into stderr if instructed to, using notificationLevel>0.
*/
ZDICTLIB_API size_t ZDICT_trainFromBuffer_advanced(void* dictBuffer, size_t dictBufferCapacity,
const void* samplesBuffer, const size_t* samplesSizes, unsigned nbSamples,
ZDICT_params_t parameters);
/*! COVER_params_t :
For all values 0 means default.
k and d are the only required parameters.
*/
/*! ZDICT_cover_params_t:
* For all values 0 means default.
* k and d are the only required parameters.
*/
typedef struct {
unsigned k; /* Segment size : constraint: 0 < k : Reasonable range [16, 2048+] */
unsigned d; /* dmer size : constraint: 0 < d <= k : Reasonable range [6, 16] */
unsigned steps; /* Number of steps : Only used for optimization : 0 means default (32) : Higher means more parameters checked */
unsigned nbThreads; /* Number of threads : constraint: 0 < nbThreads : 1 means single-threaded : Only used for optimization : Ignored if ZSTD_MULTITHREAD is not defined */
unsigned notificationLevel; /* Write to stderr; 0 = none (default); 1 = errors; 2 = progression; 3 = details; 4 = debug; */
unsigned dictID; /* 0 means auto mode (32-bits random value); other : force dictID value */
int compressionLevel; /* 0 means default; target a specific zstd compression level */
} COVER_params_t;
ZDICT_params_t zParams;
} ZDICT_cover_params_t;
/*! COVER_trainFromBuffer() :
Train a dictionary from an array of samples using the COVER algorithm.
Samples must be stored concatenated in a single flat buffer `samplesBuffer`,
supplied with an array of sizes `samplesSizes`, providing the size of each sample, in order.
The resulting dictionary will be saved into `dictBuffer`.
@return : size of dictionary stored into `dictBuffer` (<= `dictBufferCapacity`)
or an error code, which can be tested with ZDICT_isError().
Note : COVER_trainFromBuffer() requires about 9 bytes of memory for each input byte.
Tips : In general, a reasonable dictionary has a size of ~ 100 KB.
It's obviously possible to target smaller or larger ones, just by specifying different `dictBufferCapacity`.
In general, it's recommended to provide a few thousands samples, but this can vary a lot.
It's recommended that total size of all samples be about ~x100 times the target size of dictionary.
*/
ZDICTLIB_API size_t COVER_trainFromBuffer(void* dictBuffer, size_t dictBufferCapacity,
const void* samplesBuffer, const size_t* samplesSizes, unsigned nbSamples,
COVER_params_t parameters);
/*! ZDICT_trainFromBuffer_cover():
* Train a dictionary from an array of samples using the COVER algorithm.
* Samples must be stored concatenated in a single flat buffer `samplesBuffer`,
* supplied with an array of sizes `samplesSizes`, providing the size of each sample, in order.
* The resulting dictionary will be saved into `dictBuffer`.
* @return: size of dictionary stored into `dictBuffer` (<= `dictBufferCapacity`)
* or an error code, which can be tested with ZDICT_isError().
* Note: ZDICT_trainFromBuffer_cover() requires about 9 bytes of memory for each input byte.
* Tips: In general, a reasonable dictionary has a size of ~ 100 KB.
* It's obviously possible to target smaller or larger ones, just by specifying different `dictBufferCapacity`.
* In general, it's recommended to provide a few thousands samples, but this can vary a lot.
* It's recommended that total size of all samples be about ~x100 times the target size of dictionary.
*/
ZDICTLIB_API size_t ZDICT_trainFromBuffer_cover(
void *dictBuffer, size_t dictBufferCapacity, const void *samplesBuffer,
const size_t *samplesSizes, unsigned nbSamples,
ZDICT_cover_params_t parameters);
/*! COVER_optimizeTrainFromBuffer() :
The same requirements as above hold for all the parameters except `parameters`.
This function tries many parameter combinations and picks the best parameters.
`*parameters` is filled with the best parameters found, and the dictionary
constructed with those parameters is stored in `dictBuffer`.
/*! ZDICT_optimizeTrainFromBuffer_cover():
* The same requirements as above hold for all the parameters except `parameters`.
* This function tries many parameter combinations and picks the best parameters.
* `*parameters` is filled with the best parameters found, and the dictionary
* constructed with those parameters is stored in `dictBuffer`.
*
* All of the parameters d, k, steps are optional.
* If d is non-zero then we don't check multiple values of d, otherwise we check d = {6, 8, 10, 12, 14, 16}.
* If steps is zero it defaults to 32.
* If k is non-zero then we don't check multiple values of k, otherwise we check k values in [16, 2048].
*
* @return: size of dictionary stored into `dictBuffer` (<= `dictBufferCapacity`)
* or an error code, which can be tested with ZDICT_isError().
* On success `*parameters` contains the parameters selected.
* Note: ZDICT_optimizeTrainFromBuffer_cover() requires about 8 bytes of memory for each input byte, plus an additional 5 bytes per input byte for each thread.
*/
ZDICTLIB_API size_t ZDICT_optimizeTrainFromBuffer_cover(
void *dictBuffer, size_t dictBufferCapacity, const void *samplesBuffer,
const size_t *samplesSizes, unsigned nbSamples,
ZDICT_cover_params_t *parameters);
All of the parameters d, k, steps are optional.
If d is non-zero then we don't check multiple values of d, otherwise we check d = {6, 8, 10, 12, 14, 16}.
if steps is zero it defaults to its default value.
If k is non-zero then we don't check multiple values of k, otherwise we check steps values in [16, 2048].
@return : size of dictionary stored into `dictBuffer` (<= `dictBufferCapacity`)
or an error code, which can be tested with ZDICT_isError().
On success `*parameters` contains the parameters selected.
Note : COVER_optimizeTrainFromBuffer() requires about 8 bytes of memory for each input byte and additionally another 5 bytes of memory for each byte of memory for each thread.
*/
ZDICTLIB_API size_t COVER_optimizeTrainFromBuffer(void* dictBuffer, size_t dictBufferCapacity,
const void* samplesBuffer, const size_t *samplesSizes, unsigned nbSamples,
COVER_params_t *parameters);
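A hedged sketch of the optimizer with the new nested zParams (not part of the diff; the numeric values are arbitrary):

#include <string.h>  /* memset */

/* Sketch : let the optimizer search k and d, then return the trained dictionary size. */
static size_t trainDictCoverOpt(void* dictBuffer, size_t dictCapacity,
                                const void* samples, const size_t* sampleSizes,
                                unsigned nbSamples)
{
    ZDICT_cover_params_t params;
    memset(&params, 0, sizeof(params));    /* 0 means "default" for every field */
    params.steps = 40;                     /* arbitrary : more steps => finer search */
    params.nbThreads = 2;                  /* ignored unless built with ZSTD_MULTITHREAD */
    params.zParams.notificationLevel = 2;  /* progression messages on stderr */
    /* on success, params.k and params.d hold the selected values */
    return ZDICT_optimizeTrainFromBuffer_cover(dictBuffer, dictCapacity,
                                               samples, sampleSizes, nbSamples,
                                               &params);
}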
/*! ZDICT_finalizeDictionary() :
Given a custom content as a basis for dictionary, and a set of samples,
finalize dictionary by adding headers and statistics.
Samples must be stored concatenated in a flat buffer `samplesBuffer`,
supplied with an array of sizes `samplesSizes`, providing the size of each sample in order.
dictContentSize must be >= ZDICT_CONTENTSIZE_MIN bytes.
maxDictSize must be >= dictContentSize, and must be >= ZDICT_DICTSIZE_MIN bytes.
@return : size of dictionary stored into `dictBuffer` (<= `dictBufferCapacity`),
or an error code, which can be tested by ZDICT_isError().
note : ZDICT_finalizeDictionary() will push notifications into stderr if instructed to, using notificationLevel>0.
note 2 : dictBuffer and dictContent can overlap
*/
/*! ZDICT_finalizeDictionary():
* Given a custom content as a basis for dictionary, and a set of samples,
* finalize dictionary by adding headers and statistics.
*
* Samples must be stored concatenated in a flat buffer `samplesBuffer`,
* supplied with an array of sizes `samplesSizes`, providing the size of each sample in order.
*
* dictContentSize must be >= ZDICT_CONTENTSIZE_MIN bytes.
* maxDictSize must be >= dictContentSize, and must be >= ZDICT_DICTSIZE_MIN bytes.
*
* @return: size of dictionary stored into `dictBuffer` (<= `dictBufferCapacity`),
* or an error code, which can be tested by ZDICT_isError().
* Note: ZDICT_finalizeDictionary() will push notifications into stderr if instructed to, using notificationLevel>0.
* Note 2: dictBuffer and dictContent can overlap
*/
#define ZDICT_CONTENTSIZE_MIN 128
#define ZDICT_DICTSIZE_MIN 256
ZDICTLIB_API size_t ZDICT_finalizeDictionary(void* dictBuffer, size_t dictBufferCapacity,
@ -162,7 +150,28 @@ ZDICTLIB_API size_t ZDICT_finalizeDictionary(void* dictBuffer, size_t dictBuffer
const void* samplesBuffer, const size_t* samplesSizes, unsigned nbSamples,
ZDICT_params_t parameters);
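A sketch of finalizing hand-built dictionary content (illustrative; it assumes the full prototype takes the raw content pointer and its size between the capacity and the samples, as in the shipped zdict.h, since the hunk above elides that pair of parameters):

#include <string.h>  /* memset */

/* Sketch : wrap raw dictionary content with a header and entropy tables. */
static size_t finalizeDict(void* dictBuffer, size_t dictCapacity,
                           const void* content, size_t contentSize,  /* >= ZDICT_CONTENTSIZE_MIN */
                           const void* samples, const size_t* sampleSizes,
                           unsigned nbSamples)
{
    ZDICT_params_t zParams;
    memset(&zParams, 0, sizeof(zParams));   /* default level, auto dictID, quiet */
    return ZDICT_finalizeDictionary(dictBuffer, dictCapacity,
                                    content, contentSize,
                                    samples, sampleSizes, nbSamples,
                                    zParams);
}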
typedef struct {
unsigned selectivityLevel; /* 0 means default; larger => select more => larger dictionary */
ZDICT_params_t zParams;
} ZDICT_legacy_params_t;
/*! ZDICT_trainFromBuffer_legacy():
* Train a dictionary from an array of samples.
* Samples must be stored concatenated in a single flat buffer `samplesBuffer`,
* supplied with an array of sizes `samplesSizes`, providing the size of each sample, in order.
* The resulting dictionary will be saved into `dictBuffer`.
* `parameters` is optional and can be provided with values set to 0 to mean "default".
* @return: size of dictionary stored into `dictBuffer` (<= `dictBufferCapacity`)
* or an error code, which can be tested with ZDICT_isError().
* Tips: In general, a reasonable dictionary has a size of ~ 100 KB.
* It's obviously possible to target smaller or larger ones, just by specifying different `dictBufferCapacity`.
* In general, it's recommended to provide a few thousands samples, but this can vary a lot.
* It's recommended that total size of all samples be about ~x100 times the target size of dictionary.
* Note: ZDICT_trainFromBuffer_legacy() will send notifications into stderr if instructed to, using notificationLevel>0.
*/
ZDICTLIB_API size_t ZDICT_trainFromBuffer_legacy(
void *dictBuffer, size_t dictBufferCapacity, const void *samplesBuffer,
const size_t *samplesSizes, unsigned nbSamples, ZDICT_legacy_params_t parameters);
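For callers migrating from the old flat struct, the visible change is the nested zParams; a short sketch (not part of the diff, values arbitrary):

#include <string.h>  /* memset */

/* Sketch : legacy trainer with the new ZDICT_legacy_params_t layout. */
static size_t trainDictLegacy(void* dictBuffer, size_t dictCapacity,
                              const void* samples, const size_t* sampleSizes,
                              unsigned nbSamples)
{
    ZDICT_legacy_params_t params;
    memset(&params, 0, sizeof(params));
    params.selectivityLevel = 9;            /* arbitrary : select more => larger dictionary */
    params.zParams.compressionLevel = 3;    /* previously params.compressionLevel */
    params.zParams.notificationLevel = 2;
    return ZDICT_trainFromBuffer_legacy(dictBuffer, dictCapacity,
                                        samples, sampleSizes, nbSamples, params);
}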
/* Deprecation warnings */
/* It is generally possible to disable deprecation warnings from compiler,
@ -816,13 +816,13 @@ MEM_STATIC size_t BIT_initDStream(BIT_DStream_t* bitD, const void* srcBuffer, si
bitD->bitContainer = *(const BYTE*)(bitD->start);
switch(srcSize)
{
case 7: bitD->bitContainer += (size_t)(((const BYTE*)(bitD->start))[6]) << (sizeof(size_t)*8 - 16);
case 6: bitD->bitContainer += (size_t)(((const BYTE*)(bitD->start))[5]) << (sizeof(size_t)*8 - 24);
case 5: bitD->bitContainer += (size_t)(((const BYTE*)(bitD->start))[4]) << (sizeof(size_t)*8 - 32);
case 4: bitD->bitContainer += (size_t)(((const BYTE*)(bitD->start))[3]) << 24;
case 3: bitD->bitContainer += (size_t)(((const BYTE*)(bitD->start))[2]) << 16;
case 2: bitD->bitContainer += (size_t)(((const BYTE*)(bitD->start))[1]) << 8;
default:;
case 7: bitD->bitContainer += (size_t)(((const BYTE*)(bitD->start))[6]) << (sizeof(size_t)*8 - 16);/* fall-through */
case 6: bitD->bitContainer += (size_t)(((const BYTE*)(bitD->start))[5]) << (sizeof(size_t)*8 - 24);/* fall-through */
case 5: bitD->bitContainer += (size_t)(((const BYTE*)(bitD->start))[4]) << (sizeof(size_t)*8 - 32);/* fall-through */
case 4: bitD->bitContainer += (size_t)(((const BYTE*)(bitD->start))[3]) << 24; /* fall-through */
case 3: bitD->bitContainer += (size_t)(((const BYTE*)(bitD->start))[2]) << 16; /* fall-through */
case 2: bitD->bitContainer += (size_t)(((const BYTE*)(bitD->start))[1]) << 8; /* fall-through */
default: break;
}
contain32 = ((const BYTE*)srcBuffer)[srcSize-1];
if (contain32 == 0) return ERROR(GENERIC); /* endMark not present */
@ -3665,7 +3665,7 @@ static size_t ZBUFF_decompressContinue(ZBUFF_DCtx* zbc, void* dst, size_t* maxDs
break;
}
zbc->stage = ZBUFFds_read;
/* fall-through */
case ZBUFFds_read:
{
size_t neededInSize = ZSTD_nextSrcSizeToDecompress(zbc->zc);
@ -3691,7 +3691,7 @@ static size_t ZBUFF_decompressContinue(ZBUFF_DCtx* zbc, void* dst, size_t* maxDs
if (ip==iend) { notDone = 0; break; } /* no more input */
zbc->stage = ZBUFFds_load;
}
/* fall-through */
case ZBUFFds_load:
{
size_t neededInSize = ZSTD_nextSrcSizeToDecompress(zbc->zc);
@ -3711,9 +3711,10 @@ static size_t ZBUFF_decompressContinue(ZBUFF_DCtx* zbc, void* dst, size_t* maxDs
if (!decodedSize) { zbc->stage = ZBUFFds_read; break; } /* this was just a header */
zbc->outEnd = zbc->outStart + decodedSize;
zbc->stage = ZBUFFds_flush;
// break; /* ZBUFFds_flush follows */
/* ZBUFFds_flush follows */
}
}
/* fall-through */
case ZBUFFds_flush:
{
size_t toFlushSize = zbc->outEnd - zbc->outStart;

View File

@ -326,13 +326,6 @@ size_t ZSTDv05_decompress_usingPreparedDCtx(
* Streaming functions (direct mode)
****************************************/
size_t ZSTDv05_decompressBegin(ZSTDv05_DCtx* dctx);
size_t ZSTDv05_decompressBegin_usingDict(ZSTDv05_DCtx* dctx, const void* dict, size_t dictSize);
void ZSTDv05_copyDCtx(ZSTDv05_DCtx* dctx, const ZSTDv05_DCtx* preparedDCtx);
size_t ZSTDv05_getFrameParams(ZSTDv05_parameters* params, const void* src, size_t srcSize);
size_t ZSTDv05_nextSrcSizeToDecompress(ZSTDv05_DCtx* dctx);
size_t ZSTDv05_decompressContinue(ZSTDv05_DCtx* dctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize);
/*
Streaming decompression, direct mode (bufferless)
@ -818,13 +811,13 @@ MEM_STATIC size_t BITv05_initDStream(BITv05_DStream_t* bitD, const void* srcBuff
bitD->bitContainer = *(const BYTE*)(bitD->start);
switch(srcSize)
{
case 7: bitD->bitContainer += (size_t)(((const BYTE*)(bitD->start))[6]) << (sizeof(size_t)*8 - 16);
case 6: bitD->bitContainer += (size_t)(((const BYTE*)(bitD->start))[5]) << (sizeof(size_t)*8 - 24);
case 5: bitD->bitContainer += (size_t)(((const BYTE*)(bitD->start))[4]) << (sizeof(size_t)*8 - 32);
case 4: bitD->bitContainer += (size_t)(((const BYTE*)(bitD->start))[3]) << 24;
case 3: bitD->bitContainer += (size_t)(((const BYTE*)(bitD->start))[2]) << 16;
case 2: bitD->bitContainer += (size_t)(((const BYTE*)(bitD->start))[1]) << 8;
default:;
case 7: bitD->bitContainer += (size_t)(((const BYTE*)(bitD->start))[6]) << (sizeof(size_t)*8 - 16);/* fall-through */
case 6: bitD->bitContainer += (size_t)(((const BYTE*)(bitD->start))[5]) << (sizeof(size_t)*8 - 24);/* fall-through */
case 5: bitD->bitContainer += (size_t)(((const BYTE*)(bitD->start))[4]) << (sizeof(size_t)*8 - 32);/* fall-through */
case 4: bitD->bitContainer += (size_t)(((const BYTE*)(bitD->start))[3]) << 24; /* fall-through */
case 3: bitD->bitContainer += (size_t)(((const BYTE*)(bitD->start))[2]) << 16; /* fall-through */
case 2: bitD->bitContainer += (size_t)(((const BYTE*)(bitD->start))[1]) << 8; /* fall-through */
default: break;
}
contain32 = ((const BYTE*)srcBuffer)[srcSize-1];
if (contain32 == 0) return ERROR(GENERIC); /* endMark not present */
@ -3955,7 +3948,7 @@ size_t ZBUFFv05_decompressContinue(ZBUFFv05_DCtx* zbc, void* dst, size_t* maxDst
zbc->stage = ZBUFFv05ds_decodeHeader;
break;
}
/* fall-through */
case ZBUFFv05ds_loadHeader:
/* complete header from src */
{
@ -3973,7 +3966,7 @@ size_t ZBUFFv05_decompressContinue(ZBUFFv05_DCtx* zbc, void* dst, size_t* maxDst
}
// zbc->stage = ZBUFFv05ds_decodeHeader; break; /* useless : stage follows */
}
/* fall-through */
case ZBUFFv05ds_decodeHeader:
/* apply header to create / resize buffers */
{
@ -4000,7 +3993,7 @@ size_t ZBUFFv05_decompressContinue(ZBUFFv05_DCtx* zbc, void* dst, size_t* maxDst
break;
}
zbc->stage = ZBUFFv05ds_read;
/* fall-through */
case ZBUFFv05ds_read:
{
size_t neededInSize = ZSTDv05_nextSrcSizeToDecompress(zbc->zc);
@ -4024,7 +4017,7 @@ size_t ZBUFFv05_decompressContinue(ZBUFFv05_DCtx* zbc, void* dst, size_t* maxDst
if (ip==iend) { notDone = 0; break; } /* no more input */
zbc->stage = ZBUFFv05ds_load;
}
/* fall-through */
case ZBUFFv05ds_load:
{
size_t neededInSize = ZSTDv05_nextSrcSizeToDecompress(zbc->zc);
@ -4045,7 +4038,9 @@ size_t ZBUFFv05_decompressContinue(ZBUFFv05_DCtx* zbc, void* dst, size_t* maxDst
zbc->outEnd = zbc->outStart + decodedSize;
zbc->stage = ZBUFFv05ds_flush;
// break; /* ZBUFFv05ds_flush follows */
} }
}
}
/* fall-through */
case ZBUFFv05ds_flush:
{
size_t toFlushSize = zbc->outEnd - zbc->outStart;

View File

@ -910,13 +910,13 @@ MEM_STATIC size_t BITv06_initDStream(BITv06_DStream_t* bitD, const void* srcBuff
bitD->bitContainer = *(const BYTE*)(bitD->start);
switch(srcSize)
{
case 7: bitD->bitContainer += (size_t)(((const BYTE*)(srcBuffer))[6]) << (sizeof(bitD->bitContainer)*8 - 16);
case 6: bitD->bitContainer += (size_t)(((const BYTE*)(srcBuffer))[5]) << (sizeof(bitD->bitContainer)*8 - 24);
case 5: bitD->bitContainer += (size_t)(((const BYTE*)(srcBuffer))[4]) << (sizeof(bitD->bitContainer)*8 - 32);
case 4: bitD->bitContainer += (size_t)(((const BYTE*)(srcBuffer))[3]) << 24;
case 3: bitD->bitContainer += (size_t)(((const BYTE*)(srcBuffer))[2]) << 16;
case 2: bitD->bitContainer += (size_t)(((const BYTE*)(srcBuffer))[1]) << 8;
default:;
case 7: bitD->bitContainer += (size_t)(((const BYTE*)(srcBuffer))[6]) << (sizeof(bitD->bitContainer)*8 - 16);/* fall-through */
case 6: bitD->bitContainer += (size_t)(((const BYTE*)(srcBuffer))[5]) << (sizeof(bitD->bitContainer)*8 - 24);/* fall-through */
case 5: bitD->bitContainer += (size_t)(((const BYTE*)(srcBuffer))[4]) << (sizeof(bitD->bitContainer)*8 - 32);/* fall-through */
case 4: bitD->bitContainer += (size_t)(((const BYTE*)(srcBuffer))[3]) << 24; /* fall-through */
case 3: bitD->bitContainer += (size_t)(((const BYTE*)(srcBuffer))[2]) << 16; /* fall-through */
case 2: bitD->bitContainer += (size_t)(((const BYTE*)(srcBuffer))[1]) << 8; /* fall-through */
default: break;
}
{ BYTE const lastByte = ((const BYTE*)srcBuffer)[srcSize-1];
if (lastByte == 0) return ERROR(GENERIC); /* endMark not present */
@ -3789,7 +3789,7 @@ size_t ZSTDv06_decompressContinue(ZSTDv06_DCtx* dctx, void* dst, size_t dstCapac
return 0;
}
dctx->expected = 0; /* not necessary to copy more */
/* fall-through */
case ZSTDds_decodeFrameHeader:
{ size_t result;
memcpy(dctx->headerBuffer + ZSTDv06_frameHeaderSize_min, src, dctx->expected);
@ -4116,7 +4116,7 @@ size_t ZBUFFv06_decompressContinue(ZBUFFv06_DCtx* zbd,
if (zbd->outBuff == NULL) return ERROR(memory_allocation);
} } }
zbd->stage = ZBUFFds_read;
/* fall-through */
case ZBUFFds_read:
{ size_t const neededInSize = ZSTDv06_nextSrcSizeToDecompress(zbd->zd);
if (neededInSize==0) { /* end of frame */
@ -4138,7 +4138,7 @@ size_t ZBUFFv06_decompressContinue(ZBUFFv06_DCtx* zbd,
if (ip==iend) { notDone = 0; break; } /* no more input */
zbd->stage = ZBUFFds_load;
}
/* fall-through */
case ZBUFFds_load:
{ size_t const neededInSize = ZSTDv06_nextSrcSizeToDecompress(zbd->zd);
size_t const toLoad = neededInSize - zbd->inPos; /* should always be <= remaining space within inBuff */
@ -4159,8 +4159,9 @@ size_t ZBUFFv06_decompressContinue(ZBUFFv06_DCtx* zbd,
zbd->outEnd = zbd->outStart + decodedSize;
zbd->stage = ZBUFFds_flush;
// break; /* ZBUFFds_flush follows */
} }
}
}
/* fall-through */
case ZBUFFds_flush:
{ size_t const toFlushSize = zbd->outEnd - zbd->outStart;
size_t const flushedSize = ZBUFFv06_limitCopy(op, oend-op, zbd->outBuff + zbd->outStart, toFlushSize);

View File

@ -580,13 +580,13 @@ MEM_STATIC size_t BITv07_initDStream(BITv07_DStream_t* bitD, const void* srcBuff
bitD->bitContainer = *(const BYTE*)(bitD->start);
switch(srcSize)
{
case 7: bitD->bitContainer += (size_t)(((const BYTE*)(srcBuffer))[6]) << (sizeof(bitD->bitContainer)*8 - 16);
case 6: bitD->bitContainer += (size_t)(((const BYTE*)(srcBuffer))[5]) << (sizeof(bitD->bitContainer)*8 - 24);
case 5: bitD->bitContainer += (size_t)(((const BYTE*)(srcBuffer))[4]) << (sizeof(bitD->bitContainer)*8 - 32);
case 4: bitD->bitContainer += (size_t)(((const BYTE*)(srcBuffer))[3]) << 24;
case 3: bitD->bitContainer += (size_t)(((const BYTE*)(srcBuffer))[2]) << 16;
case 2: bitD->bitContainer += (size_t)(((const BYTE*)(srcBuffer))[1]) << 8;
default:;
case 7: bitD->bitContainer += (size_t)(((const BYTE*)(srcBuffer))[6]) << (sizeof(bitD->bitContainer)*8 - 16);/* fall-through */
case 6: bitD->bitContainer += (size_t)(((const BYTE*)(srcBuffer))[5]) << (sizeof(bitD->bitContainer)*8 - 24);/* fall-through */
case 5: bitD->bitContainer += (size_t)(((const BYTE*)(srcBuffer))[4]) << (sizeof(bitD->bitContainer)*8 - 32);/* fall-through */
case 4: bitD->bitContainer += (size_t)(((const BYTE*)(srcBuffer))[3]) << 24; /* fall-through */
case 3: bitD->bitContainer += (size_t)(((const BYTE*)(srcBuffer))[2]) << 16; /* fall-through */
case 2: bitD->bitContainer += (size_t)(((const BYTE*)(srcBuffer))[1]) << 8; /* fall-through */
default: break;
}
{ BYTE const lastByte = ((const BYTE*)srcBuffer)[srcSize-1];
bitD->bitsConsumed = lastByte ? 8 - BITv07_highbit32(lastByte) : 0;
@ -2920,8 +2920,6 @@ typedef struct {
void ZSTDv07_seqToCodes(const seqStore_t* seqStorePtr, size_t const nbSeq);
/* custom memory allocation functions */
void* ZSTDv07_defaultAllocFunction(void* opaque, size_t size);
void ZSTDv07_defaultFreeFunction(void* opaque, void* address);
static const ZSTDv07_customMem defaultCustomMem = { ZSTDv07_defaultAllocFunction, ZSTDv07_defaultFreeFunction, NULL };
#endif /* ZSTDv07_CCOMMON_H_MODULE */
@ -4047,7 +4045,7 @@ size_t ZSTDv07_decompressContinue(ZSTDv07_DCtx* dctx, void* dst, size_t dstCapac
return 0;
}
dctx->expected = 0; /* not necessary to copy more */
/* fall-through */
case ZSTDds_decodeFrameHeader:
{ size_t result;
memcpy(dctx->headerBuffer + ZSTDv07_frameHeaderSize_min, src, dctx->expected);
@ -4494,7 +4492,7 @@ size_t ZBUFFv07_decompressContinue(ZBUFFv07_DCtx* zbd,
} } }
zbd->stage = ZBUFFds_read;
/* pass-through */
/* fall-through */
case ZBUFFds_read:
{ size_t const neededInSize = ZSTDv07_nextSrcSizeToDecompress(zbd->zd);
if (neededInSize==0) { /* end of frame */
@ -4517,7 +4515,7 @@ size_t ZBUFFv07_decompressContinue(ZBUFFv07_DCtx* zbd,
if (ip==iend) { notDone = 0; break; } /* no more input */
zbd->stage = ZBUFFds_load;
}
/* fall-through */
case ZBUFFds_load:
{ size_t const neededInSize = ZSTDv07_nextSrcSizeToDecompress(zbd->zd);
size_t const toLoad = neededInSize - zbd->inPos; /* should always be <= remaining space within inBuff */
@ -4540,8 +4538,9 @@ size_t ZBUFFv07_decompressContinue(ZBUFFv07_DCtx* zbd,
zbd->stage = ZBUFFds_flush;
/* break; */
/* pass-through */
} }
}
}
/* fall-through */
case ZBUFFds_flush:
{ size_t const toFlushSize = zbd->outEnd - zbd->outStart;
size_t const flushedSize = ZBUFFv07_limitCopy(op, oend-op, zbd->outBuff + zbd->outStart, toFlushSize);

View File

@ -19,10 +19,12 @@ extern "C" {
/* ===== ZSTDLIB_API : control library symbols visibility ===== */
#if defined(__GNUC__) && (__GNUC__ >= 4)
# define ZSTDLIB_VISIBILITY __attribute__ ((visibility ("default")))
#else
# define ZSTDLIB_VISIBILITY
#ifndef ZSTDLIB_VISIBILITY
# if defined(__GNUC__) && (__GNUC__ >= 4)
# define ZSTDLIB_VISIBILITY __attribute__ ((visibility ("default")))
# else
# define ZSTDLIB_VISIBILITY
# endif
#endif
#if defined(ZSTD_DLL_EXPORT) && (ZSTD_DLL_EXPORT==1)
# define ZSTDLIB_API __declspec(dllexport) ZSTDLIB_VISIBILITY
@ -36,35 +38,37 @@ extern "C" {
/*******************************************************************************************************
Introduction
zstd, short for Zstandard, is a fast lossless compression algorithm, targeting real-time compression scenarios
at zlib-level and better compression ratios. The zstd compression library provides in-memory compression and
decompression functions. The library supports compression levels from 1 up to ZSTD_maxCLevel() which is 22.
zstd, short for Zstandard, is a fast lossless compression algorithm,
targeting real-time compression scenarios at zlib-level and better compression ratios.
The zstd compression library provides in-memory compression and decompression functions.
The library supports compression levels from 1 up to ZSTD_maxCLevel() which is currently 22.
Levels >= 20, labeled `--ultra`, should be used with caution, as they require more memory.
Compression can be done in:
- a single step (described as Simple API)
- a single step, reusing a context (described as Explicit memory management)
- unbounded multiple steps (described as Streaming compression)
The compression ratio achievable on small data can be highly improved using compression with a dictionary in:
The compression ratio achievable on small data can be highly improved using a dictionary in:
- a single step (described as Simple dictionary API)
- a single step, reusing a dictionary (described as Fast dictionary API)
Advanced experimental functions can be accessed using #define ZSTD_STATIC_LINKING_ONLY before including zstd.h.
These APIs shall never be used with a dynamic library.
Advanced experimental APIs shall never be used with a dynamic library.
They are not "stable", their definition may change in the future. Only static linking is allowed.
*********************************************************************************************************/
/*------ Version ------*/
#define ZSTD_VERSION_MAJOR 1
#define ZSTD_VERSION_MINOR 2
#define ZSTD_VERSION_MINOR 3
#define ZSTD_VERSION_RELEASE 0
#define ZSTD_VERSION_NUMBER (ZSTD_VERSION_MAJOR *100*100 + ZSTD_VERSION_MINOR *100 + ZSTD_VERSION_RELEASE)
ZSTDLIB_API unsigned ZSTD_versionNumber(void); /**< useful to check dll version */
#define ZSTD_LIB_VERSION ZSTD_VERSION_MAJOR.ZSTD_VERSION_MINOR.ZSTD_VERSION_RELEASE
#define ZSTD_QUOTE(str) #str
#define ZSTD_EXPAND_AND_QUOTE(str) ZSTD_QUOTE(str)
#define ZSTD_VERSION_STRING ZSTD_EXPAND_AND_QUOTE(ZSTD_LIB_VERSION)
#define ZSTD_VERSION_NUMBER (ZSTD_VERSION_MAJOR *100*100 + ZSTD_VERSION_MINOR *100 + ZSTD_VERSION_RELEASE)
ZSTDLIB_API unsigned ZSTD_versionNumber(void); /**< library version number; to be used when checking dll version */
ZSTDLIB_API const char* ZSTD_versionString(void); /* v1.3.0 */
/***************************************
@ -81,38 +85,48 @@ ZSTDLIB_API size_t ZSTD_compress( void* dst, size_t dstCapacity,
/*! ZSTD_decompress() :
* `compressedSize` : must be the _exact_ size of some number of compressed and/or skippable frames.
* `dstCapacity` is an upper bound of originalSize.
* `dstCapacity` is an upper bound of originalSize to regenerate.
* If the user cannot infer an upper bound, it's better to use streaming mode to decompress data.
* @return : the number of bytes decompressed into `dst` (<= `dstCapacity`),
* or an errorCode if it fails (which can be tested using ZSTD_isError()). */
ZSTDLIB_API size_t ZSTD_decompress( void* dst, size_t dstCapacity,
const void* src, size_t compressedSize);
/*! ZSTD_getDecompressedSize() :
* NOTE: This function is planned to be obsolete, in favour of ZSTD_getFrameContentSize.
* ZSTD_getFrameContentSize functions the same way, returning the decompressed size of a single
* frame, but distinguishes empty frames from frames with an unknown size, or errors.
*
* Additionally, ZSTD_findDecompressedSize can be used instead. It can handle multiple
* concatenated frames in one buffer, and so is more general.
* As a result however, it requires more computation and entire frames to be passed to it,
* as opposed to ZSTD_getFrameContentSize which requires only a single frame's header.
*
* 'src' is the start of a zstd compressed frame.
* @return : content size to be decompressed, as a 64-bits value _if known_, 0 otherwise.
* note 1 : decompressed size is an optional field, that may not be present, especially in streaming mode.
* When `return==0`, data to decompress could be any size.
/*! ZSTD_getFrameContentSize() : v1.3.0
* `src` should point to the start of a ZSTD encoded frame.
* `srcSize` must be at least as large as the frame header.
* hint : any size >= `ZSTD_frameHeaderSize_max` is large enough.
* @return : - decompressed size of the frame in `src`, if known
* - ZSTD_CONTENTSIZE_UNKNOWN if the size cannot be determined
* - ZSTD_CONTENTSIZE_ERROR if an error occurred (e.g. invalid magic number, srcSize too small)
* note 1 : a 0 return value means the frame is valid but "empty".
* note 2 : decompressed size is an optional field, it may not be present, typically in streaming mode.
* When `return==ZSTD_CONTENTSIZE_UNKNOWN`, data to decompress could be any size.
* In which case, it's necessary to use streaming mode to decompress data.
* Optionally, application can still use ZSTD_decompress() while relying on implied limits.
* (For example, data may be necessarily cut into blocks <= 16 KB).
* note 2 : decompressed size is always present when compression is done with ZSTD_compress()
* note 3 : decompressed size can be very large (64-bits value),
* Optionally, application can rely on some implicit limit,
* as ZSTD_decompress() only needs an upper bound of decompressed size.
* (For example, data could be necessarily cut into blocks <= 16 KB).
* note 3 : decompressed size is always present when compression is done with ZSTD_compress()
* note 4 : decompressed size can be very large (64-bits value),
* potentially larger than what local system can handle as a single memory segment.
* In which case, it's necessary to use streaming mode to decompress data.
* note 4 : If source is untrusted, decompressed size could be wrong or intentionally modified.
* Always ensure result fits within application's authorized limits.
* note 5 : If source is untrusted, decompressed size could be wrong or intentionally modified.
* Always ensure return value fits within application's authorized limits.
* Each application can set its own limits.
* note 5 : when `return==0`, if precise failure cause is needed, use ZSTD_getFrameParams() to know more. */
* note 6 : This function replaces ZSTD_getDecompressedSize() */
#define ZSTD_CONTENTSIZE_UNKNOWN (0ULL - 1)
#define ZSTD_CONTENTSIZE_ERROR (0ULL - 2)
ZSTDLIB_API unsigned long long ZSTD_getFrameContentSize(const void *src, size_t srcSize);
/*! ZSTD_getDecompressedSize() :
* NOTE: This function is now obsolete, in favor of ZSTD_getFrameContentSize().
* Both functions work the same way,
* but ZSTD_getDecompressedSize() blends
* "empty", "unknown" and "error" results in the same return value (0),
* while ZSTD_getFrameContentSize() distinguishes them.
*
* 'src' is the start of a zstd compressed frame.
* @return : content size to be decompressed, as a 64-bits value _if known and not empty_, 0 otherwise. */
ZSTDLIB_API unsigned long long ZSTD_getDecompressedSize(const void* src, size_t srcSize);
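A hedged usage sketch (not from the patch): query the frame header first, then decompress in one shot only when the content size is known and within an application-chosen limit. decompress_whole_frame and the 1 GB cap are illustrative names and values.

#include <stdlib.h>
#include "zstd.h"

/* Returns a malloc'd buffer holding the regenerated frame, or NULL on failure. */
static void* decompress_whole_frame(const void* src, size_t srcSize, size_t* outSize)
{
    unsigned long long const rSize = ZSTD_getFrameContentSize(src, srcSize);
    if (rSize == ZSTD_CONTENTSIZE_ERROR) return NULL;    /* not a valid zstd frame */
    if (rSize == ZSTD_CONTENTSIZE_UNKNOWN) return NULL;  /* size not stored: use streaming mode */
    if (rSize > (1ULL << 30)) return NULL;               /* example application limit (see note 5) */
    {   void* const dst = malloc((size_t)rSize);
        if (dst == NULL) return NULL;
        {   size_t const dSize = ZSTD_decompress(dst, (size_t)rSize, src, srcSize);
            if (ZSTD_isError(dSize)) { free(dst); return NULL; }
            *outSize = dSize;
            return dst;
    }   }
}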
@ -137,29 +151,35 @@ ZSTDLIB_API size_t ZSTD_freeCCtx(ZSTD_CCtx* cctx);
/*! ZSTD_compressCCtx() :
* Same as ZSTD_compress(), requires an allocated ZSTD_CCtx (see ZSTD_createCCtx()). */
ZSTDLIB_API size_t ZSTD_compressCCtx(ZSTD_CCtx* ctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize, int compressionLevel);
ZSTDLIB_API size_t ZSTD_compressCCtx(ZSTD_CCtx* ctx,
void* dst, size_t dstCapacity,
const void* src, size_t srcSize,
int compressionLevel);
/*= Decompression context
* When decompressing many times,
* it is recommended to allocate a context just once, and re-use it for each successive compression operation.
* it is recommended to allocate a context only once,
* and re-use it for each successive decompression operation.
* This will make workload friendlier for system's memory.
* Use one context per thread for parallel execution in multi-threaded environments. */
* Use one context per thread for parallel execution. */
typedef struct ZSTD_DCtx_s ZSTD_DCtx;
ZSTDLIB_API ZSTD_DCtx* ZSTD_createDCtx(void);
ZSTDLIB_API size_t ZSTD_freeDCtx(ZSTD_DCtx* dctx);
/*! ZSTD_decompressDCtx() :
* Same as ZSTD_decompress(), requires an allocated ZSTD_DCtx (see ZSTD_createDCtx()). */
ZSTDLIB_API size_t ZSTD_decompressDCtx(ZSTD_DCtx* ctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize);
* Same as ZSTD_decompress(), requires an allocated ZSTD_DCtx (see ZSTD_createDCtx()) */
ZSTDLIB_API size_t ZSTD_decompressDCtx(ZSTD_DCtx* ctx,
void* dst, size_t dstCapacity,
const void* src, size_t srcSize);
/**************************
* Simple dictionary API
***************************/
/*! ZSTD_compress_usingDict() :
* Compression using a predefined Dictionary (see dictBuilder/zdict.h).
* Note : This function loads the dictionary, resulting in significant startup delay.
* Note : When `dict == NULL || dictSize < 8` no dictionary is used. */
* Compression using a predefined Dictionary (see dictBuilder/zdict.h).
* Note : This function loads the dictionary, resulting in significant startup delay.
* Note : When `dict == NULL || dictSize < 8` no dictionary is used. */
ZSTDLIB_API size_t ZSTD_compress_usingDict(ZSTD_CCtx* ctx,
void* dst, size_t dstCapacity,
const void* src, size_t srcSize,
@ -167,30 +187,31 @@ ZSTDLIB_API size_t ZSTD_compress_usingDict(ZSTD_CCtx* ctx,
int compressionLevel);
/*! ZSTD_decompress_usingDict() :
* Decompression using a predefined Dictionary (see dictBuilder/zdict.h).
* Dictionary must be identical to the one used during compression.
* Note : This function loads the dictionary, resulting in significant startup delay.
* Note : When `dict == NULL || dictSize < 8` no dictionary is used. */
* Decompression using a predefined Dictionary (see dictBuilder/zdict.h).
* Dictionary must be identical to the one used during compression.
* Note : This function loads the dictionary, resulting in significant startup delay.
* Note : When `dict == NULL || dictSize < 8` no dictionary is used. */
ZSTDLIB_API size_t ZSTD_decompress_usingDict(ZSTD_DCtx* dctx,
void* dst, size_t dstCapacity,
const void* src, size_t srcSize,
const void* dict,size_t dictSize);
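For illustration, a round trip with a raw dictionary buffer might look like the sketch below; roundtrip_with_dict, the buffers, and the fixed compression level 3 are placeholders, not part of the header.

#include "zstd.h"

/* Sketch: one-shot compress + decompress with the same raw dictionary buffer. */
static int roundtrip_with_dict(void* dst, size_t dstCapacity,
                               void* regen, size_t regenCapacity,
                               const void* src, size_t srcSize,
                               const void* dict, size_t dictSize)
{
    ZSTD_CCtx* const cctx = ZSTD_createCCtx();
    ZSTD_DCtx* const dctx = ZSTD_createDCtx();
    int ok = 0;
    if (cctx && dctx) {
        size_t const cSize = ZSTD_compress_usingDict(cctx, dst, dstCapacity,
                                    src, srcSize, dict, dictSize, 3 /* level */);
        if (!ZSTD_isError(cSize)) {
            size_t const dSize = ZSTD_decompress_usingDict(dctx, regen, regenCapacity,
                                        dst, cSize, dict, dictSize);
            ok = !ZSTD_isError(dSize) && (dSize == srcSize);
        }
    }
    ZSTD_freeCCtx(cctx);
    ZSTD_freeDCtx(dctx);
    return ok;
}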
/****************************
* Fast dictionary API
****************************/
/**********************************
* Bulk processing dictionary API
*********************************/
typedef struct ZSTD_CDict_s ZSTD_CDict;
/*! ZSTD_createCDict() :
* When compressing multiple messages / blocks with the same dictionary, it's recommended to load it just once.
* ZSTD_createCDict() will create a digested dictionary, ready to start future compression operations without startup delay.
* ZSTD_CDict can be created once and used by multiple threads concurrently, as its usage is read-only.
* `dictBuffer` can be released after ZSTD_CDict creation, as its content is copied within CDict */
ZSTDLIB_API ZSTD_CDict* ZSTD_createCDict(const void* dictBuffer, size_t dictSize, int compressionLevel);
* When compressing multiple messages / blocks with the same dictionary, it's recommended to load it just once.
* ZSTD_createCDict() will create a digested dictionary, ready to start future compression operations without startup delay.
* ZSTD_CDict can be created once and shared by multiple threads concurrently, since its usage is read-only.
* `dictBuffer` can be released after ZSTD_CDict creation, since its content is copied within CDict */
ZSTDLIB_API ZSTD_CDict* ZSTD_createCDict(const void* dictBuffer, size_t dictSize,
int compressionLevel);
/*! ZSTD_freeCDict() :
* Function frees memory allocated by ZSTD_createCDict(). */
* Function frees memory allocated by ZSTD_createCDict(). */
ZSTDLIB_API size_t ZSTD_freeCDict(ZSTD_CDict* CDict);
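A sketch of the intended bulk-processing pattern: digest the dictionary once, then reuse it across many compressions. compress_many and its back-to-back packing of outputs are illustrative only.

#include "zstd.h"

/* Sketch: compress nbSrcs independent inputs with one shared, pre-digested dictionary. */
static size_t compress_many(void* dst, size_t dstCapacity,
                            const void* const* srcs, const size_t* srcSizes, unsigned nbSrcs,
                            const void* dict, size_t dictSize, int level)
{
    ZSTD_CDict* const cdict = ZSTD_createCDict(dict, dictSize, level);  /* digested once */
    ZSTD_CCtx*  const cctx  = ZSTD_createCCtx();
    size_t totalOut = 0;
    if (cdict && cctx) {
        char* op = (char*)dst;
        unsigned i;
        for (i = 0; i < nbSrcs; i++) {
            size_t const cSize = ZSTD_compress_usingCDict(cctx, op, dstCapacity - totalOut,
                                        srcs[i], srcSizes[i], cdict);
            if (ZSTD_isError(cSize)) { totalOut = 0; break; }
            op += cSize; totalOut += cSize;
        }
    }
    ZSTD_freeCCtx(cctx);
    ZSTD_freeCDict(cdict);
    return totalOut;
}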
/*! ZSTD_compress_usingCDict() :
@ -207,17 +228,17 @@ ZSTDLIB_API size_t ZSTD_compress_usingCDict(ZSTD_CCtx* cctx,
typedef struct ZSTD_DDict_s ZSTD_DDict;
/*! ZSTD_createDDict() :
* Create a digested dictionary, ready to start decompression operation without startup delay.
* dictBuffer can be released after DDict creation, as its content is copied inside DDict */
* Create a digested dictionary, ready to start decompression operation without startup delay.
* dictBuffer can be released after DDict creation, as its content is copied inside DDict */
ZSTDLIB_API ZSTD_DDict* ZSTD_createDDict(const void* dictBuffer, size_t dictSize);
/*! ZSTD_freeDDict() :
* Function frees memory allocated with ZSTD_createDDict() */
* Function frees memory allocated with ZSTD_createDDict() */
ZSTDLIB_API size_t ZSTD_freeDDict(ZSTD_DDict* ddict);
/*! ZSTD_decompress_usingDDict() :
* Decompression using a digested Dictionary.
* Faster startup than ZSTD_decompress_usingDict(), recommended when same dictionary is used multiple times. */
* Decompression using a digested Dictionary.
* Faster startup than ZSTD_decompress_usingDict(), recommended when same dictionary is used multiple times. */
ZSTDLIB_API size_t ZSTD_decompress_usingDDict(ZSTD_DCtx* dctx,
void* dst, size_t dstCapacity,
const void* src, size_t srcSize,
@ -274,14 +295,17 @@ typedef struct ZSTD_outBuffer_s {
* ZSTD_endStream() instructs to finish a frame.
* It will perform a flush and write frame epilogue.
* The epilogue is required for decoders to consider a frame completed.
* Similar to ZSTD_flushStream(), it may not be able to flush the full content if `output->size` is too small.
* ZSTD_endStream() may not be able to flush full data if `output->size` is too small.
* In which case, call again ZSTD_endStream() to complete the flush.
* @return : nb of bytes still present within internal buffer (0 if it's empty, hence compression completed)
* @return : 0 if frame fully completed and fully flushed,
or >0 if some data is still present within internal buffer
(value is minimum size estimation for remaining data to flush, but it could be more)
* or an error code, which can be tested using ZSTD_isError().
*
* *******************************************************************/
typedef struct ZSTD_CStream_s ZSTD_CStream;
typedef ZSTD_CCtx ZSTD_CStream; /**< CCtx and CStream are now effectively same object (>= v1.3.0) */
/* Continue to distinguish them for compatibility with versions <= v1.2.0 */
/*===== ZSTD_CStream management functions =====*/
ZSTDLIB_API ZSTD_CStream* ZSTD_createCStream(void);
ZSTDLIB_API size_t ZSTD_freeCStream(ZSTD_CStream* zcs);
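A minimal streaming-compression sketch, assuming the destination is sized with ZSTD_compressBound() so the flush logic stays trivial; stream_compress is a hypothetical helper.

#include "zstd.h"

/* Sketch: whole-buffer compression through the streaming interface. */
static size_t stream_compress(void* dst, size_t dstCapacity,
                              const void* src, size_t srcSize, int level)
{
    size_t result = 0;
    ZSTD_CStream* const zcs = ZSTD_createCStream();
    if (zcs == NULL) return 0;
    if (dstCapacity >= ZSTD_compressBound(srcSize)
        && !ZSTD_isError(ZSTD_initCStream(zcs, level))) {
        ZSTD_inBuffer  input  = { src, srcSize, 0 };
        ZSTD_outBuffer output = { dst, dstCapacity, 0 };
        size_t ret = 0;
        while (input.pos < input.size) {              /* feed all input */
            ret = ZSTD_compressStream(zcs, &output, &input);
            if (ZSTD_isError(ret)) break;
        }
        if (!ZSTD_isError(ret)) {
            do {                                      /* write epilogue; 0 means fully flushed */
                ret = ZSTD_endStream(zcs, &output);
            } while (ret != 0 && !ZSTD_isError(ret));
            if (!ZSTD_isError(ret)) result = output.pos;
        }
    }
    ZSTD_freeCStream(zcs);
    return result;
}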
@ -319,7 +343,8 @@ ZSTDLIB_API size_t ZSTD_CStreamOutSize(void); /**< recommended size for output
* The return value is a suggested next input size (a hint to improve latency) that will never load more than the current frame.
* *******************************************************************************/
typedef struct ZSTD_DStream_s ZSTD_DStream;
typedef ZSTD_DCtx ZSTD_DStream; /**< DCtx and DStream are now effectively same object (>= v1.3.0) */
/* Continue to distinguish them for compatibility with versions <= v1.2.0 */
/*===== ZSTD_DStream management functions =====*/
ZSTDLIB_API ZSTD_DStream* ZSTD_createDStream(void);
ZSTDLIB_API size_t ZSTD_freeDStream(ZSTD_DStream* zds);
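And the matching streaming-decompression sketch for an in-memory frame; stream_decompress is hypothetical, and the loop relies on the return convention of ZSTD_decompressStream() (0 once the frame is fully decoded and flushed, >0 while more work remains, or an error code).

#include "zstd.h"

/* Sketch: decompress one frame held in memory into a caller-provided buffer.
 * Returns the number of bytes written, or 0 on error / insufficient dstCapacity. */
static size_t stream_decompress(void* dst, size_t dstCapacity,
                                const void* src, size_t srcSize)
{
    size_t written = 0;
    ZSTD_DStream* const zds = ZSTD_createDStream();
    if (zds == NULL) return 0;
    if (!ZSTD_isError(ZSTD_initDStream(zds))) {
        ZSTD_inBuffer  input  = { src, srcSize, 0 };
        ZSTD_outBuffer output = { dst, dstCapacity, 0 };
        while (1) {
            size_t const ret = ZSTD_decompressStream(zds, &output, &input);
            if (ZSTD_isError(ret)) break;                   /* invalid frame */
            if (ret == 0) { written = output.pos; break; }  /* frame fully decoded and flushed */
            if (output.pos == output.size) break;           /* dst too small */
            if (input.pos == input.size) break;             /* truncated input */
        }
    }
    ZSTD_freeDStream(zds);
    return written;
}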
@ -334,23 +359,22 @@ ZSTDLIB_API size_t ZSTD_DStreamOutSize(void); /*!< recommended size for output
#endif /* ZSTD_H_235446 */
#if defined(ZSTD_STATIC_LINKING_ONLY) && !defined(ZSTD_H_ZSTD_STATIC_LINKING_ONLY)
#define ZSTD_H_ZSTD_STATIC_LINKING_ONLY
/****************************************************************************************
* START OF ADVANCED AND EXPERIMENTAL FUNCTIONS
* The definitions in this section are considered experimental.
* They should never be used with a dynamic library, as they may change in the future.
* They are provided for advanced usages.
* They should never be used with a dynamic library, as prototypes may change in the future.
* They are provided for advanced scenarios.
* Use them only in association with static linking.
* ***************************************************************************************/
#if defined(ZSTD_STATIC_LINKING_ONLY) && !defined(ZSTD_H_ZSTD_STATIC_LINKING_ONLY)
#define ZSTD_H_ZSTD_STATIC_LINKING_ONLY
/* --- Constants ---*/
#define ZSTD_MAGICNUMBER 0xFD2FB528 /* >= v0.8.0 */
#define ZSTD_MAGIC_SKIPPABLE_START 0x184D2A50U
#define ZSTD_CONTENTSIZE_UNKNOWN (0ULL - 1)
#define ZSTD_CONTENTSIZE_ERROR (0ULL - 2)
#define ZSTD_MAGIC_DICTIONARY 0xEC30A437 /* v0.7+ */
#define ZSTD_WINDOWLOG_MAX_32 27
#define ZSTD_WINDOWLOG_MAX_64 27
@ -370,14 +394,15 @@ ZSTDLIB_API size_t ZSTD_DStreamOutSize(void); /*!< recommended size for output
#define ZSTD_FRAMEHEADERSIZE_MAX 18 /* for static allocation */
#define ZSTD_FRAMEHEADERSIZE_MIN 6
static const size_t ZSTD_frameHeaderSize_prefix = 5;
static const size_t ZSTD_frameHeaderSize_min = ZSTD_FRAMEHEADERSIZE_MIN;
static const size_t ZSTD_frameHeaderSize_prefix = 5; /* minimum input size to know frame header size */
static const size_t ZSTD_frameHeaderSize_max = ZSTD_FRAMEHEADERSIZE_MAX;
static const size_t ZSTD_frameHeaderSize_min = ZSTD_FRAMEHEADERSIZE_MIN;
static const size_t ZSTD_skippableHeaderSize = 8; /* magic number + skippable frame length */
/*--- Advanced types ---*/
typedef enum { ZSTD_fast, ZSTD_dfast, ZSTD_greedy, ZSTD_lazy, ZSTD_lazy2, ZSTD_btlazy2, ZSTD_btopt, ZSTD_btopt2 } ZSTD_strategy; /* from faster to stronger */
typedef enum { ZSTD_fast=1, ZSTD_dfast, ZSTD_greedy, ZSTD_lazy, ZSTD_lazy2,
ZSTD_btlazy2, ZSTD_btopt, ZSTD_btultra } ZSTD_strategy; /* from faster to stronger */
typedef struct {
unsigned windowLog; /**< largest match distance : larger == more compression, more memory needed during decompression */
@ -400,76 +425,141 @@ typedef struct {
ZSTD_frameParameters fParams;
} ZSTD_parameters;
typedef struct {
unsigned long long frameContentSize;
size_t windowSize;
unsigned dictID;
unsigned checksumFlag;
} ZSTD_frameHeader;
/*= Custom memory allocation functions */
typedef void* (*ZSTD_allocFunction) (void* opaque, size_t size);
typedef void (*ZSTD_freeFunction) (void* opaque, void* address);
typedef struct { ZSTD_allocFunction customAlloc; ZSTD_freeFunction customFree; void* opaque; } ZSTD_customMem;
/* use this constant to defer to stdlib's functions */
static const ZSTD_customMem ZSTD_defaultCMem = { NULL, NULL, NULL };
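A sketch of plugging custom allocation hooks into the ZSTD_customMem interface above; my_alloc/my_free simply wrap malloc/free, and create_tracked_cctx is a hypothetical helper (ZSTD_createCCtx_advanced() is declared further below in this header).

#include <stdlib.h>
#include "zstd.h"

/* Illustrative allocator hooks; `opaque` could point at an arena or accounting block (unused here). */
static void* my_alloc(void* opaque, size_t size)   { (void)opaque; return malloc(size); }
static void  my_free (void* opaque, void* address) { (void)opaque; free(address); }

static ZSTD_CCtx* create_tracked_cctx(void)
{
    ZSTD_customMem const cmem = { my_alloc, my_free, NULL };
    return ZSTD_createCCtx_advanced(cmem);
}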
/***************************************
* Compressed size functions
* Frame size functions
***************************************/
/*! ZSTD_findFrameCompressedSize() :
* `src` should point to the start of a ZSTD encoded frame or skippable frame
* `srcSize` must be at least as large as the frame
* @return : the compressed size of the frame pointed to by `src`, suitable to pass to
* `ZSTD_decompress` or similar, or an error code if given invalid input. */
* @return : the compressed size of the first frame starting at `src`,
* suitable to pass to `ZSTD_decompress` or similar,
* or an error code if input is invalid */
ZSTDLIB_API size_t ZSTD_findFrameCompressedSize(const void* src, size_t srcSize);
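For example, ZSTD_findFrameCompressedSize() can be used to walk a buffer of concatenated frames; count_frames is an illustrative helper, not part of the API.

#include "zstd.h"

/* Sketch: count how many frames are concatenated in `src` (stops on the first error). */
static unsigned count_frames(const void* src, size_t srcSize)
{
    const char* ptr = (const char*)src;
    size_t remaining = srcSize;
    unsigned nbFrames = 0;
    while (remaining) {
        size_t const frameSize = ZSTD_findFrameCompressedSize(ptr, remaining);
        if (ZSTD_isError(frameSize)) break;
        ptr += frameSize; remaining -= frameSize; nbFrames++;
    }
    return nbFrames;
}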
/***************************************
* Decompressed size functions
***************************************/
/*! ZSTD_getFrameContentSize() :
* `src` should point to the start of a ZSTD encoded frame
* `srcSize` must be at least as large as the frame header. A value greater than or equal
* to `ZSTD_frameHeaderSize_max` is guaranteed to be large enough in all cases.
* @return : decompressed size of the frame pointed to be `src` if known, otherwise
* - ZSTD_CONTENTSIZE_UNKNOWN if the size cannot be determined
* - ZSTD_CONTENTSIZE_ERROR if an error occurred (e.g. invalid magic number, srcSize too small) */
ZSTDLIB_API unsigned long long ZSTD_getFrameContentSize(const void *src, size_t srcSize);
/*! ZSTD_findDecompressedSize() :
* `src` should point the start of a series of ZSTD encoded and/or skippable frames
* `srcSize` must be the _exact_ size of this series
* (i.e. there should be a frame boundary exactly `srcSize` bytes after `src`)
* @return : the decompressed size of all data in the contained frames, as a 64-bit value _if known_
* - if the decompressed size cannot be determined: ZSTD_CONTENTSIZE_UNKNOWN
* - if an error occurred: ZSTD_CONTENTSIZE_ERROR
*
* note 1 : decompressed size is an optional field, that may not be present, especially in streaming mode.
* When `return==ZSTD_CONTENTSIZE_UNKNOWN`, data to decompress could be any size.
* In which case, it's necessary to use streaming mode to decompress data.
* Optionally, application can still use ZSTD_decompress() while relying on implied limits.
* (For example, data may be necessarily cut into blocks <= 16 KB).
* note 2 : decompressed size is always present when compression is done with ZSTD_compress()
* note 3 : decompressed size can be very large (64-bits value),
* potentially larger than what local system can handle as a single memory segment.
* In which case, it's necessary to use streaming mode to decompress data.
* note 4 : If source is untrusted, decompressed size could be wrong or intentionally modified.
* Always ensure result fits within application's authorized limits.
* Each application can set its own limits.
* note 5 : ZSTD_findDecompressedSize handles multiple frames, and so it must traverse the input to
* read each contained frame header. This is efficient as most of the data is skipped,
* however it does mean that all frame data must be present and valid. */
* `src` should point the start of a series of ZSTD encoded and/or skippable frames
* `srcSize` must be the _exact_ size of this series
* (i.e. there should be a frame boundary exactly at `srcSize` bytes after `src`)
* @return : - decompressed size of all data in all successive frames
* - if the decompressed size cannot be determined: ZSTD_CONTENTSIZE_UNKNOWN
* - if an error occurred: ZSTD_CONTENTSIZE_ERROR
*
* note 1 : decompressed size is an optional field, that may not be present, especially in streaming mode.
* When `return==ZSTD_CONTENTSIZE_UNKNOWN`, data to decompress could be any size.
* In which case, it's necessary to use streaming mode to decompress data.
* note 2 : decompressed size is always present when compression is done with ZSTD_compress()
* note 3 : decompressed size can be very large (64-bits value),
* potentially larger than what local system can handle as a single memory segment.
* In which case, it's necessary to use streaming mode to decompress data.
* note 4 : If source is untrusted, decompressed size could be wrong or intentionally modified.
* Always ensure result fits within application's authorized limits.
* Each application can set its own limits.
* note 5 : ZSTD_findDecompressedSize handles multiple frames, and so it must traverse the input to
* read each contained frame header. This is fast as most of the data is skipped,
* however it does mean that all frame data must be present and valid. */
ZSTDLIB_API unsigned long long ZSTD_findDecompressedSize(const void* src, size_t srcSize);
/*! ZSTD_frameHeaderSize() :
* `src` should point to the start of a ZSTD frame
* `srcSize` must be >= ZSTD_frameHeaderSize_prefix.
* @return : size of the Frame Header */
ZSTDLIB_API size_t ZSTD_frameHeaderSize(const void* src, size_t srcSize);
/***************************************
* Context memory usage
***************************************/
/*! ZSTD_sizeof_*() :
* These functions give the current memory usage of selected object.
* Object memory usage can evolve if it's re-used multiple times. */
ZSTDLIB_API size_t ZSTD_sizeof_CCtx(const ZSTD_CCtx* cctx);
ZSTDLIB_API size_t ZSTD_sizeof_DCtx(const ZSTD_DCtx* dctx);
ZSTDLIB_API size_t ZSTD_sizeof_CStream(const ZSTD_CStream* zcs);
ZSTDLIB_API size_t ZSTD_sizeof_DStream(const ZSTD_DStream* zds);
ZSTDLIB_API size_t ZSTD_sizeof_CDict(const ZSTD_CDict* cdict);
ZSTDLIB_API size_t ZSTD_sizeof_DDict(const ZSTD_DDict* ddict);
/*! ZSTD_estimate*() :
* These functions make it possible to estimate memory usage
* of a future {D,C}Ctx, before its creation.
* ZSTD_estimateCCtxSize() will provide a budget large enough for any compression level up to selected one.
* It will also consider src size to be arbitrarily "large", which is worst case.
* If srcSize is known to always be small, ZSTD_estimateCCtxSize_advanced() can provide a tighter estimation.
* ZSTD_estimateCCtxSize_advanced() can be used in tandem with ZSTD_getCParams() to create cParams from compressionLevel.
* Note : CCtx estimation is only correct for single-threaded compression */
ZSTDLIB_API size_t ZSTD_estimateCCtxSize(int compressionLevel);
ZSTDLIB_API size_t ZSTD_estimateCCtxSize_advanced(ZSTD_compressionParameters cParams);
ZSTDLIB_API size_t ZSTD_estimateDCtxSize(void);
/*! ZSTD_estimate?StreamSize() :
* ZSTD_estimateCStreamSize() will provide a budget large enough for any compression level up to selected one.
* It will also consider src size to be arbitrarily "large", which is worst case.
* If srcSize is known to always be small, ZSTD_estimateCStreamSize_advanced() can provide a tighter estimation.
* ZSTD_estimateCStreamSize_advanced() can be used in tandem with ZSTD_getCParams() to create cParams from compressionLevel.
* Note : CStream estimation is only correct for single-threaded compression.
* ZSTD_DStream memory budget depends on window Size.
* This information can be passed manually, using ZSTD_estimateDStreamSize,
* or deduced from a valid frame header, using ZSTD_estimateDStreamSize_fromFrame();
* Note : if streaming is initialized with ZSTD_init?Stream_usingDict(),
* an internal ?Dict will be created, whose additional size is not estimated here.
* In this case, get total size by adding ZSTD_estimate?DictSize */
ZSTDLIB_API size_t ZSTD_estimateCStreamSize(int compressionLevel);
ZSTDLIB_API size_t ZSTD_estimateCStreamSize_advanced(ZSTD_compressionParameters cParams);
ZSTDLIB_API size_t ZSTD_estimateDStreamSize(size_t windowSize);
ZSTDLIB_API size_t ZSTD_estimateDStreamSize_fromFrame(const void* src, size_t srcSize);
/*! ZSTD_estimate?DictSize() :
* ZSTD_estimateCDictSize() will bet that src size is relatively "small", and that content is copied, like ZSTD_createCDict().
* ZSTD_estimateCDictSize_advanced() makes it possible to control compression parameters precisely, like ZSTD_createCDict_advanced().
* Note : dictionaries created "byReference" are smaller */
ZSTDLIB_API size_t ZSTD_estimateCDictSize(size_t dictSize, int compressionLevel);
ZSTDLIB_API size_t ZSTD_estimateCDictSize_advanced(size_t dictSize, ZSTD_compressionParameters cParams, unsigned byReference);
ZSTDLIB_API size_t ZSTD_estimateDDictSize(size_t dictSize, unsigned byReference);
/***************************************
* Advanced compression functions
***************************************/
/*! ZSTD_estimateCCtxSize() :
* Gives the amount of memory allocated for a ZSTD_CCtx given a set of compression parameters.
* `frameContentSize` is an optional parameter, provide `0` if unknown */
ZSTDLIB_API size_t ZSTD_estimateCCtxSize(ZSTD_compressionParameters cParams);
/*! ZSTD_createCCtx_advanced() :
* Create a ZSTD compression context using external alloc and free functions */
ZSTDLIB_API ZSTD_CCtx* ZSTD_createCCtx_advanced(ZSTD_customMem customMem);
/*! ZSTD_sizeofCCtx() :
* Gives the amount of memory used by a given ZSTD_CCtx */
ZSTDLIB_API size_t ZSTD_sizeof_CCtx(const ZSTD_CCtx* cctx);
/*! ZSTD_initStaticCCtx() : initialize a fixed-size zstd compression context
* workspace: The memory area to emplace the context into.
* Provided pointer must be 8-byte aligned.
* It must outlive context usage.
* workspaceSize: Use ZSTD_estimateCCtxSize() or ZSTD_estimateCStreamSize()
* to determine how large the workspace must be to support the scenario.
* @return : pointer to ZSTD_CCtx*, or NULL if error (size too small)
* Note : zstd will never resize nor malloc() when using a static cctx.
* If it needs more memory than available, it will simply error out.
* Note 2 : there is no corresponding "free" function.
* Since workspace was allocated externally, it must be freed externally too.
* Limitation 1 : currently not compatible with internal CDict creation, such as
* ZSTD_CCtx_loadDictionary() or ZSTD_initCStream_usingDict().
* Limitation 2 : currently not compatible with multi-threading
*/
ZSTDLIB_API ZSTD_CCtx* ZSTD_initStaticCCtx(void* workspace, size_t workspaceSize);
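A hedged sketch combining ZSTD_estimateCCtxSize() with ZSTD_initStaticCCtx(): size a workspace for the chosen level, emplace the context, compress, then release the workspace directly (there is no free function for a static cctx). compress_with_static_cctx is a hypothetical helper.

#include <stdlib.h>
#include "zstd.h"

/* Sketch: place a CCtx into a caller-provided workspace sized via ZSTD_estimateCCtxSize(). */
static size_t compress_with_static_cctx(void* dst, size_t dstCapacity,
                                        const void* src, size_t srcSize, int level)
{
    size_t const wkspSize = ZSTD_estimateCCtxSize(level);
    void* const wksp = malloc(wkspSize);               /* malloc() returns suitably aligned memory */
    if (wksp == NULL) return 0;
    {   ZSTD_CCtx* const cctx = ZSTD_initStaticCCtx(wksp, wkspSize);
        if (cctx == NULL) { free(wksp); return 0; }    /* workspace too small */
        {   size_t const cSize = ZSTD_compressCCtx(cctx, dst, dstCapacity, src, srcSize, level);
            free(wksp);                                /* no ZSTD_freeCCtx() for a static cctx */
            return ZSTD_isError(cSize) ? 0 : cSize;
    }   }
}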
/* !!! To be deprecated !!! */
typedef enum {
ZSTD_p_forceWindow, /* Force back-references to remain < windowSize, even when referencing Dictionary content (default:0) */
ZSTD_p_forceRawDict /* Force loading dictionary in "content-only" mode (no header analysis) */
@ -479,20 +569,43 @@ typedef enum {
* @result : 0, or an error code (which can be tested with ZSTD_isError()) */
ZSTDLIB_API size_t ZSTD_setCCtxParameter(ZSTD_CCtx* cctx, ZSTD_CCtxParameter param, unsigned value);
/*! ZSTD_createCDict_byReference() :
* Create a digested dictionary for compression
* Dictionary content is simply referenced, and therefore stays in dictBuffer.
* It is important that dictBuffer outlives CDict, it must remain read accessible throughout the lifetime of CDict */
ZSTDLIB_API ZSTD_CDict* ZSTD_createCDict_byReference(const void* dictBuffer, size_t dictSize, int compressionLevel);
typedef enum { ZSTD_dm_auto=0, /* dictionary is "full" if it starts with ZSTD_MAGIC_DICTIONARY, otherwise it is "rawContent" */
ZSTD_dm_rawContent, /* ensures dictionary is always loaded as rawContent, even if it starts with ZSTD_MAGIC_DICTIONARY */
ZSTD_dm_fullDict /* refuses to load a dictionary if it does not respect Zstandard's specification */
} ZSTD_dictMode_e;
/*! ZSTD_createCDict_advanced() :
* Create a ZSTD_CDict using external alloc and free, and customized compression parameters */
ZSTDLIB_API ZSTD_CDict* ZSTD_createCDict_advanced(const void* dict, size_t dictSize, unsigned byReference,
ZSTD_compressionParameters cParams, ZSTD_customMem customMem);
ZSTDLIB_API ZSTD_CDict* ZSTD_createCDict_advanced(const void* dict, size_t dictSize,
unsigned byReference, ZSTD_dictMode_e dictMode,
ZSTD_compressionParameters cParams,
ZSTD_customMem customMem);
/*! ZSTD_sizeof_CDict() :
* Gives the amount of memory used by a given ZSTD_CDict */
ZSTDLIB_API size_t ZSTD_sizeof_CDict(const ZSTD_CDict* cdict);
/*! ZSTD_initStaticCDict_advanced() :
* Generate a digested dictionary in provided memory area.
* workspace: The memory area to emplace the dictionary into.
* Provided pointer must be 8-byte aligned.
* It must outlive dictionary usage.
* workspaceSize: Use ZSTD_estimateCDictSize()
* to determine how large workspace must be.
* cParams : use ZSTD_getCParams() to transform a compression level
* into its relevant cParams.
* @return : pointer to ZSTD_CDict*, or NULL if error (size too small)
* Note : there is no corresponding "free" function.
* Since workspace was allocated externally, it must be freed externally.
*/
ZSTDLIB_API ZSTD_CDict* ZSTD_initStaticCDict(
void* workspace, size_t workspaceSize,
const void* dict, size_t dictSize,
unsigned byReference, ZSTD_dictMode_e dictMode,
ZSTD_compressionParameters cParams);
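A sketch of building a static CDict: derive cParams from a level with ZSTD_getCParams(), size the workspace with ZSTD_estimateCDictSize_advanced(), then emplace. make_static_cdict and its workspace-ownership convention are illustrative, and the (level, estimatedSrcSize, dictSize) argument order of ZSTD_getCParams() is assumed from its description just below.

#include <stdlib.h>
#include "zstd.h"

/* Sketch: build a digested dictionary inside a caller-owned workspace. */
static ZSTD_CDict* make_static_cdict(void** wkspOut,
                                     const void* dict, size_t dictSize, int level)
{
    ZSTD_compressionParameters const cParams = ZSTD_getCParams(level, 0 /* srcSize unknown */, dictSize);
    size_t const wkspSize = ZSTD_estimateCDictSize_advanced(dictSize, cParams, 0 /* content copied */);
    void* const wksp = malloc(wkspSize);               /* 8-byte aligned */
    if (wksp == NULL) return NULL;
    {   ZSTD_CDict* const cdict = ZSTD_initStaticCDict(wksp, wkspSize,
                                        dict, dictSize,
                                        0 /* byReference */, ZSTD_dm_auto,
                                        cParams);
        if (cdict == NULL) { free(wksp); return NULL; }
        *wkspOut = wksp;     /* caller frees the workspace once done with cdict */
        return cdict;
    }
}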
/*! ZSTD_getCParams() :
* @return ZSTD_compressionParameters structure for a selected compression level and estimated srcSize.
@ -509,8 +622,8 @@ ZSTDLIB_API ZSTD_parameters ZSTD_getParams(int compressionLevel, unsigned long l
ZSTDLIB_API size_t ZSTD_checkCParams(ZSTD_compressionParameters params);
/*! ZSTD_adjustCParams() :
* optimize params for a given `srcSize` and `dictSize`.
* both values are optional, select `0` if unknown. */
* optimize params for a given `srcSize` and `dictSize`.
* both values are optional, select `0` if unknown. */
ZSTDLIB_API ZSTD_compressionParameters ZSTD_adjustCParams(ZSTD_compressionParameters cPar, unsigned long long srcSize, size_t dictSize);
/*! ZSTD_compress_advanced() :
@ -538,22 +651,32 @@ ZSTDLIB_API size_t ZSTD_compress_usingCDict_advanced(ZSTD_CCtx* cctx,
* Note 3 : Skippable Frame Identifiers are considered valid. */
ZSTDLIB_API unsigned ZSTD_isFrame(const void* buffer, size_t size);
/*! ZSTD_estimateDCtxSize() :
* Gives the potential amount of memory allocated to create a ZSTD_DCtx */
ZSTDLIB_API size_t ZSTD_estimateDCtxSize(void);
/*! ZSTD_createDCtx_advanced() :
* Create a ZSTD decompression context using external alloc and free functions */
ZSTDLIB_API ZSTD_DCtx* ZSTD_createDCtx_advanced(ZSTD_customMem customMem);
/*! ZSTD_sizeof_DCtx() :
* Gives the amount of memory used by a given ZSTD_DCtx */
ZSTDLIB_API size_t ZSTD_sizeof_DCtx(const ZSTD_DCtx* dctx);
/*! ZSTD_initStaticDCtx() : initialize a fixed-size zstd decompression context
* workspace: The memory area to emplace the context into.
* Provided pointer must be 8-byte aligned.
* It must outlive context usage.
* workspaceSize: Use ZSTD_estimateDCtxSize() or ZSTD_estimateDStreamSize()
* to determine how large the workspace must be to support the scenario.
* @return : pointer to ZSTD_DCtx*, or NULL if error (size too small)
* Note : zstd will never resize nor malloc() when using a static dctx.
* If it needs more memory than available, it will simply error out.
* Note 2 : static dctx is incompatible with legacy support
* Note 3 : there is no corresponding "free" function.
* Since workspace was allocated externally, it must be freed externally.
* Limitation : currently not compatible with internal DDict creation,
* such as ZSTD_initDStream_usingDict().
*/
ZSTDLIB_API ZSTD_DCtx* ZSTD_initStaticDCtx(void* workspace, size_t workspaceSize);
/*! ZSTD_createDDict_byReference() :
* Create a digested dictionary, ready to start decompression operation without startup delay.
* Dictionary content is simply referenced, and therefore stays in dictBuffer.
* It is important that dictBuffer outlives DDict, it must remain read accessible throughout the lifetime of DDict */
* Dictionary content is referenced, and therefore stays in dictBuffer.
* It is important that dictBuffer outlives DDict,
* it must remain read accessible throughout the lifetime of DDict */
ZSTDLIB_API ZSTD_DDict* ZSTD_createDDict_byReference(const void* dictBuffer, size_t dictSize);
/*! ZSTD_createDDict_advanced() :
@ -561,9 +684,20 @@ ZSTDLIB_API ZSTD_DDict* ZSTD_createDDict_byReference(const void* dictBuffer, siz
ZSTDLIB_API ZSTD_DDict* ZSTD_createDDict_advanced(const void* dict, size_t dictSize,
unsigned byReference, ZSTD_customMem customMem);
/*! ZSTD_sizeof_DDict() :
* Gives the amount of memory used by a given ZSTD_DDict */
ZSTDLIB_API size_t ZSTD_sizeof_DDict(const ZSTD_DDict* ddict);
/*! ZSTD_initStaticDDict() :
* Generate a digested dictionary in provided memory area.
* workspace: The memory area to emplace the dictionary into.
* Provided pointer must be 8-byte aligned.
* It must outlive dictionary usage.
* workspaceSize: Use ZSTD_estimateDDictSize()
* to determine how large workspace must be.
* @return : pointer to ZSTD_DDict*, or NULL if error (size too small)
* Note : there is no corresponding "free" function.
* Since workspace was allocated externally, it must be freed externally.
*/
ZSTDLIB_API ZSTD_DDict* ZSTD_initStaticDDict(void* workspace, size_t workspaceSize,
const void* dict, size_t dictSize,
unsigned byReference);
/*! ZSTD_getDictID_fromDict() :
* Provides the dictID stored within dictionary.
@ -586,7 +720,7 @@ ZSTDLIB_API unsigned ZSTD_getDictID_fromDDict(const ZSTD_DDict* ddict);
* Note : this use case also happens when using a non-conformant dictionary.
* - `srcSize` is too small, and as a result, the frame header could not be decoded (only possible if `srcSize < ZSTD_FRAMEHEADERSIZE_MAX`).
* - This is not a Zstandard frame.
* When identifying the exact failure cause, it's possible to use ZSTD_getFrameParams(), which will provide a more precise error code. */
* When identifying the exact failure cause, it's possible to use ZSTD_getFrameHeader(), which will provide a more precise error code. */
ZSTDLIB_API unsigned ZSTD_getDictID_fromFrame(const void* src, size_t srcSize);
@ -596,13 +730,13 @@ ZSTDLIB_API unsigned ZSTD_getDictID_fromFrame(const void* src, size_t srcSize);
/*===== Advanced Streaming compression functions =====*/
ZSTDLIB_API ZSTD_CStream* ZSTD_createCStream_advanced(ZSTD_customMem customMem);
ZSTDLIB_API size_t ZSTD_sizeof_CStream(const ZSTD_CStream* zcs); /**< size of CStream is variable, depending primarily on compression level */
ZSTDLIB_API ZSTD_CStream* ZSTD_initStaticCStream(void* workspace, size_t workspaceSize); /**< same as ZSTD_initStaticCCtx() */
ZSTDLIB_API size_t ZSTD_initCStream_srcSize(ZSTD_CStream* zcs, int compressionLevel, unsigned long long pledgedSrcSize); /**< pledgedSrcSize must be correct; a size of 0 means unknown. For a frame size of 0, use ZSTD_initCStream_advanced() */
ZSTDLIB_API size_t ZSTD_initCStream_usingDict(ZSTD_CStream* zcs, const void* dict, size_t dictSize, int compressionLevel); /**< note: a dict will not be used if dict == NULL or dictSize < 8 */
ZSTDLIB_API size_t ZSTD_initCStream_usingDict(ZSTD_CStream* zcs, const void* dict, size_t dictSize, int compressionLevel); /**< creates an internal CDict (incompatible with static CCtx), except if dict == NULL or dictSize < 8, in which case no dict is used. */
ZSTDLIB_API size_t ZSTD_initCStream_advanced(ZSTD_CStream* zcs, const void* dict, size_t dictSize,
ZSTD_parameters params, unsigned long long pledgedSrcSize); /**< pledgedSrcSize is optional and can be 0 (meaning unknown). note: if the contentSizeFlag is set, pledgedSrcSize == 0 means the source size is actually 0 */
ZSTDLIB_API size_t ZSTD_initCStream_usingCDict(ZSTD_CStream* zcs, const ZSTD_CDict* cdict); /**< note : cdict will just be referenced, and must outlive compression session */
ZSTDLIB_API size_t ZSTD_initCStream_usingCDict_advanced(ZSTD_CStream* zcs, const ZSTD_CDict* cdict, unsigned long long pledgedSrcSize, ZSTD_frameParameters fParams); /**< same as ZSTD_initCStream_usingCDict(), with control over frame parameters */
ZSTDLIB_API size_t ZSTD_initCStream_usingCDict_advanced(ZSTD_CStream* zcs, const ZSTD_CDict* cdict, ZSTD_frameParameters fParams, unsigned long long pledgedSrcSize); /**< same as ZSTD_initCStream_usingCDict(), with control over frame parameters */
/*! ZSTD_resetCStream() :
* start a new compression job, using same parameters from previous job.
@ -617,11 +751,11 @@ ZSTDLIB_API size_t ZSTD_resetCStream(ZSTD_CStream* zcs, unsigned long long pledg
/*===== Advanced Streaming decompression functions =====*/
typedef enum { DStream_p_maxWindowSize } ZSTD_DStreamParameter_e;
ZSTDLIB_API ZSTD_DStream* ZSTD_createDStream_advanced(ZSTD_customMem customMem);
ZSTDLIB_API size_t ZSTD_initDStream_usingDict(ZSTD_DStream* zds, const void* dict, size_t dictSize); /**< note: a dict will not be used if dict == NULL or dictSize < 8 */
ZSTDLIB_API ZSTD_DStream* ZSTD_initStaticDStream(void* workspace, size_t workspaceSize); /**< same as ZSTD_initStaticDCtx() */
ZSTDLIB_API size_t ZSTD_setDStreamParameter(ZSTD_DStream* zds, ZSTD_DStreamParameter_e paramType, unsigned paramValue);
ZSTDLIB_API size_t ZSTD_initDStream_usingDict(ZSTD_DStream* zds, const void* dict, size_t dictSize); /**< note: a dict will not be used if dict == NULL or dictSize < 8 */
ZSTDLIB_API size_t ZSTD_initDStream_usingDDict(ZSTD_DStream* zds, const ZSTD_DDict* ddict); /**< note : ddict will just be referenced, and must outlive decompression session */
ZSTDLIB_API size_t ZSTD_resetDStream(ZSTD_DStream* zds); /**< re-use decompression parameters from previous init; saves dictionary loading */
ZSTDLIB_API size_t ZSTD_sizeof_DStream(const ZSTD_DStream* zds);
/*********************************************************************
@ -683,21 +817,24 @@ ZSTDLIB_API size_t ZSTD_compressEnd(ZSTD_CCtx* cctx, void* dst, size_t dstCapaci
Use ZSTD_createDCtx() / ZSTD_freeDCtx() to manage it.
A ZSTD_DCtx object can be re-used multiple times.
First typical operation is to retrieve frame parameters, using ZSTD_getFrameParams().
It fills a ZSTD_frameParams structure which provide important information to correctly decode the frame,
such as the minimum rolling buffer size to allocate to decompress data (`windowSize`),
and the dictionary ID used.
First typical operation is to retrieve frame parameters, using ZSTD_getFrameHeader().
It fills a ZSTD_frameHeader structure with important information to correctly decode the frame,
such as minimum rolling buffer size to allocate to decompress data (`windowSize`),
and the dictionary ID in use.
(Note : content size is optional, it may not be present. 0 means : content size unknown).
Note that these values could be wrong, either because of data malformation, or because an attacker is deliberately spoofing false information.
As a consequence, check that values remain within valid application range, especially `windowSize`, before allocation.
Each application can set its own limit, depending on local restrictions. For extended interoperability, it is recommended to support at least 8 MB.
Frame parameters are extracted from the beginning of the compressed frame.
Data fragment must be large enough to ensure successful decoding, typically `ZSTD_frameHeaderSize_max` bytes.
@result : 0 : successful decoding, the `ZSTD_frameParams` structure is correctly filled.
Each application can set its own limit, depending on local restrictions.
For extended interoperability, it is recommended to support windowSize of at least 8 MB.
Frame header is extracted from the beginning of compressed frame, so providing only the frame's beginning is enough.
Data fragment must be large enough to ensure successful decoding.
`ZSTD_frameHeaderSize_max` bytes is guaranteed to always be large enough.
@result : 0 : successful decoding, the `ZSTD_frameHeader` structure is correctly filled.
>0 : `srcSize` is too small, please provide at least @result bytes on next attempt.
errorCode, which can be tested using ZSTD_isError().
Start decompression, with ZSTD_decompressBegin() or ZSTD_decompressBegin_usingDict().
Start decompression, with ZSTD_decompressBegin().
If decompression requires a dictionary, use ZSTD_decompressBegin_usingDict() or ZSTD_decompressBegin_usingDDict().
Alternatively, you can copy a prepared context, using ZSTD_copyDCtx().
Then use ZSTD_nextSrcSizeToDecompress() and ZSTD_decompressContinue() alternatively.
@ -729,30 +866,233 @@ ZSTDLIB_API size_t ZSTD_compressEnd(ZSTD_CCtx* cctx, void* dst, size_t dstCapaci
b) Frame Size - 4 Bytes, Little endian format, unsigned 32-bits
c) Frame Content - any content (User Data) of length equal to Frame Size
For skippable frames ZSTD_decompressContinue() always returns 0.
For skippable frames ZSTD_getFrameParams() returns fparamsPtr->windowLog==0 what means that a frame is skippable.
For skippable frames, ZSTD_getFrameHeader() returns fparamsPtr->windowLog==0, which means the frame is skippable.
Note : If fparamsPtr->frameContentSize==0, it is ambiguous: the frame might actually be a Zstd encoded frame with no content.
For purposes of decompression, it is valid in both cases to skip the frame using
ZSTD_findFrameCompressedSize to find its size in bytes.
It also returns Frame Size as fparamsPtr->frameContentSize.
*/
typedef struct {
unsigned long long frameContentSize;
unsigned windowSize;
unsigned dictID;
unsigned checksumFlag;
} ZSTD_frameParams;
/*===== Buffer-less streaming decompression functions =====*/
ZSTDLIB_API size_t ZSTD_getFrameParams(ZSTD_frameParams* fparamsPtr, const void* src, size_t srcSize); /**< doesn't consume input, see details below */
ZSTDLIB_API size_t ZSTD_getFrameHeader(ZSTD_frameHeader* zfhPtr, const void* src, size_t srcSize); /**< doesn't consume input */
ZSTDLIB_API size_t ZSTD_decompressBegin(ZSTD_DCtx* dctx);
ZSTDLIB_API size_t ZSTD_decompressBegin_usingDict(ZSTD_DCtx* dctx, const void* dict, size_t dictSize);
ZSTDLIB_API size_t ZSTD_decompressBegin_usingDDict(ZSTD_DCtx* dctx, const ZSTD_DDict* ddict);
ZSTDLIB_API void ZSTD_copyDCtx(ZSTD_DCtx* dctx, const ZSTD_DCtx* preparedDCtx);
ZSTDLIB_API size_t ZSTD_nextSrcSizeToDecompress(ZSTD_DCtx* dctx);
ZSTDLIB_API size_t ZSTD_decompressContinue(ZSTD_DCtx* dctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize);
typedef enum { ZSTDnit_frameHeader, ZSTDnit_blockHeader, ZSTDnit_block, ZSTDnit_lastBlock, ZSTDnit_checksum, ZSTDnit_skippableFrame } ZSTD_nextInputType_e;
ZSTDLIB_API ZSTD_nextInputType_e ZSTD_nextInputType(ZSTD_DCtx* dctx);
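Putting the buffer-less functions above together, a simplified single-frame loop might look like the sketch below. It assumes the whole compressed frame is already in memory and that dst is one contiguous buffer large enough for the regenerated content (which trivially satisfies the windowSize requirement), so it skips the rolling-buffer management a real streaming consumer needs. bufferless_decompress is hypothetical.

#include "zstd.h"

/* Sketch: buffer-less decompression of one complete in-memory frame into a contiguous dst. */
static size_t bufferless_decompress(ZSTD_DCtx* dctx,
                                    void* dst, size_t dstCapacity,
                                    const void* src, size_t srcSize)
{
    const char* ip = (const char*)src;
    char* op = (char*)dst;
    size_t remaining = srcSize;
    ZSTD_frameHeader zfh;

    if (ZSTD_getFrameHeader(&zfh, src, srcSize) != 0) return 0;  /* error, or more data needed */
    /* per the note above, frameContentSize==0 means "unknown" */
    if ((zfh.frameContentSize != 0)
        && ((unsigned long long)dstCapacity < zfh.frameContentSize)) return 0;
    if (ZSTD_isError(ZSTD_decompressBegin(dctx))) return 0;

    while (1) {
        size_t const toRead = ZSTD_nextSrcSizeToDecompress(dctx);
        if (toRead == 0) break;                    /* frame fully decoded */
        if (toRead > remaining) return 0;          /* truncated input */
        {   size_t const produced = ZSTD_decompressContinue(dctx,
                    op, dstCapacity - (size_t)(op - (char*)dst), ip, toRead);
            if (ZSTD_isError(produced)) return 0;
            op += produced; ip += toRead; remaining -= toRead;
        }
    }
    return (size_t)(op - (char*)dst);
}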
/*=== New advanced API (experimental, and compression only) ===*/
/* notes on API design :
* In this proposal, parameters are pushed one by one into an existing CCtx,
* and then applied to all subsequent compression jobs.
* When no parameter is ever provided, CCtx is created with compression level ZSTD_CLEVEL_DEFAULT.
*
* This API is intended to replace all other experimental APIs.
* It can cover all other use cases, and even new ones.
* It stands a good chance to become "stable",
* after a reasonable testing period.
*/
/* note on naming convention :
* Initially, the API favored names like ZSTD_setCCtxParameter() .
* In this proposal, convention is changed towards ZSTD_CCtx_setParameter() .
* The main driver is that it identifies more clearly the target object type.
* It feels clearer in light of potential variants :
* ZSTD_CDict_setParameter() (rather than ZSTD_setCDictParameter())
* ZSTD_DCtx_setParameter() (rather than ZSTD_setDCtxParameter() )
* Left variant feels easier to distinguish.
*/
/* note on enum design :
* All enums will be manually set to explicit values before reaching "stable API" status */
typedef enum {
/* compression parameters */
ZSTD_p_compressionLevel=100, /* Update all compression parameters according to pre-defined cLevel table
* Default level is ZSTD_CLEVEL_DEFAULT==3.
* Special: value 0 means "do not change cLevel". */
ZSTD_p_windowLog, /* Maximum allowed back-reference distance, expressed as power of 2.
* Must be clamped between ZSTD_WINDOWLOG_MIN and ZSTD_WINDOWLOG_MAX.
* Special: value 0 means "do not change windowLog". */
ZSTD_p_hashLog, /* Size of the probe table, as a power of 2.
* Resulting table size is (1 << (hashLog+2)).
* Must be clamped between ZSTD_HASHLOG_MIN and ZSTD_HASHLOG_MAX.
* Larger tables improve compression ratio of strategies <= dFast,
* and improve speed of strategies > dFast.
* Special: value 0 means "do not change hashLog". */
ZSTD_p_chainLog, /* Size of the full-search table, as a power of 2.
* Resulting table size is (1 << (chainLog+2)).
* Larger tables result in better and slower compression.
* This parameter is useless when using "fast" strategy.
* Special: value 0 means "do not change chainLog". */
ZSTD_p_searchLog, /* Number of search attempts, as a power of 2.
* More attempts result in better and slower compression.
* This parameter is useless when using "fast" and "dFast" strategies.
* Special: value 0 means "do not change searchLog". */
ZSTD_p_minMatch, /* Minimum size of searched matches (note : repCode matches can be smaller).
* Larger values make faster compression and decompression, but decrease ratio.
* Must be clamped between ZSTD_SEARCHLENGTH_MIN and ZSTD_SEARCHLENGTH_MAX.
* Note that currently, for all strategies < btopt, effective minimum is 4.
* Note that currently, for all strategies > fast, effective maximum is 6.
* Special: value 0 means "do not change minMatchLength". */
ZSTD_p_targetLength, /* Only useful for strategies >= btopt.
* Length of Match considered "good enough" to stop search.
* Larger values make compression stronger and slower.
* Special: value 0 means "do not change targetLength". */
ZSTD_p_compressionStrategy, /* See ZSTD_strategy enum definition.
* Cast selected strategy as unsigned for ZSTD_CCtx_setParameter() compatibility.
* The higher the value of selected strategy, the more complex it is,
* resulting in stronger and slower compression.
* Special: value 0 means "do not change strategy". */
/* frame parameters */
ZSTD_p_contentSizeFlag=200, /* Content size is written into frame header _whenever known_ (default:1) */
ZSTD_p_checksumFlag, /* A 32-bit checksum of content is written at end of frame (default:0) */
ZSTD_p_dictIDFlag, /* When applicable, dictID of dictionary is provided in frame header (default:1) */
/* dictionary parameters (must be set before ZSTD_CCtx_loadDictionary) */
ZSTD_p_dictMode=300, /* Select how dictionary content must be interpreted. Value must be from type ZSTD_dictMode_e.
* default : 0==auto : dictionary will be "full" if it respects specification, otherwise it will be "rawContent" */
ZSTD_p_refDictContent, /* Dictionary content will be referenced, instead of copied (default:0==byCopy).
* It requires that dictionary buffer outlives its users */
/* multi-threading parameters */
ZSTD_p_nbThreads=400, /* Select how many threads a compression job can spawn (default:1)
* More threads improve speed, but also increase memory usage.
* Can only receive a value > 1 if ZSTD_MULTITHREAD is enabled.
* Special: value 0 means "do not change nbThreads" */
ZSTD_p_jobSize, /* Size of a compression job. Each compression job is completed in parallel.
* 0 means default, which is dynamically determined based on compression parameters.
* Job size must be a minimum of overlapSize, or 1 KB, whichever is larger.
* The minimum size is automatically and transparently enforced */
ZSTD_p_overlapSizeLog, /* Size of previous input reloaded at the beginning of each job.
* 0 => no overlap, 6(default) => use 1/8th of windowSize, >=9 => use full windowSize */
/* advanced parameters - may not remain available after API update */
ZSTD_p_forceMaxWindow=1100, /* Force back-reference distances to remain < windowSize,
* even when referencing into Dictionary content (default:0) */
} ZSTD_cParameter;
/*! ZSTD_CCtx_setParameter() :
* Set one compression parameter, selected by enum ZSTD_cParameter.
* Note : when `value` is an enum, cast it to unsigned for proper type checking.
* @result : 0, or an error code (which can be tested with ZSTD_isError()). */
ZSTDLIB_API size_t ZSTD_CCtx_setParameter(ZSTD_CCtx* cctx, ZSTD_cParameter param, unsigned value);
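/* Illustrative sketch (not part of the header) : pushing a few parameters into a CCtx
 * before starting compression. The chosen values are arbitrary; enum-typed values are
 * cast to unsigned, as noted above, and every call is checked with ZSTD_isError(). */
static int setBasicParams(ZSTD_CCtx* cctx)
{
    if (ZSTD_isError(ZSTD_CCtx_setParameter(cctx, ZSTD_p_compressionLevel, 19))) return 1;
    if (ZSTD_isError(ZSTD_CCtx_setParameter(cctx, ZSTD_p_windowLog, 23))) return 1;
    if (ZSTD_isError(ZSTD_CCtx_setParameter(cctx, ZSTD_p_checksumFlag, 1))) return 1;
    if (ZSTD_isError(ZSTD_CCtx_setParameter(cctx, ZSTD_p_compressionStrategy,
                                            (unsigned)ZSTD_btopt))) return 1;   /* cast the enum */
    return 0;   /* parameters now apply to all subsequent compression jobs on this cctx */
}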
/*! ZSTD_CCtx_setPledgedSrcSize() :
* Total input data size to be compressed as a single frame.
* This value will be checked at the end of compression, and will result in an error if not respected.
* @result : 0, or an error code (which can be tested with ZSTD_isError()).
* Note 1 : 0 means zero, empty.
* In order to mean "unknown content size", pass constant ZSTD_CONTENTSIZE_UNKNOWN.
* Note that ZSTD_CONTENTSIZE_UNKNOWN is default value for new compression jobs.
* Note 2 : If all data is provided and consumed in a single round,
* this value is overridden by srcSize instead. */
ZSTDLIB_API size_t ZSTD_CCtx_setPledgedSrcSize(ZSTD_CCtx* cctx, unsigned long long pledgedSrcSize);
/*! ZSTD_CCtx_loadDictionary() :
* Create an internal CDict from dict buffer.
* Decompression will have to use the same dictionary.
* @result : 0, or an error code (which can be tested with ZSTD_isError()).
* Special : Adding a NULL (or 0-size) dictionary invalidates any previous dictionary,
* meaning "return to no-dictionary mode".
* Note 1 : `dict` content will be copied internally,
* except if ZSTD_p_refDictContent is set before loading.
* Note 2 : Loading a dictionary involves building tables, which are dependent on compression parameters.
* For this reason, compression parameters cannot be changed anymore after loading a dictionary.
* It's also a CPU-heavy operation, with non-negligible impact on latency.
* Note 3 : Dictionary will be used for all future compression jobs.
* To return to "no-dictionary" situation, load a NULL dictionary */
ZSTDLIB_API size_t ZSTD_CCtx_loadDictionary(ZSTD_CCtx* cctx, const void* dict, size_t dictSize);
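/* Illustrative sketch (not part of the header) : typical ordering when loading a dictionary
 * into a CCtx. `dictBuffer`/`dictSize` are assumed to hold a zstd dictionary (or raw content,
 * depending on ZSTD_p_dictMode). Parameters are set first, since they can no longer be
 * changed once the dictionary has been loaded (Note 2 above). */
static size_t prepareCCtxWithDict(ZSTD_CCtx* cctx,
                                  const void* dictBuffer, size_t dictSize,
                                  unsigned level)
{
    size_t err = ZSTD_CCtx_setParameter(cctx, ZSTD_p_compressionLevel, level);
    if (ZSTD_isError(err)) return err;
    err = ZSTD_CCtx_loadDictionary(cctx, dictBuffer, dictSize);   /* dict content is copied */
    if (ZSTD_isError(err)) return err;
    /* all subsequent compression jobs on this cctx now use the dictionary;
     * loading NULL / size 0 later returns it to no-dictionary mode */
    return 0;
}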
/*! ZSTD_CCtx_refCDict() :
* Reference a prepared dictionary, to be used for all next compression jobs.
* Note that compression parameters are enforced from within CDict,
* and supersede any compression parameter previously set within CCtx.
* The dictionary will remain valid for future compression jobs using the same CCtx.
* @result : 0, or an error code (which can be tested with ZSTD_isError()).
* Special : adding a NULL CDict means "return to no-dictionary mode".
* Note 1 : Currently, only one dictionary can be managed.
* Adding a new dictionary effectively "discards" any previous one.
* Note 2 : CDict is just referenced, its lifetime must outlive CCtx.
*/
ZSTDLIB_API size_t ZSTD_CCtx_refCDict(ZSTD_CCtx* cctx, const ZSTD_CDict* cdict);
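/* Illustrative sketch (not part of the header) : creating a CDict once and referencing it
 * from a CCtx. `dictBuffer`/`dictSize` are assumed to hold a zstd dictionary. The CDict is
 * only referenced, so the caller must keep it alive for as long as the CCtx may use it. */
static ZSTD_CDict* makeAndAttachCDict(ZSTD_CCtx* cctx,
                                      const void* dictBuffer, size_t dictSize,
                                      int compressionLevel)
{
    ZSTD_CDict* const cdict = ZSTD_createCDict(dictBuffer, dictSize, compressionLevel);
    if (cdict == NULL) return NULL;
    if (ZSTD_isError(ZSTD_CCtx_refCDict(cctx, cdict))) {
        ZSTD_freeCDict(cdict);
        return NULL;
    }
    return cdict;   /* caller keeps ownership, and frees it only after the CCtx is done with it */
}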
/*! ZSTD_CCtx_refPrefix() :
* Reference a prefix (single-usage dictionary) for next compression job.
* Decompression needs the same prefix to properly regenerate data.
* Prefix is **only used once**. Tables are discarded at end of compression job.
* Subsequent compression jobs will be done without prefix (if none is explicitly referenced).
* If there is a need to use the same prefix multiple times, consider embedding it into a ZSTD_CDict instead.
* @result : 0, or an error code (which can be tested with ZSTD_isError()).
* Special : Adding any prefix (including NULL) invalidates any previous prefix or dictionary
* Note 1 : Prefix buffer is referenced. It must outlive compression job.
* Note 2 : Referencing a prefix involves building tables, which are dependent on compression parameters.
* It's a CPU-heavy operation, with non-negligible impact on latency.
* Note 3 : it's possible to alter ZSTD_p_dictMode using ZSTD_CCtx_setParameter() */
ZSTDLIB_API size_t ZSTD_CCtx_refPrefix(ZSTD_CCtx* cctx, const void* prefix, size_t prefixSize);
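/* Illustrative sketch (not part of the header) : compressing one chunk while referencing the
 * previous chunk as a single-use prefix. `prev`/`prevSize` and `cur`/`curSize` are hypothetical
 * buffers; `dstCapacity` is assumed >= ZSTD_compressBound(curSize), so the single
 * ZSTD_compress_generic() call below (declared further down) completes the frame. */
static size_t compressWithPrefix(ZSTD_CCtx* cctx,
                                 void* dst, size_t dstCapacity,
                                 const void* cur, size_t curSize,
                                 const void* prev, size_t prevSize)
{
    ZSTD_outBuffer out = { dst, dstCapacity, 0 };
    ZSTD_inBuffer  in  = { cur, curSize, 0 };
    size_t const refErr = ZSTD_CCtx_refPrefix(cctx, prev, prevSize);   /* prefix applies to this job only */
    if (ZSTD_isError(refErr)) return refErr;
    {   size_t const remaining = ZSTD_compress_generic(cctx, &out, &in, ZSTD_e_end);
        if (ZSTD_isError(remaining)) return remaining;
        if (remaining != 0) return (size_t)(-1);   /* dst too small for this sketch */
    }
    return out.pos;   /* size of the generated frame; decompression must use the same prefix */
}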
typedef enum {
ZSTD_e_continue=0, /* collect more data, encoder transparently decides when to output result, for optimal conditions */
ZSTD_e_flush, /* flush any data provided so far - frame will continue, future data can still reference previous data for better compression */
ZSTD_e_end /* flush any remaining data and ends current frame. Any future compression starts a new frame. */
} ZSTD_EndDirective;
/*! ZSTD_compress_generic() :
* Behaves about the same as ZSTD_compressStream(). To note :
* - Compression parameters are pushed into CCtx before starting compression, using ZSTD_CCtx_setParameter()
* - Compression parameters cannot be changed once compression is started.
* - *dstPos must be <= dstCapacity, *srcPos must be <= srcSize
* - *dstPos and *srcPos will be updated. They are guaranteed to remain below their respective limit.
* - @return provides the minimum amount of data still to flush from internal buffers
* or an error code, which can be tested using ZSTD_isError().
* if @return != 0, the flush is not fully completed; some data is left within internal buffers.
* - after a ZSTD_e_end directive, if internal buffer is not fully flushed,
* only ZSTD_e_end or ZSTD_e_flush operations are allowed.
* It is necessary to fully flush internal buffers
* before starting a new compression job, or changing compression parameters.
*/
ZSTDLIB_API size_t ZSTD_compress_generic (ZSTD_CCtx* cctx,
ZSTD_outBuffer* output,
ZSTD_inBuffer* input,
ZSTD_EndDirective endOp);
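/* Illustrative sketch (not part of the header) : compressing one in-memory buffer into a
 * single frame with ZSTD_compress_generic(). `sink` is a hypothetical output callback;
 * real code would typically also push data with ZSTD_e_continue as it arrives, then close
 * the frame with ZSTD_e_end as done below. */
static size_t compressBufferToSink(ZSTD_CCtx* cctx,
                                   const void* src, size_t srcSize,
                                   void (*sink)(const void* chunk, size_t chunkSize))
{
    unsigned char outBuf[1 << 14];                 /* arbitrary staging buffer */
    ZSTD_inBuffer in = { src, srcSize, 0 };
    size_t remaining;
    do {
        ZSTD_outBuffer out = { outBuf, sizeof(outBuf), 0 };
        remaining = ZSTD_compress_generic(cctx, &out, &in, ZSTD_e_end);
        if (ZSTD_isError(remaining)) return remaining;
        if (out.pos) sink(outBuf, out.pos);
    } while (remaining != 0);                      /* 0 => frame fully flushed */
    return 0;
}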
/*! ZSTD_CCtx_reset() :
* Return a CCtx to a clean state.
* Useful after an error, or to interrupt an ongoing compression job and start a new one.
* Any internal data not yet flushed is cancelled.
* Dictionary (if any) is dropped.
* It's possible to modify compression parameters after a reset.
*/
ZSTDLIB_API void ZSTD_CCtx_reset(ZSTD_CCtx* cctx); /* Not ready yet ! */
/*! ZSTD_compress_generic_simpleArgs() :
* Same as ZSTD_compress_generic(),
* but using only integral types as arguments.
* Argument list is larger and less expressive than ZSTD_{in,out}Buffer,
* but can be helpful for binders from dynamic languages
* which have trouble handling structures containing memory pointers.
*/
size_t ZSTD_compress_generic_simpleArgs (
ZSTD_CCtx* cctx,
void* dst, size_t dstCapacity, size_t* dstPos,
const void* src, size_t srcSize, size_t* srcPos,
ZSTD_EndDirective endOp);
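/* Illustrative sketch (not part of the header) : the flat-argument variant, convenient for
 * foreign-language bindings. Positions are plain size_t values updated in place; dstCapacity
 * is assumed large enough to hold the whole frame, otherwise this sketch just reports failure. */
static size_t compressOnce_simpleArgs(ZSTD_CCtx* cctx,
                                      void* dst, size_t dstCapacity,
                                      const void* src, size_t srcSize)
{
    size_t dstPos = 0, srcPos = 0;
    size_t const remaining = ZSTD_compress_generic_simpleArgs(cctx,
                                dst, dstCapacity, &dstPos,
                                src, srcSize, &srcPos,
                                ZSTD_e_end);
    if (ZSTD_isError(remaining)) return remaining;
    return (remaining == 0) ? dstPos : (size_t)(-1);   /* (size_t)(-1) : dst too small for this sketch */
}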
/**
Block functions
@ -767,7 +1107,7 @@ ZSTDLIB_API ZSTD_nextInputType_e ZSTD_nextInputType(ZSTD_DCtx* dctx);
+ compression : any ZSTD_compressBegin*() variant, including with dictionary
+ decompression : any ZSTD_decompressBegin*() variant, including with dictionary
+ copyCCtx() and copyDCtx() can be used too
- Block size is limited, it must be <= ZSTD_getBlockSizeMax() <= ZSTD_BLOCKSIZE_ABSOLUTEMAX
- Block size is limited, it must be <= ZSTD_getBlockSize() <= ZSTD_BLOCKSIZE_MAX
+ If input is larger than a block size, it's necessary to split input data into multiple blocks
+ For inputs larger than a single block size, consider using the regular ZSTD_compress() instead.
Frame metadata is not that costly, and quickly becomes negligible as source size grows larger.
@ -780,9 +1120,10 @@ ZSTDLIB_API ZSTD_nextInputType_e ZSTD_nextInputType(ZSTD_DCtx* dctx);
Use ZSTD_insertBlock() for such a case.
*/
#define ZSTD_BLOCKSIZE_ABSOLUTEMAX (128 * 1024) /* define, for static allocation */
#define ZSTD_BLOCKSIZELOG_MAX 17
#define ZSTD_BLOCKSIZE_MAX (1<<ZSTD_BLOCKSIZELOG_MAX) /* define, for static allocation */
/*===== Raw zstd block functions =====*/
ZSTDLIB_API size_t ZSTD_getBlockSizeMax(ZSTD_CCtx* cctx);
ZSTDLIB_API size_t ZSTD_getBlockSize (const ZSTD_CCtx* cctx);
ZSTDLIB_API size_t ZSTD_compressBlock (ZSTD_CCtx* cctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize);
ZSTDLIB_API size_t ZSTD_decompressBlock(ZSTD_DCtx* dctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize);
ZSTDLIB_API size_t ZSTD_insertBlock(ZSTD_DCtx* dctx, const void* blockStart, size_t blockSize); /**< insert block into `dctx` history. Useful for uncompressed blocks */
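/* Illustrative sketch (not part of the header) : round-tripping a single raw block. The
 * compression level (3) and buffers are arbitrary; input must fit within ZSTD_getBlockSize(),
 * and a 0 return from ZSTD_compressBlock() means the block was not compressible, in which
 * case the raw data is registered on the decompression side with ZSTD_insertBlock(). */
static int blockRoundTrip(ZSTD_CCtx* cctx, ZSTD_DCtx* dctx,
                          const void* src, size_t srcSize,
                          void* scratch, size_t scratchSize,
                          void* regen, size_t regenSize)
{
    if (ZSTD_isError(ZSTD_compressBegin(cctx, 3))) return 1;   /* any ZSTD_compressBegin*() variant works */
    if (ZSTD_isError(ZSTD_decompressBegin(dctx))) return 1;
    if (srcSize > ZSTD_getBlockSize(cctx)) return 1;           /* block size limit */
    {   size_t const cSize = ZSTD_compressBlock(cctx, scratch, scratchSize, src, srcSize);
        if (ZSTD_isError(cSize)) return 1;
        if (cSize == 0) {                                      /* not compressible : keep it raw */
            return ZSTD_isError(ZSTD_insertBlock(dctx, src, srcSize)) ? 1 : 0;
        }
        {   size_t const dSize = ZSTD_decompressBlock(dctx, regen, regenSize, scratch, cSize);
            if (ZSTD_isError(dSize)) return 1;
            return (dSize == srcSize) ? 0 : 1;
        }
    }
}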

View File

@ -41,9 +41,11 @@ CPPFLAGS+= -I$(ZSTDDIR) -I$(ZSTDDIR)/common -I$(ZSTDDIR)/compress \
-I$(ZSTDDIR)/dictBuilder \
-DXXH_NAMESPACE=ZSTD_ # because xxhash.o already compiled with this macro from library
CFLAGS ?= -O3
DEBUGFLAGS = -g -Wall -Wextra -Wcast-qual -Wcast-align -Wshadow \
-Wstrict-aliasing=1 -Wswitch-enum -Wdeclaration-after-statement \
-Wstrict-prototypes -Wundef -Wpointer-arith -Wformat-security
DEBUGFLAGS = -Wall -Wextra -Wcast-qual -Wcast-align -Wshadow \
-Wstrict-aliasing=1 -Wswitch-enum -Wdeclaration-after-statement \
-Wstrict-prototypes -Wundef -Wpointer-arith -Wformat-security \
-Wvla -Wformat=2 -Winit-self -Wfloat-equal -Wwrite-strings \
-Wredundant-decls
CFLAGS += $(DEBUGFLAGS) $(MOREFLAGS)
FLAGS = $(CPPFLAGS) $(CFLAGS) $(LDFLAGS)
@ -56,7 +58,7 @@ ZDICT_FILES := $(ZSTDDIR)/dictBuilder/*.c
ZSTDDECOMP_O = $(ZSTDDIR)/decompress/zstd_decompress.o
ZSTD_LEGACY_SUPPORT ?= 4
ZSTDLEGACY_FILES:=
ZSTDLEGACY_FILES :=
ifneq ($(ZSTD_LEGACY_SUPPORT), 0)
ifeq ($(shell test $(ZSTD_LEGACY_SUPPORT) -lt 8; echo $$?), 0)
ZSTDLEGACY_FILES += $(shell ls $(ZSTDDIR)/legacy/*.c | grep 'v0[$(ZSTD_LEGACY_SUPPORT)-7]')
@ -137,16 +139,13 @@ all: zstd
$(ZSTDDECOMP_O): CFLAGS += $(ALIGN_LOOP)
zstd xzstd zstd4 xzstd4 : CPPFLAGS += $(THREAD_CPP) $(ZLIBCPP)
zstd xzstd zstd4 xzstd4 : LDFLAGS += $(THREAD_LD) $(ZLIBLD)
xzstd xzstd4 : CPPFLAGS += $(LZMACPP)
xzstd xzstd4 : LDFLAGS += $(LZMALD)
zstd4 xzstd4 : CPPFLAGS += $(LZ4CPP)
zstd4 xzstd4 : LDFLAGS += $(LZ4LD)
zstd zstd4 : LZMA_MSG := - xz/lzma support is disabled
zstd xzstd : LZ4_MSG := - lz4 support is disabled
zstd xzstd zstd4 xzstd4 : CPPFLAGS += -DZSTD_LEGACY_SUPPORT=$(ZSTD_LEGACY_SUPPORT)
zstd xzstd zstd4 xzstd4 : $(ZSTDLIB_FILES) zstdcli.o fileio.o bench.o datagen.o dibio.o
zstd zstd4 : CPPFLAGS += $(THREAD_CPP) $(ZLIBCPP) $(LZMACPP)
zstd zstd4 : LDFLAGS += $(THREAD_LD) $(ZLIBLD) $(LZMALD)
zstd4 : CPPFLAGS += $(LZ4CPP)
zstd4 : LDFLAGS += $(LZ4LD)
zstd : LZ4_MSG := - lz4 support is disabled
zstd zstd4 : CPPFLAGS += -DZSTD_LEGACY_SUPPORT=$(ZSTD_LEGACY_SUPPORT)
zstd zstd4 : $(ZSTDLIB_FILES) zstdcli.o fileio.o bench.o datagen.o dibio.o
@echo "$(THREAD_MSG)"
@echo "$(ZLIB_MSG)"
@echo "$(LZMA_MSG)"
@ -179,6 +178,11 @@ zstd-nogz : ZLIBLD :=
zstd-nogz : ZLIB_MSG := - gzip support is disabled
zstd-nogz : zstd
zstd-noxz : LZMACPP :=
zstd-noxz : LZMALD :=
zstd-noxz : LZMA_MSG := - xz/lzma support is disabled
zstd-noxz : zstd
zstd-pgo : MOREFLAGS = -fprofile-generate
zstd-pgo : clean zstd

View File

@ -24,13 +24,23 @@ There are however other Makefile targets that create different variations of CLI
- __HAVE_ZLIB__ : `zstd` can compress and decompress files in `.gz` format.
This is done through command `--format=gzip`.
Alternatively, symlinks named `gzip` or `gunzip` will mimic intended behavior.
.gz support is automatically enabled when `zlib` library is detected at build time.
It's possible to disable .gz support, by either compiling `zstd-nogz` target or using HAVE_ZLIB=0 variable.
`.gz` support is automatically enabled when `zlib` library is detected at build time.
It's possible to disable `.gz` support, by either compiling `zstd-nogz` target or using HAVE_ZLIB=0 variable.
Example : make zstd HAVE_ZLIB=0
It's also possible to force compilation with zlib support, using HAVE_ZLIB=1.
In that case, the linking stage will fail if the `zlib` library cannot be found.
This might be useful to prevent silent feature disabling.
- __HAVE_LZMA__ : `zstd` can compress and decompress files in `.xz` and `.lzma` formats.
This is done through commands `--format=xz` and `--format=lzma` respectively.
Alternatively, symlinks named `xz`, `unxz`, `lzma`, or `unlzma` will mimic intended behavior.
`.xz` and `.lzma` support is automatically enabled when `lzma` library is detected at build time.
It's possible to disable `.xz` and `.lzma` support, by either compiling `zstd-noxz` target or using HAVE_LZMA=0 variable.
Example : make zstd HAVE_LZMA=0
It's also possible to force compilation with lzma support, using HAVE_LZMA=1.
In that case, the linking stage will fail if the `lzma` library cannot be found.
This might be useful to prevent silent feature disabling.
#### Aggregation of parameters
CLI supports aggregation of parameters, e.g. `-b1`, `-e18`, and `-i1` can be joined into `-b1e18i1`.

View File

@ -86,14 +86,13 @@ static clock_t g_time = 0;
#ifndef DEBUG
# define DEBUG 0
#endif
#define DEBUGOUTPUT(...) if (DEBUG) DISPLAY(__VA_ARGS__);
#define EXM_THROW(error, ...) \
{ \
DEBUGOUTPUT("%s: %i: \n", __FILE__, __LINE__); \
DISPLAYLEVEL(1, "Error %i : ", error); \
DISPLAYLEVEL(1, __VA_ARGS__); \
DISPLAYLEVEL(1, " \n"); \
exit(error); \
#define DEBUGOUTPUT(...) { if (DEBUG) DISPLAY(__VA_ARGS__); }
#define EXM_THROW(error, ...) { \
DEBUGOUTPUT("%s: %i: \n", __FILE__, __LINE__); \
DISPLAYLEVEL(1, "Error %i : ", error); \
DISPLAYLEVEL(1, __VA_ARGS__); \
DISPLAYLEVEL(1, " \n"); \
exit(error); \
}
@ -159,7 +158,6 @@ static int BMK_benchMem(const void* srcBuffer, size_t srcSize,
const ZSTD_compressionParameters* comprParams)
{
size_t const blockSize = ((g_blockSize>=32 && !g_decodeOnly) ? g_blockSize : srcSize) + (!srcSize) /* avoid div by 0 */ ;
size_t const avgSize = MIN(blockSize, (srcSize / nbFiles));
U32 const maxNbBlocks = (U32) ((srcSize + (blockSize-1)) / blockSize) + nbFiles;
blockParam_t* const blockTable = (blockParam_t*) malloc(maxNbBlocks * sizeof(blockParam_t));
size_t const maxCompressedSize = ZSTD_compressBound(srcSize) + (maxNbBlocks * 1024); /* add some room for safety */
@ -262,42 +260,72 @@ static int BMK_benchMem(const void* srcBuffer, size_t srcSize,
UTIL_getTime(&clockStart);
if (!cCompleted) { /* still some time to do compression tests */
ZSTD_customMem const cmem = { NULL, NULL, NULL };
U64 const clockLoop = g_nbSeconds ? TIMELOOP_MICROSEC : 1;
U32 nbLoops = 0;
ZSTD_CDict* cdict = NULL;
#ifdef ZSTD_NEWAPI
ZSTD_CCtx_setParameter(ctx, ZSTD_p_nbThreads, g_nbThreads);
ZSTD_CCtx_setParameter(ctx, ZSTD_p_compressionLevel, cLevel);
ZSTD_CCtx_setParameter(ctx, ZSTD_p_windowLog, comprParams->windowLog);
ZSTD_CCtx_setParameter(ctx, ZSTD_p_chainLog, comprParams->chainLog);
ZSTD_CCtx_setParameter(ctx, ZSTD_p_searchLog, comprParams->searchLog);
ZSTD_CCtx_setParameter(ctx, ZSTD_p_minMatch, comprParams->searchLength);
ZSTD_CCtx_setParameter(ctx, ZSTD_p_targetLength, comprParams->targetLength);
ZSTD_CCtx_setParameter(ctx, ZSTD_p_compressionStrategy, comprParams->strategy);
ZSTD_CCtx_loadDictionary(ctx, dictBuffer, dictBufferSize);
#else
size_t const avgSize = MIN(blockSize, (srcSize / nbFiles));
ZSTD_parameters zparams = ZSTD_getParams(cLevel, avgSize, dictBufferSize);
ZSTD_CDict* cdict;
ZSTD_customMem const cmem = { NULL, NULL, NULL };
if (comprParams->windowLog) zparams.cParams.windowLog = comprParams->windowLog;
if (comprParams->chainLog) zparams.cParams.chainLog = comprParams->chainLog;
if (comprParams->hashLog) zparams.cParams.hashLog = comprParams->hashLog;
if (comprParams->searchLog) zparams.cParams.searchLog = comprParams->searchLog;
if (comprParams->searchLength) zparams.cParams.searchLength = comprParams->searchLength;
if (comprParams->targetLength) zparams.cParams.targetLength = comprParams->targetLength;
if (comprParams->strategy) zparams.cParams.strategy = (ZSTD_strategy)(comprParams->strategy - 1);
cdict = ZSTD_createCDict_advanced(dictBuffer, dictBufferSize, 1, zparams.cParams, cmem);
if (comprParams->strategy) zparams.cParams.strategy = comprParams->strategy;
cdict = ZSTD_createCDict_advanced(dictBuffer, dictBufferSize, 1 /*byRef*/, ZSTD_dm_auto, zparams.cParams, cmem);
if (cdict==NULL) EXM_THROW(1, "ZSTD_createCDict_advanced() allocation failure");
#endif
do {
U32 blockNb;
size_t rSize;
for (blockNb=0; blockNb<nbBlocks; blockNb++) {
size_t rSize;
#ifdef ZSTD_NEWAPI
ZSTD_outBuffer out = { blockTable[blockNb].cPtr, blockTable[blockNb].cRoom, 0 };
ZSTD_inBuffer in = { blockTable[blockNb].srcPtr, blockTable[blockNb].srcSize, 0 };
size_t cError = 1;
while (cError) {
cError = ZSTD_compress_generic(ctx,
&out, &in, ZSTD_e_end);
if (ZSTD_isError(cError))
EXM_THROW(1, "ZSTD_compress_generic() error : %s",
ZSTD_getErrorName(cError));
}
rSize = out.pos;
#else /* ! ZSTD_NEWAPI */
if (dictBufferSize) {
rSize = ZSTD_compress_usingCDict(ctx,
blockTable[blockNb].cPtr, blockTable[blockNb].cRoom,
blockTable[blockNb].srcPtr,blockTable[blockNb].srcSize,
cdict);
} else {
#ifdef ZSTD_MULTITHREAD /* note : limitation : MT single-pass does not support compression with dictionary */
# ifdef ZSTD_MULTITHREAD /* note : limitation : MT single-pass does not support compression with dictionary */
rSize = ZSTDMT_compressCCtx(mtctx,
blockTable[blockNb].cPtr, blockTable[blockNb].cRoom,
blockTable[blockNb].srcPtr,blockTable[blockNb].srcSize,
cLevel);
#else
# else
rSize = ZSTD_compress_advanced (ctx,
blockTable[blockNb].cPtr, blockTable[blockNb].cRoom,
blockTable[blockNb].srcPtr,blockTable[blockNb].srcSize, NULL, 0, zparams);
#endif
blockTable[blockNb].srcPtr,blockTable[blockNb].srcSize,
NULL, 0, zparams);
# endif
}
if (ZSTD_isError(rSize)) EXM_THROW(1, "ZSTD_compress_usingCDict() failed : %s", ZSTD_getErrorName(rSize));
if (ZSTD_isError(rSize))
EXM_THROW(1, "ZSTD_compress_usingCDict() failed : %s",
ZSTD_getErrorName(rSize));
#endif /* ZSTD_NEWAPI */
blockTable[blockNb].cSize = rSize;
}
nbLoops++;
@ -383,7 +411,7 @@ static int BMK_benchMem(const void* srcBuffer, size_t srcSize,
}
pos = (U32)(u - bacc);
bNb = pos / (128 KB);
DISPLAY("(block %u, sub %u, pos %u) \n", segNb, bNb, pos);
DISPLAY("(sample %u, block %u, pos %u) \n", segNb, bNb, pos);
if (u>5) {
int n;
for (n=-5; n<0; n++) DISPLAY("%02X ", ((const BYTE*)srcBuffer)[u+n]);

View File

@ -216,21 +216,21 @@ static U64 DiB_getTotalCappedFileSize(const char** fileNamesTable, unsigned nbFi
}
/*! ZDICT_trainFromBuffer_unsafe() :
/*! ZDICT_trainFromBuffer_unsafe_legacy() :
Strictly Internal use only !!
Same as ZDICT_trainFromBuffer_advanced(), but does not control `samplesBuffer`.
Same as ZDICT_trainFromBuffer_legacy(), but does not control `samplesBuffer`.
`samplesBuffer` must be followed by noisy guard band to avoid out-of-buffer reads.
@return : size of dictionary stored into `dictBuffer` (<= `dictBufferCapacity`)
or an error code.
*/
size_t ZDICT_trainFromBuffer_unsafe(void* dictBuffer, size_t dictBufferCapacity,
const void* samplesBuffer, const size_t* samplesSizes, unsigned nbSamples,
ZDICT_params_t parameters);
size_t ZDICT_trainFromBuffer_unsafe_legacy(void* dictBuffer, size_t dictBufferCapacity,
const void* samplesBuffer, const size_t* samplesSizes, unsigned nbSamples,
ZDICT_legacy_params_t parameters);
int DiB_trainFromFiles(const char* dictFileName, unsigned maxDictSize,
const char** fileNamesTable, unsigned nbFiles,
ZDICT_params_t *params, COVER_params_t *coverParams,
ZDICT_legacy_params_t *params, ZDICT_cover_params_t *coverParams,
int optimizeCover)
{
void* const dictBuffer = malloc(maxDictSize);
@ -243,8 +243,8 @@ int DiB_trainFromFiles(const char* dictFileName, unsigned maxDictSize,
int result = 0;
/* Checks */
if (params) g_displayLevel = params->notificationLevel;
else if (coverParams) g_displayLevel = coverParams->notificationLevel;
if (params) g_displayLevel = params->zParams.notificationLevel;
else if (coverParams) g_displayLevel = coverParams->zParams.notificationLevel;
else EXM_THROW(13, "Neither dictionary algorith selected"); /* should not happen */
if ((!fileSizes) || (!srcBuffer) || (!dictBuffer)) EXM_THROW(12, "not enough memory for DiB_trainFiles"); /* should not happen */
if (g_tooLargeSamples) {
@ -273,20 +273,20 @@ int DiB_trainFromFiles(const char* dictFileName, unsigned maxDictSize,
size_t dictSize;
if (params) {
DiB_fillNoise((char*)srcBuffer + benchedSize, NOISELENGTH); /* guard band, for end of buffer condition */
dictSize = ZDICT_trainFromBuffer_unsafe(dictBuffer, maxDictSize,
srcBuffer, fileSizes, nbFiles,
*params);
dictSize = ZDICT_trainFromBuffer_unsafe_legacy(dictBuffer, maxDictSize,
srcBuffer, fileSizes, nbFiles,
*params);
} else if (optimizeCover) {
dictSize = COVER_optimizeTrainFromBuffer(
dictBuffer, maxDictSize, srcBuffer, fileSizes, nbFiles,
coverParams);
dictSize = ZDICT_optimizeTrainFromBuffer_cover(dictBuffer, maxDictSize,
srcBuffer, fileSizes, nbFiles,
coverParams);
if (!ZDICT_isError(dictSize)) {
DISPLAYLEVEL(2, "k=%u\nd=%u\nsteps=%u\n", coverParams->k, coverParams->d, coverParams->steps);
DISPLAYLEVEL(2, "k=%u\nd=%u\nsteps=%u\n", coverParams->k, coverParams->d, coverParams->steps);
}
} else {
dictSize = COVER_trainFromBuffer(dictBuffer, maxDictSize,
srcBuffer, fileSizes, nbFiles,
*coverParams);
dictSize =
ZDICT_trainFromBuffer_cover(dictBuffer, maxDictSize, srcBuffer,
fileSizes, nbFiles, *coverParams);
}
if (ZDICT_isError(dictSize)) {
DISPLAYLEVEL(1, "dictionary training failed : %s \n", ZDICT_getErrorName(dictSize)); /* should not happen */

View File

@ -32,7 +32,7 @@
*/
int DiB_trainFromFiles(const char* dictFileName, unsigned maxDictSize,
const char** fileNamesTable, unsigned nbFiles,
ZDICT_params_t *params, COVER_params_t *coverParams,
ZDICT_legacy_params_t *params, ZDICT_cover_params_t *coverParams,
int optimizeCover);
#endif

File diff suppressed because it is too large

View File

@ -70,6 +70,7 @@ int FIO_compressFilename (const char* outfilename, const char* infilename, const
@return : 0 == ok; 1 == pb with src file. */
int FIO_decompressFilename (const char* outfilename, const char* infilename, const char* dictFileName);
int FIO_listMultipleFiles(unsigned numFiles, const char** filenameTable, int displayLevel);
/*-*************************************
* Multiple File functions

View File

@ -208,7 +208,7 @@ UTIL_STATIC int UTIL_getFileStat(const char* infilename, stat_t *statbuf)
}
UTIL_STATIC int UTIL_isRegFile(const char* infilename)
UTIL_STATIC int UTIL_isRegularFile(const char* infilename)
{
stat_t statbuf;
return UTIL_getFileStat(infilename, &statbuf); /* Only need to know whether it is a regular file */
@ -609,7 +609,7 @@ UTIL_STATIC int UTIL_countPhysicalCores(void)
/* try to determine if there's hyperthreading */
{ FILE* const cpuinfo = fopen("/proc/cpuinfo", "r");
size_t const BUF_SIZE = 80;
#define BUF_SIZE 80
char buff[BUF_SIZE];
int siblings = 0;

View File

@ -1,5 +1,5 @@
.
.TH "ZSTD" "1" "May 2017" "zstd 1.2.0" "User Commands"
.TH "ZSTD" "1" "June 2017" "zstd 1.3.0" "User Commands"
.
.SH "NAME"
\fBzstd\fR \- zstd, zstdmt, unzstd, zstdcat \- Compress or decompress \.zst files
@ -89,6 +89,10 @@ Benchmark file(s) using compression level #
\fB\-\-train FILEs\fR
Use FILEs as a training set to create a dictionary\. The training set should contain a lot of small files (> 100)\.
.
.TP
\fB\-l\fR, \fB\-\-list\fR
Display information related to a zstd compressed file, such as size, ratio, and checksum\. Some of these fields may not be available\. This command can be augmented with the \fB\-v\fR modifier\.
.
.SS "Operation modifiers"
.
.TP
@ -235,8 +239,8 @@ benchmark file(s) using multiple compression levels, from \fB\-b#\fR to \fB\-e#\
minimum evaluation time, in seconds (default : 3s), benchmark mode only
.
.TP
\fB\-B#\fR
cut file into independent blocks of size # (default: no block)
\fB\-B#\fR, \fB\-\-block\-size=#\fR
cut file(s) into independent blocks of size # (default: no block)
.
.TP
\fB\-\-priority=rt\fR
@ -252,7 +256,7 @@ set process priority to real\-time
Specify a strategy used by a match finder\.
.
.IP
There are 8 strategies numbered from 0 to 7, from faster to stronger: 0=ZSTD_fast, 1=ZSTD_dfast, 2=ZSTD_greedy, 3=ZSTD_lazy, 4=ZSTD_lazy2, 5=ZSTD_btlazy2, 6=ZSTD_btopt, 7=ZSTD_btopt2\.
There are 8 strategies numbered from 1 to 8, from faster to stronger: 1=ZSTD_fast, 2=ZSTD_dfast, 3=ZSTD_greedy, 4=ZSTD_lazy, 5=ZSTD_lazy2, 6=ZSTD_btlazy2, 7=ZSTD_btopt, 8=ZSTD_btultra\.
.
.TP
\fBwindowLog\fR=\fIwlog\fR, \fBwlog\fR=\fIwlog\fR
@ -306,7 +310,7 @@ The minimum \fIslen\fR is 3 and the maximum is 7\.
Specify the minimum match length that causes a match finder to stop searching for better matches\.
.
.IP
A larger minimum match length usually improves compression ratio but decreases compression speed\. This option is only used with strategies ZSTD_btopt and ZSTD_btopt2\.
A larger minimum match length usually improves compression ratio but decreases compression speed\. This option is only used with strategies ZSTD_btopt and ZSTD_btultra\.
.
.IP
The minimum \fItlen\fR is 4 and the maximum is 999\.

View File

@ -93,6 +93,10 @@ the last one takes effect.
* `--train FILEs`:
Use FILEs as a training set to create a dictionary.
The training set should contain a lot of small files (> 100).
* `-l`, `--list`:
Display information related to a zstd compressed file, such as size, ratio, and checksum.
Some of these fields may not be available.
This command can be augmented with the `-v` modifier.
### Operation modifiers
@ -230,8 +234,8 @@ BENCHMARK
benchmark file(s) using multiple compression levels, from `-b#` to `-e#` (inclusive)
* `-i#`:
minimum evaluation time, in seconds (default : 3s), benchmark mode only
* `-B#`:
cut file into independent blocks of size # (default: no block)
* `-B#`, `--block-size=#`:
cut file(s) into independent blocks of size # (default: no block)
* `--priority=rt`:
set process priority to real-time
@ -250,9 +254,9 @@ The list of available _options_:
- `strategy`=_strat_, `strat`=_strat_:
Specify a strategy used by a match finder.
There are 8 strategies numbered from 0 to 7, from faster to stronger:
0=ZSTD\_fast, 1=ZSTD\_dfast, 2=ZSTD\_greedy, 3=ZSTD\_lazy,
4=ZSTD\_lazy2, 5=ZSTD\_btlazy2, 6=ZSTD\_btopt, 7=ZSTD\_btopt2.
There are 8 strategies numbered from 1 to 8, from faster to stronger:
1=ZSTD\_fast, 2=ZSTD\_dfast, 3=ZSTD\_greedy, 4=ZSTD\_lazy,
5=ZSTD\_lazy2, 6=ZSTD\_btlazy2, 7=ZSTD\_btopt, 8=ZSTD\_btultra.
- `windowLog`=_wlog_, `wlog`=_wlog_:
Specify the maximum number of bits for a match distance.
@ -304,7 +308,7 @@ The list of available _options_:
A larger minimum match length usually improves compression ratio but
decreases compression speed.
This option is only used with strategies ZSTD_btopt and ZSTD_btopt2.
This option is only used with strategies ZSTD_btopt and ZSTD_btultra.
The minimum _tlen_ is 4 and the maximum is 999.

View File

@ -56,7 +56,9 @@
#define ZSTD_GUNZIP "gunzip"
#define ZSTD_GZCAT "gzcat"
#define ZSTD_LZMA "lzma"
#define ZSTD_UNLZMA "unlzma"
#define ZSTD_XZ "xz"
#define ZSTD_UNXZ "unxz"
#define KB *(1 <<10)
#define MB *(1 <<20)
@ -117,6 +119,7 @@ static int usage_advanced(const char* programName)
DISPLAY( " -v : verbose mode; specify multiple times to increase verbosity\n");
DISPLAY( " -q : suppress warnings; specify twice to suppress errors too\n");
DISPLAY( " -c : force write to standard output, even if it is the console\n");
DISPLAY( " -l : print information about zstd compressed files.\n");
#ifndef ZSTD_NOCOMPRESS
DISPLAY( "--ultra : enable levels beyond %i, up to %i (requires more memory)\n", ZSTDCLI_CLEVEL_MAX, ZSTD_maxCLevel());
#ifdef ZSTD_MULTITHREAD
@ -148,6 +151,7 @@ static int usage_advanced(const char* programName)
#endif
#endif
DISPLAY( " -M# : Set a memory usage limit for decompression \n");
DISPLAY( "--list : list information about a zstd compressed file \n");
DISPLAY( "-- : All arguments after \"--\" are treated as files \n");
#ifndef ZSTD_NODICT
DISPLAY( "\n");
@ -244,7 +248,7 @@ static unsigned longCommandWArg(const char** stringPtr, const char* longCommand)
* @return 1 means that cover parameters were correct
* @return 0 in case of malformed parameters
*/
static unsigned parseCoverParameters(const char* stringPtr, COVER_params_t* params)
static unsigned parseCoverParameters(const char* stringPtr, ZDICT_cover_params_t* params)
{
memset(params, 0, sizeof(*params));
for (; ;) {
@ -273,9 +277,9 @@ static unsigned parseLegacyParameters(const char* stringPtr, unsigned* selectivi
return 1;
}
static COVER_params_t defaultCoverParams(void)
static ZDICT_cover_params_t defaultCoverParams(void)
{
COVER_params_t params;
ZDICT_cover_params_t params;
memset(&params, 0, sizeof(params));
params.d = 8;
params.steps = 4;
@ -298,7 +302,7 @@ static unsigned parseCompressionParameters(const char* stringPtr, ZSTD_compressi
if (longCommandWArg(&stringPtr, "searchLog=") || longCommandWArg(&stringPtr, "slog=")) { params->searchLog = readU32FromChar(&stringPtr); if (stringPtr[0]==',') { stringPtr++; continue; } else break; }
if (longCommandWArg(&stringPtr, "searchLength=") || longCommandWArg(&stringPtr, "slen=")) { params->searchLength = readU32FromChar(&stringPtr); if (stringPtr[0]==',') { stringPtr++; continue; } else break; }
if (longCommandWArg(&stringPtr, "targetLength=") || longCommandWArg(&stringPtr, "tlen=")) { params->targetLength = readU32FromChar(&stringPtr); if (stringPtr[0]==',') { stringPtr++; continue; } else break; }
if (longCommandWArg(&stringPtr, "strategy=") || longCommandWArg(&stringPtr, "strat=")) { params->strategy = (ZSTD_strategy)(1 + readU32FromChar(&stringPtr)); if (stringPtr[0]==',') { stringPtr++; continue; } else break; }
if (longCommandWArg(&stringPtr, "strategy=") || longCommandWArg(&stringPtr, "strat=")) { params->strategy = (ZSTD_strategy)(readU32FromChar(&stringPtr)); if (stringPtr[0]==',') { stringPtr++; continue; } else break; }
if (longCommandWArg(&stringPtr, "overlapLog=") || longCommandWArg(&stringPtr, "ovlog=")) { g_overlapLog = readU32FromChar(&stringPtr); if (stringPtr[0]==',') { stringPtr++; continue; } else break; }
return 0;
}
@ -310,7 +314,7 @@ static unsigned parseCompressionParameters(const char* stringPtr, ZSTD_compressi
}
typedef enum { zom_compress, zom_decompress, zom_test, zom_bench, zom_train } zstd_operation_mode;
typedef enum { zom_compress, zom_decompress, zom_test, zom_bench, zom_train, zom_list } zstd_operation_mode;
#define CLEAN_RETURN(i) { operationResult = (i); goto _end; }
@ -354,7 +358,7 @@ int main(int argCount, const char* argv[])
unsigned fileNamesNb;
#endif
#ifndef ZSTD_NODICT
COVER_params_t coverParams = defaultCoverParams();
ZDICT_cover_params_t coverParams = defaultCoverParams();
int cover = 1;
#endif
@ -377,7 +381,9 @@ int main(int argCount, const char* argv[])
if (exeNameMatch(programName, ZSTD_GUNZIP)) { operation=zom_decompress; FIO_setRemoveSrcFile(1); } /* behave like gunzip */
if (exeNameMatch(programName, ZSTD_GZCAT)) { operation=zom_decompress; forceStdout=1; FIO_overwriteMode(); outFileName=stdoutmark; g_displayLevel=1; } /* behave like gzcat */
if (exeNameMatch(programName, ZSTD_LZMA)) { suffix = LZMA_EXTENSION; FIO_setCompressionType(FIO_lzmaCompression); FIO_setRemoveSrcFile(1); } /* behave like lzma */
if (exeNameMatch(programName, ZSTD_UNLZMA)) { operation=zom_decompress; FIO_setCompressionType(FIO_lzmaCompression); FIO_setRemoveSrcFile(1); } /* behave like unlzma */
if (exeNameMatch(programName, ZSTD_XZ)) { suffix = XZ_EXTENSION; FIO_setCompressionType(FIO_xzCompression); FIO_setRemoveSrcFile(1); } /* behave like xz */
if (exeNameMatch(programName, ZSTD_UNXZ)) { operation=zom_decompress; FIO_setCompressionType(FIO_xzCompression); FIO_setRemoveSrcFile(1); } /* behave like unxz */
memset(&compressionParams, 0, sizeof(compressionParams));
/* command switches */
@ -401,6 +407,7 @@ int main(int argCount, const char* argv[])
if (argument[1]=='-') {
/* long commands (--long-word) */
if (!strcmp(argument, "--")) { nextArgumentsAreFiles=1; continue; } /* only file names allowed from now on */
if (!strcmp(argument, "--list")) { operation=zom_list; continue; }
if (!strcmp(argument, "--compress")) { operation=zom_compress; continue; }
if (!strcmp(argument, "--decompress")) { operation=zom_decompress; continue; }
if (!strcmp(argument, "--uncompress")) { operation=zom_decompress; continue; }
@ -531,7 +538,7 @@ int main(int argCount, const char* argv[])
argument++;
memLimit = readU32FromChar(&argument);
break;
case 'l': operation=zom_list; argument++; break;
#ifdef UTIL_HAS_CREATEFILELIST
/* recursive */
case 'r': recursive=1; argument++; break;
@ -672,7 +679,10 @@ int main(int argCount, const char* argv[])
}
}
#endif
if (operation == zom_list) {
int const ret = FIO_listMultipleFiles(filenameIdx, filenameTable, g_displayLevel);
CLEAN_RETURN(ret);
}
/* Check if benchmark is selected */
if (operation==zom_bench) {
#ifndef ZSTD_NOBENCH
@ -689,20 +699,20 @@ int main(int argCount, const char* argv[])
/* Check if dictionary builder is selected */
if (operation==zom_train) {
#ifndef ZSTD_NODICT
ZDICT_params_t zParams;
zParams.compressionLevel = dictCLevel;
zParams.notificationLevel = g_displayLevel;
zParams.dictID = dictID;
if (cover) {
int const optimize = !coverParams.k || !coverParams.d;
coverParams.nbThreads = nbThreads;
coverParams.compressionLevel = dictCLevel;
coverParams.notificationLevel = g_displayLevel;
coverParams.dictID = dictID;
coverParams.zParams = zParams;
operationResult = DiB_trainFromFiles(outFileName, maxDictSize, filenameTable, filenameIdx, NULL, &coverParams, optimize);
} else {
ZDICT_params_t dictParams;
ZDICT_legacy_params_t dictParams;
memset(&dictParams, 0, sizeof(dictParams));
dictParams.compressionLevel = dictCLevel;
dictParams.selectivityLevel = dictSelect;
dictParams.notificationLevel = g_displayLevel;
dictParams.dictID = dictID;
dictParams.zParams = zParams;
operationResult = DiB_trainFromFiles(outFileName, maxDictSize, filenameTable, filenameIdx, &dictParams, NULL, 0);
}
#endif

View File

@ -25,17 +25,18 @@ PRGDIR = ../programs
PYTHON ?= python3
TESTARTEFACT := versionsTest namespaceTest
DEBUGFLAGS=-g -DZSTD_DEBUG=1
CPPFLAGS+= -I$(ZSTDDIR) -I$(ZSTDDIR)/common -I$(ZSTDDIR)/compress \
-I$(ZSTDDIR)/dictBuilder -I$(ZSTDDIR)/deprecated -I$(PRGDIR) \
$(DEBUGFLAG)
CFLAGS ?= -O3
CFLAGS += -Wall -Wextra -Wcast-qual -Wcast-align -Wshadow \
-Wstrict-aliasing=1 -Wswitch-enum -Wdeclaration-after-statement \
-Wstrict-prototypes -Wundef -Wformat-security
CFLAGS += $(MOREFLAGS)
FLAGS = $(CPPFLAGS) $(CFLAGS) $(LDFLAGS)
DEBUGLEVEL= 1
DEBUGFLAGS= -g -DZSTD_DEBUG=$(DEBUGLEVEL)
CPPFLAGS += -I$(ZSTDDIR) -I$(ZSTDDIR)/common -I$(ZSTDDIR)/compress \
-I$(ZSTDDIR)/dictBuilder -I$(ZSTDDIR)/deprecated -I$(PRGDIR)
CFLAGS ?= -O3
CFLAGS += -Wall -Wextra -Wcast-qual -Wcast-align -Wshadow \
-Wstrict-aliasing=1 -Wswitch-enum -Wdeclaration-after-statement \
-Wstrict-prototypes -Wundef -Wformat-security \
-Wvla -Wformat=2 -Winit-self -Wfloat-equal -Wwrite-strings \
-Wredundant-decls
CFLAGS += $(DEBUGFLAGS) $(MOREFLAGS)
FLAGS = $(CPPFLAGS) $(CFLAGS) $(LDFLAGS)
ZSTDCOMMON_FILES := $(ZSTDDIR)/common/*.c
@ -92,11 +93,12 @@ zstd-nolegacy:
gzstd:
$(MAKE) -C $(PRGDIR) $@
fullbench : $(ZSTD_FILES) $(PRGDIR)/datagen.c fullbench.c
$(CC) $(FLAGS) $^ -o $@$(EXT)
fullbench32 : $(ZSTD_FILES) $(PRGDIR)/datagen.c fullbench.c
$(CC) -m32 $(FLAGS) $^ -o $@$(EXT)
fullbench32: CPPFLAGS += -m32
fullbench fullbench32 : CPPFLAGS += $(MULTITHREAD_CPP)
fullbench fullbench32 : LDFLAGS += $(MULTITHREAD_LD)
fullbench fullbench32 : DEBUGFLAGS = # turn off assert() for speed measurements
fullbench fullbench32 : $(ZSTD_FILES) $(PRGDIR)/datagen.c fullbench.c
$(CC) $(FLAGS) $^ -o $@$(EXT)
fullbench-lib: $(PRGDIR)/datagen.c fullbench.c
$(MAKE) -C $(ZSTDDIR) libzstd.a
@ -157,7 +159,7 @@ zstreamtest-dll : $(ZSTDDIR)/common/xxhash.c $(PRGDIR)/datagen.c zstreamtest.c
$(MAKE) -C $(ZSTDDIR) libzstd
$(CC) $(CPPFLAGS) $(CFLAGS) $^ $(LDFLAGS) -o $@$(EXT)
paramgrill : DEBUGFLAG =
paramgrill : DEBUGFLAGS =
paramgrill : $(ZSTD_FILES) $(PRGDIR)/datagen.c paramgrill.c
$(CC) $(FLAGS) $^ -lm -o $@$(EXT)
@ -178,7 +180,7 @@ legacy : CPPFLAGS+= -I$(ZSTDDIR)/legacy
legacy : $(ZSTD_FILES) $(wildcard $(ZSTDDIR)/legacy/*.c) legacy.c
$(CC) $(FLAGS) $^ -o $@$(EXT)
decodecorpus : $(filter-out $(ZSTDDIR)/compress/zstd_compress.c, $(wildcard $(ZSTD_FILES))) decodecorpus.c
decodecorpus : $(filter-out $(ZSTDDIR)/compress/zstd_compress.c, $(wildcard $(ZSTD_FILES))) $(ZDICT_FILES) decodecorpus.c
$(CC) $(FLAGS) $^ -o $@$(EXT) -lm
symbols : symbols.c
@ -222,7 +224,7 @@ clean:
ifneq (,$(filter $(shell uname),Linux Darwin GNU/kFreeBSD GNU OpenBSD FreeBSD NetBSD DragonFly SunOS))
HOST_OS = POSIX
valgrindTest: VALGRIND = valgrind --leak-check=full --error-exitcode=1
valgrindTest: VALGRIND = valgrind --leak-check=full --show-leak-kinds=all --error-exitcode=1
valgrindTest: zstd datagen fuzzer fullbench
@echo "\n ---- valgrind tests : memory analyzer ----"
$(VALGRIND) ./datagen -g50M > $(VOID)
@ -270,7 +272,7 @@ endif
test32: test-zstd32 test-fullbench32 test-fuzzer32 test-zstream32
test-all: test test32 valgrindTest
test-all: test test32 valgrindTest test-decodecorpus-cli
test-zstd: ZSTD = $(PRGDIR)/zstd
test-zstd: zstd zstd-playTests
@ -319,6 +321,8 @@ test-zbuff32: zbufftest32
test-zstream: zstreamtest
$(QEMU_SYS) ./zstreamtest $(ZSTREAM_TESTTIME) $(FUZZER_FLAGS)
$(QEMU_SYS) ./zstreamtest --mt $(ZSTREAM_TESTTIME) $(FUZZER_FLAGS)
$(QEMU_SYS) ./zstreamtest --newapi $(ZSTREAM_TESTTIME) $(FUZZER_FLAGS)
test-zstream32: zstreamtest32
$(QEMU_SYS) ./zstreamtest32 $(ZSTREAM_TESTTIME) $(FUZZER_FLAGS)
@ -338,6 +342,39 @@ test-legacy: legacy
test-decodecorpus: decodecorpus
$(QEMU_SYS) ./decodecorpus -t $(DECODECORPUS_TESTTIME)
test-decodecorpus-cli: decodecorpus
@echo "\n ---- decodecorpus basic cli tests ----"
@mkdir testdir
./decodecorpus -n5 -otestdir -ptestdir
@cd testdir && \
$(ZSTD) -d z000000.zst -o tmp0 && \
$(ZSTD) -d z000001.zst -o tmp1 && \
$(ZSTD) -d z000002.zst -o tmp2 && \
$(ZSTD) -d z000003.zst -o tmp3 && \
$(ZSTD) -d z000004.zst -o tmp4 && \
diff z000000 tmp0 && \
diff z000001 tmp1 && \
diff z000002 tmp2 && \
diff z000003 tmp3 && \
diff z000004 tmp4 && \
rm ./* && \
cd ..
@echo "\n ---- decodecorpus dictionary cli tests ----"
./decodecorpus -n5 -otestdir -ptestdir --use-dict=1MB
@cd testdir && \
$(ZSTD) -d z000000.zst -D dictionary -o tmp0 && \
$(ZSTD) -d z000001.zst -D dictionary -o tmp1 && \
$(ZSTD) -d z000002.zst -D dictionary -o tmp2 && \
$(ZSTD) -d z000003.zst -D dictionary -o tmp3 && \
$(ZSTD) -d z000004.zst -D dictionary -o tmp4 && \
diff z000000 tmp0 && \
diff z000001 tmp1 && \
diff z000002 tmp2 && \
diff z000003 tmp3 && \
diff z000004 tmp4 && \
cd ..
@rm -rf testdir
test-pool: pool
$(QEMU_SYS) ./pool

View File

@ -48,7 +48,8 @@ static int usage(const char* programName)
DISPLAY( "Arguments :\n");
DISPLAY( " -g# : generate # data (default:%i)\n", SIZE_DEFAULT);
DISPLAY( " -s# : Select seed (default:%i)\n", SEED_DEFAULT);
DISPLAY( " -P# : Select compressibility in %% (default:%i%%)\n", COMPRESSIBILITY_DEFAULT);
DISPLAY( " -P# : Select compressibility in %% (default:%i%%)\n",
COMPRESSIBILITY_DEFAULT);
DISPLAY( " -h : display help and exit\n");
return 0;
}
@ -56,7 +57,7 @@ static int usage(const char* programName)
int main(int argc, const char** argv)
{
double proba = (double)COMPRESSIBILITY_DEFAULT / 100;
unsigned probaU32 = COMPRESSIBILITY_DEFAULT;
double litProba = 0.0;
U64 size = SIZE_DEFAULT;
U32 seed = SEED_DEFAULT;
@ -94,11 +95,10 @@ int main(int argc, const char** argv)
break;
case 'P':
argument++;
proba=0.0;
probaU32 = 0;
while ((*argument>='0') && (*argument<='9'))
proba *= 10, proba += *argument++ - '0';
if (proba>100.) proba=100.;
proba /= 100.;
probaU32 *= 10, probaU32 += *argument++ - '0';
if (probaU32>100) probaU32 = 100;
break;
case 'L': /* hidden argument : Literal distribution probability */
argument++;
@ -117,11 +117,12 @@ int main(int argc, const char** argv)
}
} } } /* for(argNb=1; argNb<argc; argNb++) */
DISPLAYLEVEL(4, "Data Generator \n");
DISPLAYLEVEL(4, "Compressible data Generator \n");
if (probaU32!=COMPRESSIBILITY_DEFAULT)
DISPLAYLEVEL(3, "Compressibility : %i%%\n", probaU32);
DISPLAYLEVEL(3, "Seed = %u \n", seed);
if (proba!=COMPRESSIBILITY_DEFAULT) DISPLAYLEVEL(3, "Compressibility : %i%%\n", (U32)(proba*100));
RDG_genStdout(size, proba, litProba, seed);
RDG_genStdout(size, (double)probaU32/100, litProba, seed);
DISPLAYLEVEL(1, "\n");
return 0;

View File

@ -18,6 +18,8 @@
#include "zstd.h"
#include "zstd_internal.h"
#include "mem.h"
#define ZDICT_STATIC_LINKING_ONLY
#include "zdict.h"
// Direct access to internal compression functions is required
#include "zstd_compress.c"
@ -73,8 +75,6 @@ static clock_t clockSpan(clock_t cStart)
/*-*******************************************************
* Random function
*********************************************************/
#define CLAMP(x, a, b) ((x) < (a) ? (a) : ((x) > (b) ? (b) : (x)))
static unsigned RAND(unsigned* src)
{
#define RAND_rotl32(x,r) ((x << r) | (x >> (32 - r)))
@ -231,6 +231,12 @@ typedef struct {
cblockStats_t oldStats; /* so they can be rolled back if uncompressible */
} frame_t;
typedef struct {
int useDict;
U32 dictID;
size_t dictContentSize;
BYTE* dictContent;
} dictInfo;
/*-*******************************************************
* Generator Functions
*********************************************************/
@ -240,7 +246,7 @@ struct {
} opts; /* advanced options on generation */
/* Generate and write a random frame header */
static void writeFrameHeader(U32* seed, frame_t* frame)
static void writeFrameHeader(U32* seed, frame_t* frame, dictInfo info)
{
BYTE* const op = frame->data;
size_t pos = 0;
@ -306,15 +312,26 @@ static void writeFrameHeader(U32* seed, frame_t* frame)
pos += 4;
{
/*
* fcsCode: 2-bit flag specifying how many bytes used to represent Frame_Content_Size (bits 7-6)
* singleSegment: 1-bit flag describing if data must be regenerated within a single continuous memory segment. (bit 5)
* contentChecksumFlag: 1-bit flag that is set if frame includes checksum at the end -- set to 1 below (bit 2)
* dictBits: 2-bit flag describing how many bytes Dictionary_ID uses -- set to 3 (bits 1-0)
* For more information: https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#frame_header
*/
int const dictBits = info.useDict ? 3 : 0;
BYTE const frameHeaderDescriptor =
(BYTE) ((fcsCode << 6) | (singleSegment << 5) | (1 << 2));
(BYTE) ((fcsCode << 6) | (singleSegment << 5) | (1 << 2) | dictBits);
op[pos++] = frameHeaderDescriptor;
}
if (!singleSegment) {
op[pos++] = windowByte;
}
if (info.useDict) {
MEM_writeLE32(op + pos, (U32) info.dictID);
pos += 4;
}
if (contentSizeFlag) {
switch (fcsCode) {
default: /* Impossible */
@ -605,7 +622,7 @@ static inline void initSeqStore(seqStore_t *seqStore) {
/* Randomly generate sequence commands */
static U32 generateSequences(U32* seed, frame_t* frame, seqStore_t* seqStore,
size_t contentSize, size_t literalsSize)
size_t contentSize, size_t literalsSize, dictInfo info)
{
/* The total length of all the matches */
size_t const remainingMatch = contentSize - literalsSize;
@ -629,7 +646,6 @@ static U32 generateSequences(U32* seed, frame_t* frame, seqStore_t* seqStore,
}
DISPLAYLEVEL(5, " total match lengths: %u\n", (U32)remainingMatch);
for (i = 0; i < numSequences; i++) {
/* Generate match and literal lengths by exponential distribution to
* ensure nice numbers */
@ -654,14 +670,33 @@ static U32 generateSequences(U32* seed, frame_t* frame, seqStore_t* seqStore,
memcpy(srcPtr, literals, literalLen);
srcPtr += literalLen;
do {
if (RAND(seed) & 7) {
/* do a normal offset */
U32 const dataDecompressed = (U32)((BYTE*)srcPtr-(BYTE*)frame->srcStart);
offset = (RAND(seed) %
MIN(frame->header.windowSize,
(size_t)((BYTE*)srcPtr - (BYTE*)frame->srcStart))) +
1;
if (info.useDict && (RAND(seed) & 1) && i + 1 != numSequences && dataDecompressed < frame->header.windowSize) {
/* need to occasionally generate offsets that go past the start */
/* including i+1 != numSequences because the last sequence has to adhere to the predetermined contentSize */
U32 lenPastStart = (RAND(seed) % info.dictContentSize) + 1;
offset = (U32)((BYTE*)srcPtr - (BYTE*)frame->srcStart)+lenPastStart;
if (offset > frame->header.windowSize) {
if (lenPastStart < MIN_SEQ_LEN) {
/* when offset > windowSize, matchLen bound by end of dictionary (lenPastStart) */
/* this also means that lenPastStart must be greater than MIN_SEQ_LEN */
/* make sure lenPastStart does not go past dictionary start though */
lenPastStart = MIN(lenPastStart+MIN_SEQ_LEN, (U32)info.dictContentSize);
offset = (U32)((BYTE*)srcPtr - (BYTE*)frame->srcStart) + lenPastStart;
}
{
U32 const matchLenBound = MIN(frame->header.windowSize, lenPastStart);
matchLen = MIN(matchLen, matchLenBound);
}
}
}
offsetCode = offset + ZSTD_REP_MOVE;
repIndex = 2;
} else {
@ -677,11 +712,20 @@ static U32 generateSequences(U32* seed, frame_t* frame, seqStore_t* seqStore,
repIndex = MIN(2, offsetCode + 1);
}
}
} while (offset > (size_t)((BYTE*)srcPtr - (BYTE*)frame->srcStart) || offset == 0);
} while (((!info.useDict) && (offset > (size_t)((BYTE*)srcPtr - (BYTE*)frame->srcStart))) || offset == 0);
{ size_t j;
{
size_t j;
BYTE* const dictEnd = info.dictContent + info.dictContentSize;
for (j = 0; j < matchLen; j++) {
*srcPtr = *(srcPtr-offset);
if ((U32)((BYTE*)srcPtr - (BYTE*)frame->srcStart) < offset) {
/* copy from dictionary instead of literals */
size_t const dictOffset = offset - (srcPtr - (BYTE*)frame->srcStart);
*srcPtr = *(dictEnd - dictOffset);
}
else {
*srcPtr = *(srcPtr-offset);
}
srcPtr++;
}
}
@ -931,7 +975,7 @@ static size_t writeSequences(U32* seed, frame_t* frame, seqStore_t* seqStorePtr,
}
static size_t writeSequencesBlock(U32* seed, frame_t* frame, size_t contentSize,
size_t literalsSize)
size_t literalsSize, dictInfo info)
{
seqStore_t seqStore;
size_t numSequences;
@ -940,14 +984,14 @@ static size_t writeSequencesBlock(U32* seed, frame_t* frame, size_t contentSize,
initSeqStore(&seqStore);
/* randomly generate sequences */
numSequences = generateSequences(seed, frame, &seqStore, contentSize, literalsSize);
numSequences = generateSequences(seed, frame, &seqStore, contentSize, literalsSize, info);
/* write them out to the frame data */
CHECKERR(writeSequences(seed, frame, &seqStore, numSequences));
return numSequences;
}
static size_t writeCompressedBlock(U32* seed, frame_t* frame, size_t contentSize)
static size_t writeCompressedBlock(U32* seed, frame_t* frame, size_t contentSize, dictInfo info)
{
BYTE* const blockStart = (BYTE*)frame->data;
size_t literalsSize;
@ -959,7 +1003,7 @@ static size_t writeCompressedBlock(U32* seed, frame_t* frame, size_t contentSize
DISPLAYLEVEL(4, " literals size: %u\n", (U32)literalsSize);
nbSeq = writeSequencesBlock(seed, frame, contentSize, literalsSize);
nbSeq = writeSequencesBlock(seed, frame, contentSize, literalsSize, info);
DISPLAYLEVEL(4, " number of sequences: %u\n", (U32)nbSeq);
@ -967,7 +1011,7 @@ static size_t writeCompressedBlock(U32* seed, frame_t* frame, size_t contentSize
}
static void writeBlock(U32* seed, frame_t* frame, size_t contentSize,
int lastBlock)
int lastBlock, dictInfo info)
{
int const blockTypeDesc = RAND(seed) % 8;
size_t blockSize;
@ -1007,7 +1051,7 @@ static void writeBlock(U32* seed, frame_t* frame, size_t contentSize,
frame->oldStats = frame->stats;
frame->data = op;
compressedSize = writeCompressedBlock(seed, frame, contentSize);
compressedSize = writeCompressedBlock(seed, frame, contentSize, info);
if (compressedSize > contentSize) {
blockType = 0;
memcpy(op, frame->src, contentSize);
@ -1033,7 +1077,7 @@ static void writeBlock(U32* seed, frame_t* frame, size_t contentSize,
frame->data = op;
}
static void writeBlocks(U32* seed, frame_t* frame)
static void writeBlocks(U32* seed, frame_t* frame, dictInfo info)
{
size_t contentLeft = frame->header.contentSize;
size_t const maxBlockSize = MIN(MAX_BLOCK_SIZE, frame->header.windowSize);
@ -1056,7 +1100,7 @@ static void writeBlocks(U32* seed, frame_t* frame)
}
}
writeBlock(seed, frame, blockContentSize, lastBlock);
writeBlock(seed, frame, blockContentSize, lastBlock, info);
contentLeft -= blockContentSize;
if (lastBlock) break;
@ -1121,20 +1165,102 @@ static void initFrame(frame_t* fr)
}
/* Return the final seed */
static U32 generateFrame(U32 seed, frame_t* fr)
static U32 generateFrame(U32 seed, frame_t* fr, dictInfo info)
{
/* generate a complete frame */
DISPLAYLEVEL(1, "frame seed: %u\n", seed);
initFrame(fr);
writeFrameHeader(&seed, fr);
writeBlocks(&seed, fr);
writeFrameHeader(&seed, fr, info);
writeBlocks(&seed, fr, info);
writeChecksum(fr);
return seed;
}
/*_*******************************************************
* Dictionary Helper Functions
*********************************************************/
/* returns 0 if successful, otherwise returns 1 upon error */
static int genRandomDict(U32 dictID, U32 seed, size_t dictSize, BYTE* fullDict){
/* allocate space for samples */
int ret = 0;
unsigned const numSamples = 4;
size_t sampleSizes[4];
BYTE* const samples = malloc(5000*sizeof(BYTE));
if (samples == NULL) {
DISPLAY("Error: could not allocate space for samples\n");
return 1;
}
/* generate samples */
{
unsigned literalValue = 1;
unsigned samplesPos = 0;
size_t currSize = 1;
while (literalValue <= 4) {
sampleSizes[literalValue - 1] = currSize;
{
size_t k;
for (k = 0; k < currSize; k++) {
*(samples + (samplesPos++)) = (BYTE)literalValue;
}
}
literalValue++;
currSize *= 16;
}
}
{
/* create variables */
size_t dictWriteSize = 0;
ZDICT_params_t zdictParams;
size_t const headerSize = MAX(dictSize/4, 256);
size_t const dictContentSize = dictSize - headerSize;
BYTE* const dictContent = fullDict + headerSize;
if (dictContentSize < ZDICT_CONTENTSIZE_MIN || dictSize < ZDICT_DICTSIZE_MIN) {
DISPLAY("Error: dictionary size is too small\n");
ret = 1;
goto exitGenRandomDict;
}
/* init dictionary params */
memset(&zdictParams, 0, sizeof(zdictParams));
zdictParams.dictID = dictID;
zdictParams.notificationLevel = 1;
/* fill in dictionary content */
RAND_buffer(&seed, (void*)dictContent, dictContentSize);
/* finalize dictionary with random samples */
dictWriteSize = ZDICT_finalizeDictionary(fullDict, dictSize,
dictContent, dictContentSize,
samples, sampleSizes, numSamples,
zdictParams);
if (ZDICT_isError(dictWriteSize)) {
DISPLAY("Could not finalize dictionary: %s\n", ZDICT_getErrorName(dictWriteSize));
ret = 1;
}
}
exitGenRandomDict:
free(samples);
return ret;
}
static dictInfo initDictInfo(int useDict, size_t dictContentSize, BYTE* dictContent, U32 dictID){
/* allocate space statically */
dictInfo dictOp;
memset(&dictOp, 0, sizeof(dictOp));
dictOp.useDict = useDict;
dictOp.dictContentSize = dictContentSize;
dictOp.dictContent = dictContent;
dictOp.dictID = dictID;
return dictOp;
}
/*-*******************************************************
* Test Mode
*********************************************************/
@ -1196,6 +1322,65 @@ static size_t testDecodeStreaming(frame_t* fr)
return ret;
}
static size_t testDecodeWithDict(U32 seed)
{
/* create variables */
size_t const dictSize = RAND(&seed) % (10 << 20) + ZDICT_DICTSIZE_MIN + ZDICT_CONTENTSIZE_MIN;
U32 const dictID = RAND(&seed);
size_t errorDetected = 0;
BYTE* const fullDict = malloc(dictSize);
if (fullDict == NULL) {
return ERROR(GENERIC);
}
/* generate random dictionary */
{
int const ret = genRandomDict(dictID, seed, dictSize, fullDict);
if (ret != 0) {
errorDetected = ERROR(GENERIC);
goto dictTestCleanup;
}
}
{
frame_t fr;
/* generate frame */
{
size_t const headerSize = MAX(dictSize/4, 256);
size_t const dictContentSize = dictSize-headerSize;
BYTE* const dictContent = fullDict+headerSize;
dictInfo const info = initDictInfo(1, dictContentSize, dictContent, dictID);
seed = generateFrame(seed, &fr, info);
}
/* manually decompress and check difference */
{
ZSTD_DCtx* const dctx = ZSTD_createDCtx();
{
size_t const returnValue = ZSTD_decompress_usingDict(dctx, DECOMPRESSED_BUFFER, MAX_DECOMPRESSED_SIZE,
fr.dataStart, (BYTE*)fr.data - (BYTE*)fr.dataStart,
fullDict, dictSize);
if (ZSTD_isError(returnValue)) {
errorDetected = returnValue;
goto dictTestCleanup;
}
}
if (memcmp(DECOMPRESSED_BUFFER, fr.srcStart, (BYTE*)fr.src - (BYTE*)fr.srcStart) != 0) {
errorDetected = ERROR(corruption_detected);
goto dictTestCleanup;
}
ZSTD_freeDCtx(dctx);
}
}
dictTestCleanup:
free(fullDict);
return errorDetected;
}
static int runTestMode(U32 seed, unsigned numFiles, unsigned const testDurationS)
{
unsigned fnum;
@ -1209,28 +1394,39 @@ static int runTestMode(U32 seed, unsigned numFiles, unsigned const testDurationS
for (fnum = 0; fnum < numFiles || clockSpan(startClock) < maxClockSpan; fnum++) {
frame_t fr;
U32 const seedCopy = seed;
if (fnum < numFiles)
DISPLAYUPDATE("\r%u/%u ", fnum, numFiles);
else
DISPLAYUPDATE("\r%u ", fnum);
seed = generateFrame(seed, &fr);
{
dictInfo const info = initDictInfo(0, 0, NULL, 0);
seed = generateFrame(seed, &fr, info);
}
{ size_t const r = testDecodeSimple(&fr);
if (ZSTD_isError(r)) {
DISPLAY("Error in simple mode on test seed %u: %s\n", seed + fnum,
DISPLAY("Error in simple mode on test seed %u: %s\n", seedCopy,
ZSTD_getErrorName(r));
return 1;
}
}
{ size_t const r = testDecodeStreaming(&fr);
if (ZSTD_isError(r)) {
DISPLAY("Error in streaming mode on test seed %u: %s\n", seed + fnum,
DISPLAY("Error in streaming mode on test seed %u: %s\n", seedCopy,
ZSTD_getErrorName(r));
return 1;
}
}
{
/* don't create a dictionary that is too big */
size_t const r = testDecodeWithDict(seed);
if (ZSTD_isError(r)) {
DISPLAY("Error in dictionary mode on test seed %u: %s\n", seedCopy, ZSTD_getErrorName(r));
return 1;
}
}
}
DISPLAY("\r%u tests completed: ", fnum);
@ -1250,7 +1446,10 @@ static int generateFile(U32 seed, const char* const path,
DISPLAY("seed: %u\n", seed);
generateFrame(seed, &fr);
{
dictInfo const info = initDictInfo(0, 0, NULL, 0);
generateFrame(seed, &fr, info);
}
outputBuffer(fr.dataStart, (BYTE*)fr.data - (BYTE*)fr.dataStart, path);
if (origPath) {
@ -1272,7 +1471,10 @@ static int generateCorpus(U32 seed, unsigned numFiles, const char* const path,
DISPLAYUPDATE("\r%u/%u ", fnum, numFiles);
seed = generateFrame(seed, &fr);
{
dictInfo const info = initDictInfo(0, 0, NULL, 0);
seed = generateFrame(seed, &fr, info);
}
if (snprintf(outPath, MAX_PATH, "%s/z%06u.zst", path, fnum) + 1 > MAX_PATH) {
DISPLAY("Error: path too long\n");
@ -1294,6 +1496,93 @@ static int generateCorpus(U32 seed, unsigned numFiles, const char* const path,
return 0;
}
static int generateCorpusWithDict(U32 seed, unsigned numFiles, const char* const path,
const char* const origPath, const size_t dictSize)
{
char outPath[MAX_PATH];
BYTE* fullDict;
U32 const dictID = RAND(&seed);
int errorDetected = 0;
if (snprintf(outPath, MAX_PATH, "%s/dictionary", path) + 1 > MAX_PATH) {
DISPLAY("Error: path too long\n");
return 1;
}
/* allocate space for the dictionary */
fullDict = malloc(dictSize);
if (fullDict == NULL) {
DISPLAY("Error: could not allocate space for full dictionary.\n");
return 1;
}
/* randomly generate the dictionary */
{
int const ret = genRandomDict(dictID, seed, dictSize, fullDict);
if (ret != 0) {
errorDetected = ret;
goto dictCleanup;
}
}
/* write out dictionary */
if (numFiles != 0) {
if (snprintf(outPath, MAX_PATH, "%s/dictionary", path) + 1 > MAX_PATH) {
DISPLAY("Error: dictionary path too long\n");
errorDetected = 1;
goto dictCleanup;
}
outputBuffer(fullDict, dictSize, outPath);
}
else {
outputBuffer(fullDict, dictSize, "dictionary");
}
/* generate random compressed/decompressed files */
{
unsigned fnum;
for (fnum = 0; fnum < MAX(numFiles, 1); fnum++) {
frame_t fr;
DISPLAYUPDATE("\r%u/%u ", fnum, numFiles);
{
size_t const headerSize = MAX(dictSize/4, 256);
size_t const dictContentSize = dictSize-headerSize;
BYTE* const dictContent = fullDict+headerSize;
dictInfo const info = initDictInfo(1, dictContentSize, dictContent, dictID);
seed = generateFrame(seed, &fr, info);
}
if (numFiles != 0) {
if (snprintf(outPath, MAX_PATH, "%s/z%06u.zst", path, fnum) + 1 > MAX_PATH) {
DISPLAY("Error: path too long\n");
errorDetected = 1;
goto dictCleanup;
}
outputBuffer(fr.dataStart, (BYTE*)fr.data - (BYTE*)fr.dataStart, outPath);
if (origPath) {
if (snprintf(outPath, MAX_PATH, "%s/z%06u", origPath, fnum) + 1 > MAX_PATH) {
DISPLAY("Error: path too long\n");
errorDetected = 1;
goto dictCleanup;
}
outputBuffer(fr.srcStart, (BYTE*)fr.src - (BYTE*)fr.srcStart, outPath);
}
}
else {
outputBuffer(fr.dataStart, (BYTE*)fr.data - (BYTE*)fr.dataStart, path);
if (origPath) {
outputBuffer(fr.srcStart, (BYTE*)fr.src - (BYTE*)fr.srcStart, origPath);
}
}
}
}
dictCleanup:
free(fullDict);
return errorDetected;
}
/*_*******************************************************
* Command line
@ -1339,6 +1628,40 @@ static void advancedUsage(const char* programName)
DISPLAY( "\n");
DISPLAY( "Advanced arguments :\n");
DISPLAY( " --content-size : always include the content size in the frame header\n");
DISPLAY( " --use-dict=# : include a dictionary used to decompress the corpus\n");
}
/*! readU32FromChar() :
@return : unsigned integer value read from input in `char` format
allows and interprets K, KB, KiB, M, MB and MiB suffixes.
Will also modify `*stringPtr`, advancing it to the position where it stopped reading.
Note : function result can overflow if the digit string exceeds UINT_MAX */
static unsigned readU32FromChar(const char** stringPtr)
{
unsigned result = 0;
while ((**stringPtr >='0') && (**stringPtr <='9'))
result *= 10, result += **stringPtr - '0', (*stringPtr)++ ;
if ((**stringPtr=='K') || (**stringPtr=='M')) {
result <<= 10;
if (**stringPtr=='M') result <<= 10;
(*stringPtr)++ ;
if (**stringPtr=='i') (*stringPtr)++;
if (**stringPtr=='B') (*stringPtr)++;
}
return result;
}
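/* Editor's sketch (hypothetical helper, not part of this changeset): how the
 * parser above behaves on a size argument that carries a suffix. */
static unsigned example_readSize(void)
{
    const char* arg = "64KiB,rest";
    unsigned const value = readU32FromChar(&arg);   /* digits, then 'K' => 64 << 10 == 65536 */
    /* arg now points at ",rest" : the 'K', 'i' and 'B' characters were all consumed */
    return value;
}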
/** longCommandWArg() :
* check if *stringPtr is the same as longCommand.
* If yes, @return 1 and advances *stringPtr to the position which immediately follows longCommand.
* @return 0 and doesn't modify *stringPtr otherwise.
*/
static unsigned longCommandWArg(const char** stringPtr, const char* longCommand)
{
size_t const comSize = strlen(longCommand);
int const result = !strncmp(*stringPtr, longCommand, comSize);
if (result) *stringPtr += comSize;
return result;
}
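/* Editor's sketch (hypothetical helper, not part of this changeset): the two
 * helpers above combine to parse a long option such as "use-dict=10K"; the
 * leading dashes are already stripped by the caller, as in main() below. */
static int example_parseUseDict(const char* argument, unsigned* dictSizePtr)
{
    if (longCommandWArg(&argument, "use-dict=")) {   /* matched : argument now points past the '=' */
        *dictSizePtr = readU32FromChar(&argument);   /* "10K" => 10 << 10 */
        return 1;
    }
    return 0;   /* not this option : argument was left untouched */
}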
int main(int argc, char** argv)
@ -1350,6 +1673,8 @@ int main(int argc, char** argv)
int testMode = 0;
const char* path = NULL;
const char* origPath = NULL;
int useDict = 0;
unsigned dictSize = (10 << 10); /* 10 kB default */
int argNb;
@ -1410,6 +1735,9 @@ int main(int argc, char** argv)
argument++;
if (strcmp(argument, "content-size") == 0) {
opts.contentSize = 1;
} else if (longCommandWArg(&argument, "use-dict=")) {
dictSize = readU32FromChar(&argument);
useDict = 1;
} else {
advancedUsage(argv[0]);
return 1;
@ -1441,9 +1769,13 @@ int main(int argc, char** argv)
return 1;
}
if (numFiles == 0) {
if (numFiles == 0 && useDict == 0) {
return generateFile(seed, path, origPath);
} else {
} else if (useDict == 0){
return generateCorpus(seed, numFiles, path, origPath);
} else {
/* should generate files with a dictionary */
return generateCorpusWithDict(seed, numFiles, path, origPath, dictSize);
}
}


@ -14,19 +14,19 @@
#include "util.h" /* Compiler options, UTIL_GetFileSize */
#include <stdlib.h> /* malloc */
#include <stdio.h> /* fprintf, fopen, ftello64 */
#include <time.h> /* clock_t, clock, CLOCKS_PER_SEC */
#include "mem.h"
#include "mem.h" /* U32 */
#ifndef ZSTD_DLL_IMPORT
#include "zstd_internal.h" /* ZSTD_blockHeaderSize, blockType_e, KB, MB */
#define ZSTD_STATIC_LINKING_ONLY /* ZSTD_compressBegin, ZSTD_compressContinue, etc. */
#else
#define KB *(1 <<10)
#define MB *(1 <<20)
#define GB *(1U<<30)
typedef enum { bt_raw, bt_rle, bt_compressed, bt_reserved } blockType_e;
#endif
#include "zstd.h" /* ZSTD_VERSION_STRING */
#define ZSTD_STATIC_LINKING_ONLY /* ZSTD_compressBegin, ZSTD_compressContinue, etc. */
#include "zstd.h" /* ZSTD_versionString */
#include "util.h" /* time functions */
#include "datagen.h"
@ -35,7 +35,7 @@
**************************************/
#define PROGRAM_DESCRIPTION "Zstandard speed analyzer"
#define AUTHOR "Yann Collet"
#define WELCOME_MESSAGE "*** %s %s %i-bits, by %s (%s) ***\n", PROGRAM_DESCRIPTION, ZSTD_VERSION_STRING, (int)(sizeof(void*)*8), AUTHOR, __DATE__
#define WELCOME_MESSAGE "*** %s %s %i-bits, by %s (%s) ***\n", PROGRAM_DESCRIPTION, ZSTD_versionString(), (int)(sizeof(void*)*8), AUTHOR, __DATE__
#define NBLOOPS 6
#define TIMELOOP_S 2
@ -69,12 +69,6 @@ static void BMK_SetNbIterations(U32 nbLoops)
/*_*******************************************************
* Private functions
*********************************************************/
static clock_t BMK_clockSpan( clock_t clockStart )
{
return clock() - clockStart; /* works even if overflow, span limited to <= ~30mn */
}
static size_t BMK_findMaxMem(U64 requiredMem)
{
size_t const step = 64 MB;
@ -116,8 +110,9 @@ size_t local_ZSTD_decompress(void* dst, size_t dstSize, void* buff2, const void*
return ZSTD_decompress(dst, dstSize, buff2, g_cSize);
}
#ifndef ZSTD_DLL_IMPORT
static ZSTD_DCtx* g_zdc = NULL;
#ifndef ZSTD_DLL_IMPORT
extern size_t ZSTD_decodeLiteralsBlock(ZSTD_DCtx* ctx, const void* src, size_t srcSize);
size_t local_ZSTD_decodeLiteralsBlock(void* dst, size_t dstSize, void* buff2, const void* src, size_t srcSize)
{
@ -153,6 +148,74 @@ size_t local_ZSTD_compressStream(void* dst, size_t dstCapacity, void* buff2, con
return buffOut.pos;
}
static size_t local_ZSTD_compress_generic_end(void* dst, size_t dstCapacity, void* buff2, const void* src, size_t srcSize)
{
ZSTD_outBuffer buffOut;
ZSTD_inBuffer buffIn;
(void)buff2;
ZSTD_CCtx_setParameter(g_cstream, ZSTD_p_compressionLevel, 1);
buffOut.dst = dst;
buffOut.size = dstCapacity;
buffOut.pos = 0;
buffIn.src = src;
buffIn.size = srcSize;
buffIn.pos = 0;
ZSTD_compress_generic(g_cstream, &buffOut, &buffIn, ZSTD_e_end);
return buffOut.pos;
}
static size_t local_ZSTD_compress_generic_continue(void* dst, size_t dstCapacity, void* buff2, const void* src, size_t srcSize)
{
ZSTD_outBuffer buffOut;
ZSTD_inBuffer buffIn;
(void)buff2;
ZSTD_CCtx_setParameter(g_cstream, ZSTD_p_compressionLevel, 1);
buffOut.dst = dst;
buffOut.size = dstCapacity;
buffOut.pos = 0;
buffIn.src = src;
buffIn.size = srcSize;
buffIn.pos = 0;
ZSTD_compress_generic(g_cstream, &buffOut, &buffIn, ZSTD_e_continue);
ZSTD_compress_generic(g_cstream, &buffOut, &buffIn, ZSTD_e_end);
return buffOut.pos;
}
static size_t local_ZSTD_compress_generic_T2_end(void* dst, size_t dstCapacity, void* buff2, const void* src, size_t srcSize)
{
ZSTD_outBuffer buffOut;
ZSTD_inBuffer buffIn;
(void)buff2;
ZSTD_CCtx_setParameter(g_cstream, ZSTD_p_compressionLevel, 1);
ZSTD_CCtx_setParameter(g_cstream, ZSTD_p_nbThreads, 2);
buffOut.dst = dst;
buffOut.size = dstCapacity;
buffOut.pos = 0;
buffIn.src = src;
buffIn.size = srcSize;
buffIn.pos = 0;
while (ZSTD_compress_generic(g_cstream, &buffOut, &buffIn, ZSTD_e_end)) {}
return buffOut.pos;
}
static size_t local_ZSTD_compress_generic_T2_continue(void* dst, size_t dstCapacity, void* buff2, const void* src, size_t srcSize)
{
ZSTD_outBuffer buffOut;
ZSTD_inBuffer buffIn;
(void)buff2;
ZSTD_CCtx_setParameter(g_cstream, ZSTD_p_compressionLevel, 1);
ZSTD_CCtx_setParameter(g_cstream, ZSTD_p_nbThreads, 2);
buffOut.dst = dst;
buffOut.size = dstCapacity;
buffOut.pos = 0;
buffIn.src = src;
buffIn.size = srcSize;
buffIn.pos = 0;
ZSTD_compress_generic(g_cstream, &buffOut, &buffIn, ZSTD_e_continue);
while(ZSTD_compress_generic(g_cstream, &buffOut, &buffIn, ZSTD_e_end)) {}
return buffOut.pos;
}
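/* Editor's sketch (not part of this changeset): the wrappers above all rely on
 * the same drain idiom for the then-experimental ZSTD_compress_generic API -
 * with ZSTD_e_end, a nonzero return value means data is still buffered inside
 * the context, so the call is repeated until it returns 0 or an error. The
 * helper name is editorial; the calls are the ones used above. */
static size_t example_compressOneShot(ZSTD_CCtx* cctx,
                                      void* dst, size_t dstCapacity,
                                      const void* src, size_t srcSize)
{
    ZSTD_outBuffer out = { dst, dstCapacity, 0 };
    ZSTD_inBuffer in = { src, srcSize, 0 };
    size_t remaining;
    do {
        remaining = ZSTD_compress_generic(cctx, &out, &in, ZSTD_e_end);
        if (ZSTD_isError(remaining)) return remaining;   /* pass the error code through */
    } while (remaining != 0);   /* 0 => frame fully written into out */
    return out.pos;             /* number of compressed bytes produced */
}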
static ZSTD_DStream* g_dstream= NULL;
static size_t local_ZSTD_decompressStream(void* dst, size_t dstCapacity, void* buff2, const void* src, size_t srcSize)
{
@ -170,8 +233,9 @@ static size_t local_ZSTD_decompressStream(void* dst, size_t dstCapacity, void* b
return buffOut.pos;
}
#ifndef ZSTD_DLL_IMPORT
static ZSTD_CCtx* g_zcc = NULL;
#ifndef ZSTD_DLL_IMPORT
size_t local_ZSTD_compressContinue(void* dst, size_t dstCapacity, void* buff2, const void* src, size_t srcSize)
{
(void)buff2;
@ -236,33 +300,45 @@ static size_t benchMem(const void* src, size_t srcSize, U32 benchNb)
switch(benchNb)
{
case 1:
benchFunction = local_ZSTD_compress; benchName = "ZSTD_compress";
benchFunction = local_ZSTD_compress; benchName = "compress(1)";
break;
case 2:
benchFunction = local_ZSTD_decompress; benchName = "ZSTD_decompress";
benchFunction = local_ZSTD_decompress; benchName = "decompress";
break;
#ifndef ZSTD_DLL_IMPORT
case 11:
benchFunction = local_ZSTD_compressContinue; benchName = "ZSTD_compressContinue";
benchFunction = local_ZSTD_compressContinue; benchName = "compressContinue(1)";
break;
case 12:
benchFunction = local_ZSTD_compressContinue_extDict; benchName = "ZSTD_compressContinue_extDict";
benchFunction = local_ZSTD_compressContinue_extDict; benchName = "compressContinue_extDict";
break;
case 13:
benchFunction = local_ZSTD_decompressContinue; benchName = "ZSTD_decompressContinue";
benchFunction = local_ZSTD_decompressContinue; benchName = "decompressContinue";
break;
case 31:
benchFunction = local_ZSTD_decodeLiteralsBlock; benchName = "ZSTD_decodeLiteralsBlock";
benchFunction = local_ZSTD_decodeLiteralsBlock; benchName = "decodeLiteralsBlock";
break;
case 32:
benchFunction = local_ZSTD_decodeSeqHeaders; benchName = "ZSTD_decodeSeqHeaders";
benchFunction = local_ZSTD_decodeSeqHeaders; benchName = "decodeSeqHeaders";
break;
#endif
case 41:
benchFunction = local_ZSTD_compressStream; benchName = "ZSTD_compressStream";
benchFunction = local_ZSTD_compressStream; benchName = "compressStream(1)";
break;
case 42:
benchFunction = local_ZSTD_decompressStream; benchName = "ZSTD_decompressStream";
benchFunction = local_ZSTD_decompressStream; benchName = "decompressStream";
break;
case 51:
benchFunction = local_ZSTD_compress_generic_continue; benchName = "compress_generic, continue";
break;
case 52:
benchFunction = local_ZSTD_compress_generic_end; benchName = "compress_generic, end";
break;
case 61:
benchFunction = local_ZSTD_compress_generic_T2_continue; benchName = "compress_generic, -T2, continue";
break;
case 62:
benchFunction = local_ZSTD_compress_generic_T2_end; benchName = "compress_generic, -T2, end";
break;
default :
return 0;
@ -276,6 +352,10 @@ static size_t benchMem(const void* src, size_t srcSize, U32 benchNb)
free(dstBuff); free(buff2);
return 12;
}
if (g_zcc==NULL) g_zcc = ZSTD_createCCtx();
if (g_zdc==NULL) g_zdc = ZSTD_createDCtx();
if (g_cstream==NULL) g_cstream = ZSTD_createCStream();
if (g_dstream==NULL) g_dstream = ZSTD_createDStream();
/* Preparation */
switch(benchNb)
@ -284,23 +364,15 @@ static size_t benchMem(const void* src, size_t srcSize, U32 benchNb)
g_cSize = ZSTD_compress(buff2, dstBuffSize, src, srcSize, 1);
break;
#ifndef ZSTD_DLL_IMPORT
case 11 :
if (g_zcc==NULL) g_zcc = ZSTD_createCCtx();
break;
case 12 :
if (g_zcc==NULL) g_zcc = ZSTD_createCCtx();
break;
case 13 :
if (g_zdc==NULL) g_zdc = ZSTD_createDCtx();
g_cSize = ZSTD_compress(buff2, dstBuffSize, src, srcSize, 1);
break;
case 31: /* ZSTD_decodeLiteralsBlock */
if (g_zdc==NULL) g_zdc = ZSTD_createDCtx();
{ blockProperties_t bp;
ZSTD_frameParams zfp;
ZSTD_frameHeader zfp;
size_t frameHeaderSize, skippedSize;
g_cSize = ZSTD_compress(dstBuff, dstBuffSize, src, srcSize, 1);
frameHeaderSize = ZSTD_getFrameParams(&zfp, dstBuff, ZSTD_frameHeaderSize_min);
frameHeaderSize = ZSTD_getFrameHeader(&zfp, dstBuff, ZSTD_frameHeaderSize_min);
if (frameHeaderSize==0) frameHeaderSize = ZSTD_frameHeaderSize_min;
ZSTD_getcBlockSize(dstBuff+frameHeaderSize, dstBuffSize, &bp); /* Get 1st block type */
if (bp.blockType != bt_compressed) {
@ -313,15 +385,14 @@ static size_t benchMem(const void* src, size_t srcSize, U32 benchNb)
break;
}
case 32: /* ZSTD_decodeSeqHeaders */
if (g_zdc==NULL) g_zdc = ZSTD_createDCtx();
{ blockProperties_t bp;
ZSTD_frameParams zfp;
ZSTD_frameHeader zfp;
const BYTE* ip = dstBuff;
const BYTE* iend;
size_t frameHeaderSize, cBlockSize;
ZSTD_compress(dstBuff, dstBuffSize, src, srcSize, 1); /* it would be better to use direct block compression here */
g_cSize = ZSTD_compress(dstBuff, dstBuffSize, src, srcSize, 1);
frameHeaderSize = ZSTD_getFrameParams(&zfp, dstBuff, ZSTD_frameHeaderSize_min);
frameHeaderSize = ZSTD_getFrameHeader(&zfp, dstBuff, ZSTD_frameHeaderSize_min);
if (frameHeaderSize==0) frameHeaderSize = ZSTD_frameHeaderSize_min;
ip += frameHeaderSize; /* Skip frame Header */
cBlockSize = ZSTD_getcBlockSize(ip, dstBuffSize, &bp); /* Get 1st block type */
@ -342,11 +413,7 @@ static size_t benchMem(const void* src, size_t srcSize, U32 benchNb)
case 31:
goto _cleanOut;
#endif
case 41 :
if (g_cstream==NULL) g_cstream = ZSTD_createCStream();
break;
case 42 :
if (g_dstream==NULL) g_dstream = ZSTD_createDStream();
g_cSize = ZSTD_compress(buff2, dstBuffSize, src, srcSize, 1);
break;
@ -359,30 +426,37 @@ static size_t benchMem(const void* src, size_t srcSize, U32 benchNb)
{ size_t i; for (i=0; i<dstBuffSize; i++) dstBuff[i]=(BYTE)i; } /* warming up memory */
{ U32 loopNb;
# define TIME_SEC_MICROSEC (1*1000000ULL) /* 1 second */
U64 const clockLoop = TIMELOOP_S * TIME_SEC_MICROSEC;
UTIL_freq_t ticksPerSecond;
UTIL_initTimer(&ticksPerSecond);
DISPLAY("%2i- %-30.30s : \r", benchNb, benchName);
for (loopNb = 1; loopNb <= g_nbIterations; loopNb++) {
clock_t const timeLoop = TIMELOOP_S * CLOCKS_PER_SEC;
clock_t clockStart;
U32 nbRounds;
UTIL_time_t clockStart;
size_t benchResult=0;
double averageTime;
U32 nbRounds;
clockStart = clock();
while (clock() == clockStart);
clockStart = clock();
for (nbRounds=0; BMK_clockSpan(clockStart) < timeLoop; nbRounds++) {
UTIL_sleepMilli(1); /* give processor time to other processes */
UTIL_waitForNextTick(ticksPerSecond);
UTIL_getTime(&clockStart);
for (nbRounds=0; UTIL_clockSpanMicro(clockStart, ticksPerSecond) < clockLoop; nbRounds++) {
benchResult = benchFunction(dstBuff, dstBuffSize, buff2, src, srcSize);
if (ZSTD_isError(benchResult)) { DISPLAY("ERROR ! %s() => %s !! \n", benchName, ZSTD_getErrorName(benchResult)); exit(1); }
}
averageTime = (((double)BMK_clockSpan(clockStart)) / CLOCKS_PER_SEC) / nbRounds;
if (averageTime < bestTime) bestTime = averageTime;
DISPLAY("%2i- %-30.30s : %7.1f MB/s (%9u)\r", loopNb, benchName, (double)srcSize / (1 MB) / bestTime, (U32)benchResult);
} }
{ U64 const clockSpanMicro = UTIL_clockSpanMicro(clockStart, ticksPerSecond);
double const averageTime = (double)clockSpanMicro / TIME_SEC_MICROSEC / nbRounds;
if (averageTime < bestTime) bestTime = averageTime;
DISPLAY("%2i- %-30.30s : %7.1f MB/s (%9u)\r", loopNb, benchName, (double)srcSize / (1 MB) / bestTime, (U32)benchResult);
} } }
DISPLAY("%2u\n", benchNb);
_cleanOut:
free(dstBuff);
free(buff2);
ZSTD_freeCCtx(g_zcc); g_zcc=NULL;
ZSTD_freeDCtx(g_zdc); g_zdc=NULL;
ZSTD_freeCStream(g_cstream); g_cstream=NULL;
ZSTD_freeDStream(g_dstream); g_dstream=NULL;
return 0;
}
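/* Editor's sketch (not part of this changeset): the timing hunk in benchMem
 * above interleaves the removed clock()-based loop with its UTIL-based
 * replacement, so the new idiom is restated here in isolation. The UTIL_* names
 * and types are the ones used above; the helper name and the simplified
 * callback signature are editorial assumptions. */
static double example_timePerCall(size_t (*fn)(void), U64 durationMicro)
{
    UTIL_freq_t ticksPerSecond;
    UTIL_time_t start;
    U32 nbRounds = 0;
    UTIL_initTimer(&ticksPerSecond);
    UTIL_sleepMilli(1);                      /* give processor time to other processes */
    UTIL_waitForNextTick(ticksPerSecond);    /* start measuring on a tick boundary */
    UTIL_getTime(&start);
    do { (void)fn(); nbRounds++; }
    while (UTIL_clockSpanMicro(start, ticksPerSecond) < durationMicro);
    return ((double)UTIL_clockSpanMicro(start, ticksPerSecond) / 1000000.0) / nbRounds;   /* seconds per call */
}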


@ -12,9 +12,9 @@
* Compiler specific
**************************************/
#ifdef _MSC_VER /* Visual Studio */
# define _CRT_SECURE_NO_WARNINGS /* fgets */
# pragma warning(disable : 4127) /* disable: C4127: conditional expression is constant */
# pragma warning(disable : 4204) /* disable: C4204: non-constant aggregate initializer */
# define _CRT_SECURE_NO_WARNINGS /* fgets */
# pragma warning(disable : 4127) /* disable: C4127: conditional expression is constant */
# pragma warning(disable : 4204) /* disable: C4204: non-constant aggregate initializer */
#endif
@ -25,7 +25,7 @@
#include <stdio.h> /* fgets, sscanf */
#include <string.h> /* strcmp */
#include <time.h> /* clock_t */
#define ZSTD_STATIC_LINKING_ONLY /* ZSTD_compressContinue, ZSTD_compressBlock */
#define ZSTD_STATIC_LINKING_ONLY /* ZSTD_compressContinue, ZSTD_compressBlock */
#include "zstd.h" /* ZSTD_VERSION_STRING */
#include "zstd_errors.h" /* ZSTD_getErrorCode */
#include "zstdmt_compress.h"
@ -74,7 +74,6 @@ static clock_t FUZ_clockSpan(clock_t cStart)
return clock() - cStart; /* works even when overflow; max span ~ 30mn */
}
#define FUZ_rotl32(x,r) ((x << r) | (x >> (32 - r)))
static unsigned FUZ_rand(unsigned* src)
{
@ -104,6 +103,7 @@ static unsigned FUZ_highbit32(U32 v32)
#define CHECK_V(var, fn) size_t const var = fn; if (ZSTD_isError(var)) goto _output_error
#define CHECK(fn) { CHECK_V(err, fn); }
#define CHECKPLUS(var, fn, more) { CHECK_V(var, fn); more; }
static int basicUnitTests(U32 seed, double compressibility)
{
size_t const CNBuffSize = 5 MB;
@ -190,6 +190,89 @@ static int basicUnitTests(U32 seed, double compressibility)
DISPLAYLEVEL(4, "OK \n");
/* Static CCtx tests */
#define STATIC_CCTX_LEVEL 3
DISPLAYLEVEL(4, "test%3i : create static CCtx for level %u :", testNb++, STATIC_CCTX_LEVEL);
{ size_t const staticCCtxSize = ZSTD_estimateCStreamSize(STATIC_CCTX_LEVEL);
void* const staticCCtxBuffer = malloc(staticCCtxSize);
size_t const staticDCtxSize = ZSTD_estimateDCtxSize();
void* const staticDCtxBuffer = malloc(staticDCtxSize);
if (staticCCtxBuffer==NULL || staticDCtxBuffer==NULL) {
free(staticCCtxBuffer);
free(staticDCtxBuffer);
DISPLAY("Not enough memory, aborting\n");
testResult = 1;
goto _end;
}
{ ZSTD_CCtx* staticCCtx = ZSTD_initStaticCCtx(staticCCtxBuffer, staticCCtxSize);
ZSTD_DCtx* staticDCtx = ZSTD_initStaticDCtx(staticDCtxBuffer, staticDCtxSize);
if ((staticCCtx==NULL) || (staticDCtx==NULL)) goto _output_error;
DISPLAYLEVEL(4, "OK \n");
DISPLAYLEVEL(4, "test%3i : init CCtx for level %u : ", testNb++, STATIC_CCTX_LEVEL);
{ size_t const r = ZSTD_compressBegin(staticCCtx, STATIC_CCTX_LEVEL);
if (ZSTD_isError(r)) goto _output_error; }
DISPLAYLEVEL(4, "OK \n");
DISPLAYLEVEL(4, "test%3i : simple compression test with static CCtx : ", testNb++);
CHECKPLUS(r, ZSTD_compressCCtx(staticCCtx,
compressedBuffer, ZSTD_compressBound(CNBuffSize),
CNBuffer, CNBuffSize, STATIC_CCTX_LEVEL),
cSize=r );
DISPLAYLEVEL(4, "OK (%u bytes : %.2f%%)\n",
(U32)cSize, (double)cSize/CNBuffSize*100);
DISPLAYLEVEL(4, "test%3i : simple decompression test with static DCtx : ", testNb++);
{ size_t const r = ZSTD_decompressDCtx(staticDCtx,
decodedBuffer, CNBuffSize,
compressedBuffer, cSize);
if (r != CNBuffSize) goto _output_error; }
DISPLAYLEVEL(4, "OK \n");
DISPLAYLEVEL(4, "test%3i : check decompressed result : ", testNb++);
{ size_t u;
for (u=0; u<CNBuffSize; u++) {
if (((BYTE*)decodedBuffer)[u] != ((BYTE*)CNBuffer)[u])
goto _output_error;;
} }
DISPLAYLEVEL(4, "OK \n");
DISPLAYLEVEL(4, "test%3i : init CCtx for too large level (must fail) : ", testNb++);
{ size_t const r = ZSTD_compressBegin(staticCCtx, ZSTD_maxCLevel());
if (!ZSTD_isError(r)) goto _output_error; }
DISPLAYLEVEL(4, "OK \n");
DISPLAYLEVEL(4, "test%3i : init CCtx for small level %u (should work again) : ", testNb++, 1);
{ size_t const r = ZSTD_compressBegin(staticCCtx, 1);
if (ZSTD_isError(r)) goto _output_error; }
DISPLAYLEVEL(4, "OK \n");
DISPLAYLEVEL(4, "test%3i : init CStream for small level %u : ", testNb++, 1);
{ size_t const r = ZSTD_initCStream(staticCCtx, 1);
if (ZSTD_isError(r)) goto _output_error; }
DISPLAYLEVEL(4, "OK \n");
DISPLAYLEVEL(4, "test%3i : init CStream with dictionary (should fail) : ", testNb++);
{ size_t const r = ZSTD_initCStream_usingDict(staticCCtx, CNBuffer, 64 KB, 1);
if (!ZSTD_isError(r)) goto _output_error; }
DISPLAYLEVEL(4, "OK \n");
DISPLAYLEVEL(4, "test%3i : init DStream (should fail) : ", testNb++);
{ size_t const r = ZSTD_initDStream(staticDCtx);
if (ZSTD_isError(r)) goto _output_error; }
{ ZSTD_outBuffer output = { decodedBuffer, CNBuffSize, 0 };
ZSTD_inBuffer input = { compressedBuffer, ZSTD_FRAMEHEADERSIZE_MAX+1, 0 };
size_t const r = ZSTD_decompressStream(staticDCtx, &output, &input);
if (!ZSTD_isError(r)) goto _output_error;
}
DISPLAYLEVEL(4, "OK \n");
}
free(staticCCtxBuffer);
free(staticDCtxBuffer);
}
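/* Editor's sketch (not part of this changeset): the static-context pattern the
 * test above exercises, reduced to its essentials - size the workspace, hand it
 * to ZSTD_initStaticCCtx(), then use the returned CCtx like any other. Only the
 * workspace buffer is freed; there is no ZSTD_freeCCtx() for a static context.
 * The helper name and the 0-on-failure convention are editorial. */
static size_t example_staticCCtxCompress(void* dst, size_t dstCapacity,
                                         const void* src, size_t srcSize, int level)
{
    size_t const workspaceSize = ZSTD_estimateCStreamSize(level);
    void* const workspace = malloc(workspaceSize);
    ZSTD_CCtx* const cctx = workspace ? ZSTD_initStaticCCtx(workspace, workspaceSize) : NULL;
    size_t const cSize = cctx ? ZSTD_compressCCtx(cctx, dst, dstCapacity, src, srcSize, level)
                              : 0;   /* 0 signals failure in this sketch */
    free(workspace);
    return cSize;
}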
/* ZSTDMT simple MT compression test */
DISPLAYLEVEL(4, "test%3i : create ZSTDMT CCtx : ", testNb++);
{ ZSTDMT_CCtx* mtctx = ZSTDMT_createCCtx(2);
@ -321,13 +404,25 @@ static int basicUnitTests(U32 seed, double compressibility)
DISPLAYLEVEL(4, "OK \n");
DISPLAYLEVEL(4, "test%3i : decompress with DDict : ", testNb++);
{ ZSTD_DDict* const ddict = ZSTD_createDDict_byReference(CNBuffer, dictSize);
{ ZSTD_DDict* const ddict = ZSTD_createDDict(CNBuffer, dictSize);
size_t const r = ZSTD_decompress_usingDDict(dctx, decodedBuffer, CNBuffSize, compressedBuffer, cSize, ddict);
if (r != CNBuffSize - dictSize) goto _output_error;
DISPLAYLEVEL(4, "OK (size of DDict : %u) \n", (U32)ZSTD_sizeof_DDict(ddict));
ZSTD_freeDDict(ddict);
}
DISPLAYLEVEL(4, "test%3i : decompress with static DDict : ", testNb++);
{ size_t const ddictBufferSize = ZSTD_estimateDDictSize(dictSize, 0);
void* ddictBuffer = malloc(ddictBufferSize);
if (ddictBuffer == NULL) goto _output_error;
{ ZSTD_DDict* const ddict = ZSTD_initStaticDDict(ddictBuffer, ddictBufferSize, CNBuffer, dictSize, 0);
size_t const r = ZSTD_decompress_usingDDict(dctx, decodedBuffer, CNBuffSize, compressedBuffer, cSize, ddict);
if (r != CNBuffSize - dictSize) goto _output_error;
}
free(ddictBuffer);
DISPLAYLEVEL(4, "OK (size of static DDict : %u) \n", (U32)ddictBufferSize);
}
DISPLAYLEVEL(4, "test%3i : check content size on duplicated context : ", testNb++);
{ size_t const testSize = CNBuffSize / 3;
{ ZSTD_parameters p = ZSTD_getParams(2, testSize, dictSize);
@ -339,8 +434,8 @@ static int basicUnitTests(U32 seed, double compressibility)
CHECKPLUS(r, ZSTD_compressEnd(ctxDuplicated, compressedBuffer, ZSTD_compressBound(testSize),
(const char*)CNBuffer + dictSize, testSize),
cSize = r);
{ ZSTD_frameParams fp;
if (ZSTD_getFrameParams(&fp, compressedBuffer, cSize)) goto _output_error;
{ ZSTD_frameHeader fp;
if (ZSTD_getFrameHeader(&fp, compressedBuffer, cSize)) goto _output_error;
if ((fp.frameContentSize != testSize) && (fp.frameContentSize != 0)) goto _output_error;
} }
DISPLAYLEVEL(4, "OK \n");
@ -404,10 +499,18 @@ static int basicUnitTests(U32 seed, double compressibility)
if (r != CNBuffSize) goto _output_error);
DISPLAYLEVEL(4, "OK \n");
DISPLAYLEVEL(4, "test%3i : compress with preprocessed dictionary : ", testNb++);
DISPLAYLEVEL(4, "test%3i : estimate CDict size : ", testNb++);
{ ZSTD_compressionParameters const cParams = ZSTD_getCParams(1, CNBuffSize, dictSize);
ZSTD_customMem customMem = { NULL, NULL, NULL };
ZSTD_CDict* cdict = ZSTD_createCDict_advanced(dictBuffer, dictSize, 1, cParams, customMem);
size_t const estimatedSize = ZSTD_estimateCDictSize_advanced(dictSize, cParams, 1 /*byReference*/);
DISPLAYLEVEL(4, "OK : %u \n", (U32)estimatedSize);
}
DISPLAYLEVEL(4, "test%3i : compress with CDict ", testNb++);
{ ZSTD_compressionParameters const cParams = ZSTD_getCParams(1, CNBuffSize, dictSize);
ZSTD_CDict* const cdict = ZSTD_createCDict_advanced(dictBuffer, dictSize,
1 /* byReference */, ZSTD_dm_auto,
cParams, ZSTD_defaultCMem);
DISPLAYLEVEL(4, "(size : %u) : ", (U32)ZSTD_sizeof_CDict(cdict));
cSize = ZSTD_compress_usingCDict(cctx, compressedBuffer, ZSTD_compressBound(CNBuffSize),
CNBuffer, CNBuffSize, cdict);
ZSTD_freeCDict(cdict);
@ -429,11 +532,34 @@ static int basicUnitTests(U32 seed, double compressibility)
if (r != CNBuffSize) goto _output_error);
DISPLAYLEVEL(4, "OK \n");
DISPLAYLEVEL(4, "test%3i : compress with static CDict : ", testNb++);
{ ZSTD_compressionParameters const cParams = ZSTD_getCParams(1, CNBuffSize, dictSize);
size_t const cdictSize = ZSTD_estimateCDictSize_advanced(dictSize, cParams, 0);
void* const cdictBuffer = malloc(cdictSize);
if (cdictBuffer==NULL) goto _output_error;
{ ZSTD_CDict* const cdict = ZSTD_initStaticCDict(cdictBuffer, cdictSize,
dictBuffer, dictSize,
0 /* by Reference */, ZSTD_dm_auto,
cParams);
if (cdict == NULL) {
DISPLAY("ZSTD_initStaticCDict failed ");
goto _output_error;
}
cSize = ZSTD_compress_usingCDict(cctx,
compressedBuffer, ZSTD_compressBound(CNBuffSize),
CNBuffer, CNBuffSize, cdict);
if (ZSTD_isError(cSize)) {
DISPLAY("ZSTD_compress_usingCDict failed ");
goto _output_error;
} }
free(cdictBuffer);
}
DISPLAYLEVEL(4, "OK (%u bytes : %.2f%%)\n", (U32)cSize, (double)cSize/CNBuffSize*100);
DISPLAYLEVEL(4, "test%3i : ZSTD_compress_usingCDict_advanced, no contentSize, no dictID : ", testNb++);
{ ZSTD_frameParameters const fParams = { 0 /* frameSize */, 1 /* checksum */, 1 /* noDictID*/ };
ZSTD_compressionParameters const cParams = ZSTD_getCParams(1, CNBuffSize, dictSize);
ZSTD_customMem const customMem = { NULL, NULL, NULL };
ZSTD_CDict* const cdict = ZSTD_createCDict_advanced(dictBuffer, dictSize, 1, cParams, customMem);
ZSTD_CDict* const cdict = ZSTD_createCDict_advanced(dictBuffer, dictSize, 1 /*byRef*/, ZSTD_dm_auto, cParams, ZSTD_defaultCMem);
cSize = ZSTD_compress_usingCDict_advanced(cctx, compressedBuffer, ZSTD_compressBound(CNBuffSize),
CNBuffer, CNBuffSize, cdict, fParams);
ZSTD_freeCDict(cdict);
@ -482,6 +608,22 @@ static int basicUnitTests(U32 seed, double compressibility)
}
DISPLAYLEVEL(4, "OK \n");
DISPLAYLEVEL(4, "test%3i : Building cdict w/ ZSTD_dm_fullDict on a good dictionary : ", testNb++);
{ ZSTD_compressionParameters const cParams = ZSTD_getCParams(1, CNBuffSize, dictSize);
ZSTD_CDict* const cdict = ZSTD_createCDict_advanced(dictBuffer, dictSize, 1 /*byRef*/, ZSTD_dm_fullDict, cParams, ZSTD_defaultCMem);
if (cdict==NULL) goto _output_error;
ZSTD_freeCDict(cdict);
}
DISPLAYLEVEL(4, "OK \n");
DISPLAYLEVEL(4, "test%3i : Building cdict w/ ZSTD_dm_fullDict on a rawContent (must fail) : ", testNb++);
{ ZSTD_compressionParameters const cParams = ZSTD_getCParams(1, CNBuffSize, dictSize);
ZSTD_CDict* const cdict = ZSTD_createCDict_advanced((const char*)dictBuffer+1, dictSize-1, 1 /*byRef*/, ZSTD_dm_fullDict, cParams, ZSTD_defaultCMem);
if (cdict!=NULL) goto _output_error;
ZSTD_freeCDict(cdict);
}
DISPLAYLEVEL(4, "OK \n");
ZSTD_freeCCtx(cctx);
free(dictBuffer);
free(samplesSizes);
@ -496,7 +638,7 @@ static int basicUnitTests(U32 seed, double compressibility)
size_t const sampleUnitSize = 8 KB;
U32 const nbSamples = (U32)(totalSampleSize / sampleUnitSize);
size_t* const samplesSizes = (size_t*) malloc(nbSamples * sizeof(size_t));
COVER_params_t params;
ZDICT_cover_params_t params;
U32 dictID;
if (dictBuffer==NULL || samplesSizes==NULL) {
@ -505,14 +647,14 @@ static int basicUnitTests(U32 seed, double compressibility)
goto _output_error;
}
DISPLAYLEVEL(4, "test%3i : COVER_trainFromBuffer : ", testNb++);
DISPLAYLEVEL(4, "test%3i : ZDICT_trainFromBuffer_cover : ", testNb++);
{ U32 u; for (u=0; u<nbSamples; u++) samplesSizes[u] = sampleUnitSize; }
memset(&params, 0, sizeof(params));
params.d = 1 + (FUZ_rand(&seed) % 16);
params.k = params.d + (FUZ_rand(&seed) % 256);
dictSize = COVER_trainFromBuffer(dictBuffer, dictSize,
CNBuffer, samplesSizes, nbSamples,
params);
dictSize = ZDICT_trainFromBuffer_cover(dictBuffer, dictSize,
CNBuffer, samplesSizes, nbSamples,
params);
if (ZDICT_isError(dictSize)) goto _output_error;
DISPLAYLEVEL(4, "OK, created dictionary of size %u \n", (U32)dictSize);
@ -521,12 +663,12 @@ static int basicUnitTests(U32 seed, double compressibility)
if (dictID==0) goto _output_error;
DISPLAYLEVEL(4, "OK : %u \n", dictID);
DISPLAYLEVEL(4, "test%3i : COVER_optimizeTrainFromBuffer : ", testNb++);
DISPLAYLEVEL(4, "test%3i : ZDICT_optimizeTrainFromBuffer_cover : ", testNb++);
memset(&params, 0, sizeof(params));
params.steps = 4;
optDictSize = COVER_optimizeTrainFromBuffer(dictBuffer, optDictSize,
CNBuffer, samplesSizes, nbSamples / 4,
&params);
optDictSize = ZDICT_optimizeTrainFromBuffer_cover(dictBuffer, optDictSize,
CNBuffer, samplesSizes,
nbSamples / 4, &params);
if (ZDICT_isError(optDictSize)) goto _output_error;
DISPLAYLEVEL(4, "OK, created dictionary of size %u \n", (U32)optDictSize);
@ -560,9 +702,7 @@ static int basicUnitTests(U32 seed, double compressibility)
size_t const wrongSrcSize = (srcSize + 1000);
ZSTD_parameters params = ZSTD_getParams(1, wrongSrcSize, 0);
params.fParams.contentSizeFlag = 1;
{ size_t const result = ZSTD_compressBegin_advanced(cctx, NULL, 0, params, wrongSrcSize);
if (ZSTD_isError(result)) goto _output_error;
}
CHECK( ZSTD_compressBegin_advanced(cctx, NULL, 0, params, wrongSrcSize) );
{ size_t const result = ZSTD_compressEnd(cctx, decodedBuffer, CNBuffSize, CNBuffer, srcSize);
if (!ZSTD_isError(result)) goto _output_error;
if (ZSTD_getErrorCode(result) != ZSTD_error_srcSize_wrong) goto _output_error;
@ -580,6 +720,7 @@ static int basicUnitTests(U32 seed, double compressibility)
/* basic block compression */
DISPLAYLEVEL(4, "test%3i : Block compression test : ", testNb++);
CHECK( ZSTD_compressBegin(cctx, 5) );
CHECK( ZSTD_getBlockSize(cctx) >= blockSize);
cSize = ZSTD_compressBlock(cctx, compressedBuffer, ZSTD_compressBound(blockSize), CNBuffer, blockSize);
if (ZSTD_isError(cSize)) goto _output_error;
DISPLAYLEVEL(4, "OK \n");
@ -743,8 +884,23 @@ static size_t FUZ_randomLength(U32* seed, U32 maxLog)
}
#undef CHECK
#define CHECK(cond, ...) if (cond) { DISPLAY("Error => "); DISPLAY(__VA_ARGS__); \
DISPLAY(" (seed %u, test nb %u) \n", seed, testNb); goto _output_error; }
#define CHECK(cond, ...) { \
if (cond) { \
DISPLAY("Error => "); \
DISPLAY(__VA_ARGS__); \
DISPLAY(" (seed %u, test nb %u) \n", seed, testNb); \
goto _output_error; \
} }
#define CHECK_Z(f) { \
size_t const err = f; \
if (ZSTD_isError(err)) { \
DISPLAY("Error => %s : %s ", \
#f, ZSTD_getErrorName(err)); \
DISPLAY(" (seed %u, test nb %u) \n", seed, testNb); \
goto _output_error; \
} }
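/* Editor's sketch (hypothetical helper, not part of this changeset): how the
 * two macros above are meant to be used - CHECK asserts a condition with a
 * printf-style message, CHECK_Z wraps any size_t-returning zstd call and prints
 * its error name; both jump to _output_error and report the seed and test
 * number, which must be in scope. */
static int example_checkMacros(ZSTD_CCtx* cctx, U32 seed, U32 testNb)
{
    CHECK(cctx == NULL, "no CCtx available");
    CHECK_Z( ZSTD_compressBegin(cctx, 1) );
    return 0;
_output_error:
    return 1;
}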
static int fuzzerTests(U32 seed, U32 nbTests, unsigned startTest, U32 const maxDurationS, double compressibility, int bigTests)
{
@ -856,9 +1012,8 @@ static int fuzzerTests(U32 seed, U32 nbTests, unsigned startTest, U32 const maxD
}
/* frame header decompression test */
{ ZSTD_frameParams dParams;
size_t const check = ZSTD_getFrameParams(&dParams, cBuffer, cSize);
CHECK(ZSTD_isError(check), "Frame Parameters extraction failed");
{ ZSTD_frameHeader dParams;
CHECK_Z( ZSTD_getFrameHeader(&dParams, cBuffer, cSize) );
CHECK(dParams.frameContentSize != sampleSize, "Frame content size incorrect");
}
@ -945,20 +1100,17 @@ static int fuzzerTests(U32 seed, U32 nbTests, unsigned startTest, U32 const maxD
dict = srcBuffer + (FUZ_rand(&lseed) % (srcBufferSize - dictSize));
if (FUZ_rand(&lseed) & 0xF) {
size_t const errorCode = ZSTD_compressBegin_usingDict(refCtx, dict, dictSize, cLevel);
CHECK (ZSTD_isError(errorCode), "ZSTD_compressBegin_usingDict error : %s", ZSTD_getErrorName(errorCode));
CHECK_Z ( ZSTD_compressBegin_usingDict(refCtx, dict, dictSize, cLevel) );
} else {
ZSTD_compressionParameters const cPar = ZSTD_getCParams(cLevel, 0, dictSize);
ZSTD_frameParameters const fPar = { FUZ_rand(&lseed)&1 /* contentSizeFlag */,
!(FUZ_rand(&lseed)&3) /* contentChecksumFlag*/,
0 /*NodictID*/ }; /* note : since dictionary is fake, dictIDflag has no impact */
ZSTD_parameters const p = FUZ_makeParams(cPar, fPar);
size_t const errorCode = ZSTD_compressBegin_advanced(refCtx, dict, dictSize, p, 0);
CHECK (ZSTD_isError(errorCode), "ZSTD_compressBegin_advanced error : %s", ZSTD_getErrorName(errorCode));
CHECK_Z ( ZSTD_compressBegin_advanced(refCtx, dict, dictSize, p, 0) );
}
{ size_t const errorCode = ZSTD_copyCCtx(ctx, refCtx, 0);
CHECK (ZSTD_isError(errorCode), "ZSTD_copyCCtx error : %s", ZSTD_getErrorName(errorCode));
} }
CHECK_Z( ZSTD_copyCCtx(ctx, refCtx, 0) );
}
ZSTD_setCCtxParameter(ctx, ZSTD_p_forceWindow, FUZ_rand(&lseed) & 1);
{ U32 const nbChunks = (FUZ_rand(&lseed) & 127) + 2;
@ -990,8 +1142,7 @@ static int fuzzerTests(U32 seed, U32 nbTests, unsigned startTest, U32 const maxD
/* streaming decompression test */
if (dictSize<8) dictSize=0, dict=NULL; /* disable dictionary */
{ size_t const errorCode = ZSTD_decompressBegin_usingDict(dctx, dict, dictSize);
CHECK (ZSTD_isError(errorCode), "ZSTD_decompressBegin_usingDict error : %s", ZSTD_getErrorName(errorCode)); }
CHECK_Z( ZSTD_decompressBegin_usingDict(dctx, dict, dictSize) );
totalCSize = 0;
totalGenSize = 0;
while (totalCSize < cSize) {


@ -38,7 +38,7 @@
#define GB *(1ULL<<30)
#define NBLOOPS 2
#define TIMELOOP (2 * CLOCKS_PER_SEC)
#define TIMELOOP (2 * CLOCKS_PER_SEC)
#define NB_LEVELS_TRACKED 30
@ -47,7 +47,7 @@ static const size_t maxMemory = (sizeof(size_t)==4) ? (2 GB - 64 MB) : (size_t
#define COMPRESSIBILITY_DEFAULT 0.50
static const size_t sampleSize = 10000000;
static const U32 g_grillDuration_s = 60000; /* about 16 hours */
static const double g_grillDuration_s = 90000; /* about 24 hours */
static const clock_t g_maxParamTime = 15 * CLOCKS_PER_SEC;
static const clock_t g_maxVariationTime = 60 * CLOCKS_PER_SEC;
static const int g_maxNbVariations = 64;
@ -87,9 +87,11 @@ void BMK_SetNbIterations(int nbLoops)
* Private functions
*********************************************************/
static clock_t BMK_clockSpan(clock_t cStart) { return clock() - cStart; } /* works even if overflow ; max span ~ 30 mn */
/* works even if overflow ; max span ~ 30 mn */
static clock_t BMK_clockSpan(clock_t cStart) { return clock() - cStart; }
static U32 BMK_timeSpan(time_t tStart) { return (U32)difftime(time(NULL), tStart); } /* accuracy in seconds only, span can be multiple years */
/* accuracy in seconds only, span can be multiple years */
static double BMK_timeSpan(time_t tStart) { return difftime(time(NULL), tStart); }
static size_t BMK_findMaxMem(U64 requiredMem)
@ -307,14 +309,14 @@ static size_t BMK_benchParam(BMK_result_t* resultPtr,
}
const char* g_stratName[] = { "ZSTD_fast ",
"ZSTD_dfast ",
"ZSTD_greedy ",
"ZSTD_lazy ",
"ZSTD_lazy2 ",
"ZSTD_btlazy2",
"ZSTD_btopt ",
"ZSTD_btopt2 "};
const char* g_stratName[] = { "ZSTD_fast ",
"ZSTD_dfast ",
"ZSTD_greedy ",
"ZSTD_lazy ",
"ZSTD_lazy2 ",
"ZSTD_btlazy2 ",
"ZSTD_btopt ",
"ZSTD_btultra "};
static void BMK_printWinner(FILE* f, U32 cLevel, BMK_result_t result, ZSTD_compressionParameters params, size_t srcSize)
{
@ -388,8 +390,8 @@ static int BMK_seed(winnerInfo_t* winners, const ZSTD_compressionParameters para
double W_DMemUsed_note = W_ratioNote * ( 40 + 9*cLevel) - log((double)W_DMemUsed);
double O_DMemUsed_note = O_ratioNote * ( 40 + 9*cLevel) - log((double)O_DMemUsed);
size_t W_CMemUsed = (1 << params.windowLog) + ZSTD_estimateCCtxSize(params);
size_t O_CMemUsed = (1 << winners[cLevel].params.windowLog) + ZSTD_estimateCCtxSize(winners[cLevel].params);
size_t W_CMemUsed = (1 << params.windowLog) + ZSTD_estimateCCtxSize_advanced(params);
size_t O_CMemUsed = (1 << winners[cLevel].params.windowLog) + ZSTD_estimateCCtxSize_advanced(winners[cLevel].params);
double W_CMemUsed_note = W_ratioNote * ( 50 + 13*cLevel) - log((double)W_CMemUsed);
double O_CMemUsed_note = O_ratioNote * ( 50 + 13*cLevel) - log((double)O_CMemUsed);
@ -454,7 +456,7 @@ static ZSTD_compressionParameters* sanitizeParams(ZSTD_compressionParameters par
g_params.chainLog = 0, g_params.searchLog = 0;
if (params.strategy == ZSTD_dfast)
g_params.searchLog = 0;
if (params.strategy != ZSTD_btopt && params.strategy != ZSTD_btopt2)
if (params.strategy != ZSTD_btopt && params.strategy != ZSTD_btultra)
g_params.targetLength = 0;
return &g_params;
}
@ -558,7 +560,7 @@ static ZSTD_compressionParameters randomParams(void)
p.windowLog = FUZ_rand(&g_rand) % (ZSTD_WINDOWLOG_MAX+1 - ZSTD_WINDOWLOG_MIN) + ZSTD_WINDOWLOG_MIN;
p.searchLength=FUZ_rand(&g_rand) % (ZSTD_SEARCHLENGTH_MAX+1 - ZSTD_SEARCHLENGTH_MIN) + ZSTD_SEARCHLENGTH_MIN;
p.targetLength=FUZ_rand(&g_rand) % (ZSTD_TARGETLENGTH_MAX+1 - ZSTD_TARGETLENGTH_MIN) + ZSTD_TARGETLENGTH_MIN;
p.strategy = (ZSTD_strategy) (FUZ_rand(&g_rand) % (ZSTD_btopt2 +1));
p.strategy = (ZSTD_strategy) (FUZ_rand(&g_rand) % (ZSTD_btultra +1));
validated = !ZSTD_isError(ZSTD_checkCParams(p));
}
return p;


@ -92,7 +92,7 @@ $ZSTD tmp --stdout > tmpCompressed # long command format
$ECHO "test : compress to named file"
rm tmpCompressed
$ZSTD tmp -o tmpCompressed
ls tmpCompressed # must work
test -f tmpCompressed # file must be created
$ECHO "test : -o must be followed by filename (must fail)"
$ZSTD tmp -of tmpCompressed && die "-o must be followed by filename "
$ECHO "test : force write, correct order"
@ -142,21 +142,21 @@ $ZSTD -q -f tmpro
rm -f tmpro tmpro.zst
$ECHO "test : file removal"
$ZSTD -f --rm tmp
ls tmp && die "tmp should no longer be present"
test ! -f tmp # tmp should no longer be present
$ZSTD -f -d --rm tmp.zst
ls tmp.zst && die "tmp.zst should no longer be present"
test ! -f tmp.zst # tmp.zst should no longer be present
$ECHO "test : --rm on stdin"
$ECHO a | $ZSTD --rm > $INTOVOID # --rm should remain silent
rm tmp
$ZSTD -f tmp && die "tmp not present : should have failed"
ls tmp.zst && die "tmp.zst should not be created"
test ! -f tmp.zst # tmp.zst should not be created
$ECHO "\n**** Advanced compression parameters **** "
$ECHO "Hello world!" | $ZSTD --zstd=windowLog=21, - -o tmp.zst && die "wrong parameters not detected!"
$ECHO "Hello world!" | $ZSTD --zstd=windowLo=21 - -o tmp.zst && die "wrong parameters not detected!"
$ECHO "Hello world!" | $ZSTD --zstd=windowLog=21,slog - -o tmp.zst && die "wrong parameters not detected!"
ls tmp.zst && die "tmp.zst should not be created"
test ! -f tmp.zst # tmp.zst should not be created
roundTripTest -g512K
roundTripTest -g512K " --zstd=slen=3,tlen=48,strat=6"
roundTripTest -g512K " --zstd=strat=6,wlog=23,clog=23,hlog=22,slog=6"
@ -201,16 +201,17 @@ $ECHO foo | $ZSTD > /dev/full && die "write error not detected!"
$ECHO "$ECHO foo | $ZSTD | $ZSTD -d > /dev/full"
$ECHO foo | $ZSTD | $ZSTD -d > /dev/full && die "write error not detected!"
$ECHO "\n**** symbolic link test **** "
rm -f hello.tmp world.tmp hello.tmp.zst world.tmp.zst
$ECHO "hello world" > hello.tmp
ln -s hello.tmp world.tmp
$ZSTD world.tmp hello.tmp
ls hello.tmp.zst || die "regular file should have been compressed!"
ls world.tmp.zst && die "symbolic link should not have been compressed!"
test -f hello.tmp.zst # regular file should have been compressed!
test ! -f world.tmp.zst # symbolic link should not have been compressed!
$ZSTD world.tmp hello.tmp -f
ls world.tmp.zst || die "symbol link should have been compressed with --force"
test -f world.tmp.zst # symbolic link should have been compressed with --force
rm -f hello.tmp world.tmp hello.tmp.zst world.tmp.zst
fi
@ -225,10 +226,10 @@ $ZSTD tmpSparse -c | $ZSTD -dv --sparse -c > tmpOutSparse
$DIFF -s tmpSparse tmpOutSparse
$ZSTD tmpSparse -c | $ZSTD -dv --no-sparse -c > tmpOutNoSparse
$DIFF -s tmpSparse tmpOutNoSparse
ls -ls tmpSparse*
ls -ls tmpSparse* # look at file size and block size on disk
./datagen -s1 -g1200007 -P100 | $ZSTD | $ZSTD -dv --sparse -c > tmpSparseOdd # Odd size file (to not finish on an exact nb of blocks)
./datagen -s1 -g1200007 -P100 | $DIFF -s - tmpSparseOdd
ls -ls tmpSparseOdd
ls -ls tmpSparseOdd # look at file size and block size on disk
$ECHO "\n Sparse Compatibility with Console :"
$ECHO "Hello World 1 !" | $ZSTD | $ZSTD -d -c
$ECHO "Hello World 2 !" | $ZSTD | $ZSTD -d | cat
@ -238,7 +239,7 @@ cat tmpSparse1M tmpSparse1M > tmpSparse2M
$ZSTD -v -f tmpSparse1M -o tmpSparseCompressed
$ZSTD -d -v -f tmpSparseCompressed -o tmpSparseRegenerated
$ZSTD -d -v -f tmpSparseCompressed -c >> tmpSparseRegenerated
ls -ls tmpSparse*
ls -ls tmpSparse* # look at file size and block size on disk
$DIFF tmpSparse2M tmpSparseRegenerated
rm tmpSparse*
@ -257,11 +258,11 @@ $ZSTD -df *.zst
ls -ls tmp*
$ECHO "compress tmp* into stdout > tmpall : "
$ZSTD -c tmp1 tmp2 tmp3 > tmpall
ls -ls tmp*
ls -ls tmp* # check size of tmpall (should be tmp1.zst + tmp2.zst + tmp3.zst)
$ECHO "decompress tmpall* into stdout > tmpdec : "
cp tmpall tmpall2
$ZSTD -dc tmpall* > tmpdec
ls -ls tmp*
ls -ls tmp* # check size of tmpdec (should be 2*(tmp1 + tmp2 + tmp3))
$ECHO "compress multiple files including a missing one (notHere) : "
$ZSTD -f tmp1 notHere tmp2 && die "missing file not detected!"
@ -379,7 +380,9 @@ $ZSTD -t tmp2.zst && die "bad file not detected !"
$ZSTD -t tmp3 && die "bad file not detected !" # detects 0-sized files as bad
$ECHO "test --rm and --test combined "
$ZSTD -t --rm tmp1.zst
ls -ls tmp1.zst # check file is still present
test -f tmp1.zst # check file is still present
split -b16384 tmp1.zst tmpSplit.
$ZSTD -t tmpSplit.* && die "bad file not detected !"
$ECHO "\n**** benchmark mode tests **** "
@ -439,6 +442,7 @@ if [ $LZMAMODE -eq 1 ]; then
XZEXE=1
xz -V && lzma -V || XZEXE=0
if [ $XZEXE -eq 1 ]; then
$ECHO "Testing zstd xz and lzma support"
./datagen > tmp
$ZSTD --format=lzma -f tmp
$ZSTD --format=xz -f tmp
@ -449,6 +453,24 @@ if [ $LZMAMODE -eq 1 ]; then
$ZSTD -d -f -v tmp.xz
$ZSTD -d -f -v tmp.lzma
rm tmp*
$ECHO "Creating symlinks"
ln -s $ZSTD ./xz
ln -s $ZSTD ./unxz
ln -s $ZSTD ./lzma
ln -s $ZSTD ./unlzma
$ECHO "Testing xz and lzma symlinks"
./datagen > tmp
./xz tmp
xz -d tmp.xz
./lzma tmp
lzma -d tmp.lzma
$ECHO "Testing unxz and unlzma symlinks"
xz tmp
./xz -d tmp.xz
lzma tmp
./lzma -d tmp.lzma
rm xz unxz lzma unlzma
rm tmp*
else
$ECHO "xz binary not detected"
fi
@ -534,6 +556,54 @@ fi
rm tmp*
$ECHO "\n**** zstd --list/-l single frame tests ****"
./datagen > tmp1
./datagen > tmp2
./datagen > tmp3
./datagen > tmp4
$ZSTD tmp*
$ZSTD -l *.zst
$ZSTD -lv *.zst
$ZSTD --list *.zst
$ZSTD --list -v *.zst
$ECHO "\n**** zstd --list/-l multiple frame tests ****"
cat tmp1.zst tmp2.zst > tmp12.zst
cat tmp3.zst tmp4.zst > tmp34.zst
cat tmp12.zst tmp34.zst > tmp1234.zst
cat tmp12.zst tmp4.zst > tmp124.zst
$ZSTD -l *.zst
$ZSTD -lv *.zst
$ZSTD --list *.zst
$ZSTD --list -v *.zst
$ECHO "\n**** zstd --list/-l error detection tests ****"
! $ZSTD -l tmp1 tmp1.zst
! $ZSTD --list tmp*
! $ZSTD -lv tmp1*
! $ZSTD --list -v tmp2 tmp23.zst
$ECHO "\n**** zstd --list/-l test with null files ****"
./datagen -g0 > tmp5
$ZSTD tmp5
$ZSTD -l tmp5.zst
! $ZSTD -l tmp5*
$ZSTD -lv tmp5.zst
! $ZSTD -lv tmp5*
$ECHO "\n**** zstd --list/-l test with no content size field ****"
./datagen -g1MB | $ZSTD > tmp6.zst
$ZSTD -l tmp6.zst
$ZSTD -lv tmp6.zst
$ECHO "\n**** zstd --list/-l test with no checksum ****"
$ZSTD -f --no-check tmp1
$ZSTD -l tmp1.zst
$ZSTD -lv tmp1.zst
rm tmp*
if [ "$1" != "--test-large-data" ]; then
$ECHO "Skipping large data tests"
exit 0


@ -68,13 +68,20 @@ static size_t checkBuffers(const void* buff1, const void* buff2, size_t buffSize
return pos;
}
static void crash(int errorCode){
/* abort if AFL/libfuzzer, exit otherwise */
#ifdef FUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION /* could also use __AFL_COMPILER */
abort();
#else
exit(errorCode);
#endif
}
static void roundTripCheck(const void* srcBuff, size_t srcBuffSize)
{
size_t const cBuffSize = ZSTD_compressBound(srcBuffSize);
void* cBuff = malloc(cBuffSize);
void* rBuff = malloc(cBuffSize);
#define CRASH { free(cBuff); free(cBuff); } /* double free, to crash program */
if (!cBuff || !rBuff) {
fprintf(stderr, "not enough memory ! \n");
@ -84,15 +91,15 @@ static void roundTripCheck(const void* srcBuff, size_t srcBuffSize)
{ size_t const result = roundTripTest(rBuff, cBuffSize, cBuff, cBuffSize, srcBuff, srcBuffSize);
if (ZSTD_isError(result)) {
fprintf(stderr, "roundTripTest error : %s \n", ZSTD_getErrorName(result));
CRASH;
crash(1);
}
if (result != srcBuffSize) {
fprintf(stderr, "Incorrect regenerated size : %u != %u\n", (unsigned)result, (unsigned)srcBuffSize);
CRASH;
crash(1);
}
if (checkBuffers(srcBuff, rBuff, srcBuffSize) != srcBuffSize) {
fprintf(stderr, "Silent decoding corruption !!!");
CRASH;
crash(1);
}
}


@ -88,14 +88,14 @@ static const void *symbols[] = {
&ZSTD_copyCCtx,
&ZSTD_compressContinue,
&ZSTD_compressEnd,
&ZSTD_getFrameParams,
&ZSTD_getFrameHeader,
&ZSTD_decompressBegin,
&ZSTD_decompressBegin_usingDict,
&ZSTD_copyDCtx,
&ZSTD_nextSrcSizeToDecompress,
&ZSTD_decompressContinue,
&ZSTD_nextInputType,
&ZSTD_getBlockSizeMax,
&ZSTD_getBlockSize,
&ZSTD_compressBlock,
&ZSTD_decompressBlock,
&ZSTD_insertBlock,
@ -131,7 +131,10 @@ static const void *symbols[] = {
&ZDICT_isError,
&ZDICT_getErrorName,
/* zdict.h: advanced functions */
&ZDICT_trainFromBuffer_advanced,
&ZDICT_trainFromBuffer_cover,
&ZDICT_optimizeTrainFromBuffer_cover,
&ZDICT_finalizeDictionary,
&ZDICT_trainFromBuffer_legacy,
&ZDICT_addEntropyTablesFromBuffer,
NULL,
};

File diff suppressed because it is too large.


@ -18,9 +18,12 @@ EXAMPLE_PATH = examples
PROGRAMS_PATH = ../programs
TEST_FILE = ../doc/zstd_compression_format.md
CPPFLAGS = -DXXH_NAMESPACE=ZSTD_ -I$(ZLIB_PATH) -I$(PROGRAMS_PATH) -I$(ZSTDLIBDIR) -I$(ZSTDLIBDIR)/common -I$(ZLIBWRAPPER_PATH)
CPPFLAGS = -DXXH_NAMESPACE=ZSTD_ -I$(ZLIB_PATH) -I$(PROGRAMS_PATH) \
-I$(ZSTDLIBDIR) -I$(ZSTDLIBDIR)/common -I$(ZLIBWRAPPER_PATH)
CFLAGS ?= $(MOREFLAGS) -O3 -std=gnu99
CFLAGS += -Wall -Wextra -Wcast-qual -Wcast-align -Wshadow -Wswitch-enum -Wdeclaration-after-statement -Wstrict-prototypes -Wundef -Wstrict-aliasing=1
CFLAGS += -Wall -Wextra -Wcast-qual -Wcast-align -Wshadow -Wswitch-enum \
-Wdeclaration-after-statement -Wstrict-prototypes -Wundef \
-Wstrict-aliasing=1
# Define *.exe as extension for Windows systems
@ -48,8 +51,8 @@ test: example fitblk example_zstd fitblk_zstd zwrapbench minigzip minigzip_zstd
#cp example$(EXT).gz example$(EXT)_gz.gz
./minigzip_zstd -d example$(EXT).gz
@echo ---- minigzip end ----
./zwrapbench -qb3B1K $(TEST_FILE)
./zwrapbench -rqb1e5 ../lib ../programs ../tests
./zwrapbench -qi1b3B1K $(TEST_FILE)
./zwrapbench -rqi1b1e5 ../lib ../programs ../tests
#valgrindTest: ZSTDLIBRARY = $(ZSTDLIBDIR)/libzstd.so
valgrindTest: VALGRIND = LD_LIBRARY_PATH=$(ZSTDLIBDIR) valgrind --track-origins=yes --leak-check=full --error-exitcode=1
@ -61,8 +64,8 @@ valgrindTest: clean example fitblk example_zstd fitblk_zstd zwrapbench
$(VALGRIND) ./fitblk 40960 <$(TEST_FILE)
$(VALGRIND) ./fitblk_zstd 10240 <$(TEST_FILE)
$(VALGRIND) ./fitblk_zstd 40960 <$(TEST_FILE)
$(VALGRIND) ./zwrapbench -qb3B1K $(TEST_FILE)
$(VALGRIND) ./zwrapbench -rqb1e5 ../lib ../programs ../tests
$(VALGRIND) ./zwrapbench -qi1b3B1K $(TEST_FILE)
$(VALGRIND) ./zwrapbench -rqi1b1e5 ../lib ../programs ../tests
#.c.o:
# $(CC) $(CFLAGS) $(CPPFLAGS) -c -o $@ $<


@ -90,7 +90,7 @@ static clock_t g_time = 0;
#ifndef DEBUG
# define DEBUG 0
#endif
#define DEBUGOUTPUT(...) if (DEBUG) DISPLAY(__VA_ARGS__);
#define DEBUGOUTPUT(...) { if (DEBUG) DISPLAY(__VA_ARGS__); }
#define EXM_THROW(error, ...) \
{ \
DEBUGOUTPUT("Error defined at %s, line %i : \n", __FILE__, __LINE__); \
@ -236,7 +236,7 @@ static int BMK_benchMem(z_const void* srcBuffer, size_t srcSize,
if (compressor == BMK_ZSTD) {
ZSTD_parameters const zparams = ZSTD_getParams(cLevel, avgSize, dictBufferSize);
ZSTD_customMem const cmem = { NULL, NULL, NULL };
ZSTD_CDict* const cdict = ZSTD_createCDict_advanced(dictBuffer, dictBufferSize, 1, zparams.cParams, cmem);
ZSTD_CDict* const cdict = ZSTD_createCDict_advanced(dictBuffer, dictBufferSize, 1 /*byRef*/, ZSTD_dm_auto, zparams.cParams, cmem);
if (cdict==NULL) EXM_THROW(1, "ZSTD_createCDict_advanced() allocation failure");
do {


@ -8,35 +8,42 @@
*/
/* === Tuning parameters === */
#ifndef ZWRAP_USE_ZSTD
#define ZWRAP_USE_ZSTD 0
#endif
/* === Dependencies === */
#include <stdlib.h>
#include <stdio.h> /* vsprintf */
#include <stdarg.h> /* va_list, for z_gzprintf */
#define NO_DUMMY_DECL
#define ZLIB_CONST
#include <zlib.h> /* without #define Z_PREFIX */
#include "zstd_zlibwrapper.h"
#define ZSTD_STATIC_LINKING_ONLY /* ZSTD_MAGICNUMBER */
#define ZSTD_STATIC_LINKING_ONLY /* ZSTD_isFrame, ZSTD_MAGICNUMBER */
#include "zstd.h"
#include "zstd_internal.h" /* defaultCustomMem */
#include "zstd_internal.h" /* ZSTD_malloc, ZSTD_free */
/* === Constants === */
#define Z_INFLATE_SYNC 8
#define ZLIB_HEADERSIZE 4
#define ZSTD_HEADERSIZE ZSTD_frameHeaderSize_min
#define ZWRAP_DEFAULT_CLEVEL 3 /* Z_DEFAULT_COMPRESSION is translated to ZWRAP_DEFAULT_CLEVEL for zstd */
#define LOG_WRAPPERC(...) /* printf(__VA_ARGS__) */
#define LOG_WRAPPERD(...) /* printf(__VA_ARGS__) */
/* === Debug === */
#define LOG_WRAPPERC(...) /* fprintf(stderr, __VA_ARGS__) */
#define LOG_WRAPPERD(...) /* fprintf(stderr, __VA_ARGS__) */
#define FINISH_WITH_GZ_ERR(msg) { (void)msg; return Z_STREAM_ERROR; }
#define FINISH_WITH_NULL_ERR(msg) { (void)msg; return NULL; }
#ifndef ZWRAP_USE_ZSTD
#define ZWRAP_USE_ZSTD 0
#endif
static int g_ZWRAP_useZSTDcompression = ZWRAP_USE_ZSTD; /* 0 = don't use ZSTD */
/* === Wrapper === */
static int g_ZWRAP_useZSTDcompression = ZWRAP_USE_ZSTD; /* 0 = don't use ZSTD */
void ZWRAP_useZSTDcompression(int turn_on) { g_ZWRAP_useZSTDcompression = turn_on; }
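/* Editor's sketch (usage illustration, not part of this changeset): a zlib
 * application linked against the wrapper keeps its ordinary deflate() calls and
 * only flips the switch above to choose the output format. The helper name is
 * hypothetical; the zlib calls are the standard ones. */
static int example_deflateViaWrapper(z_streamp strm, int useZstd)
{
    ZWRAP_useZSTDcompression(useZstd);   /* 1 => emit zstd frames, 0 => plain zlib output */
    if (deflateInit(strm, Z_DEFAULT_COMPRESSION) != Z_OK) return Z_STREAM_ERROR;
    /* ... set strm->next_in/avail_in and next_out/avail_out, then call
     *     deflate(strm, Z_FINISH) until it returns Z_STREAM_END ... */
    return deflateEnd(strm);
}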
@ -62,7 +69,7 @@ static void* ZWRAP_allocFunction(void* opaque, size_t size)
{
z_streamp strm = (z_streamp) opaque;
void* address = strm->zalloc(strm->opaque, 1, (uInt)size);
/* printf("ZWRAP alloc %p, %d \n", address, (int)size); */
/* LOG_WRAPPERC("ZWRAP alloc %p, %d \n", address, (int)size); */
return address;
}
@ -70,12 +77,12 @@ static void ZWRAP_freeFunction(void* opaque, void* address)
{
z_streamp strm = (z_streamp) opaque;
strm->zfree(strm->opaque, address);
/* if (address) printf("ZWRAP free %p \n", address); */
/* if (address) LOG_WRAPPERC("ZWRAP free %p \n", address); */
}
/* *** Compression *** */
/* === Compression === */
typedef enum { ZWRAP_useInit, ZWRAP_useReset, ZWRAP_streamEnd } ZWRAP_state_t;
typedef struct {
@ -95,16 +102,16 @@ typedef ZWRAP_CCtx internal_state;
size_t ZWRAP_freeCCtx(ZWRAP_CCtx* zwc)
static size_t ZWRAP_freeCCtx(ZWRAP_CCtx* zwc)
{
if (zwc==NULL) return 0; /* support free on NULL */
if (zwc->zbc) ZSTD_freeCStream(zwc->zbc);
zwc->customMem.customFree(zwc->customMem.opaque, zwc);
ZSTD_freeCStream(zwc->zbc);
ZSTD_free(zwc, zwc->customMem);
return 0;
}
ZWRAP_CCtx* ZWRAP_createCCtx(z_streamp strm)
static ZWRAP_CCtx* ZWRAP_createCCtx(z_streamp strm)
{
ZWRAP_CCtx* zwc;
@ -113,37 +120,36 @@ ZWRAP_CCtx* ZWRAP_createCCtx(z_streamp strm)
if (zwc==NULL) return NULL;
memset(zwc, 0, sizeof(ZWRAP_CCtx));
memcpy(&zwc->allocFunc, strm, sizeof(z_stream));
{ ZSTD_customMem ZWRAP_customMem = { ZWRAP_allocFunction, ZWRAP_freeFunction, &zwc->allocFunc };
memcpy(&zwc->customMem, &ZWRAP_customMem, sizeof(ZSTD_customMem));
}
{ ZSTD_customMem const ZWRAP_customMem = { ZWRAP_allocFunction, ZWRAP_freeFunction, &zwc->allocFunc };
zwc->customMem = ZWRAP_customMem; }
} else {
zwc = (ZWRAP_CCtx*)defaultCustomMem.customAlloc(defaultCustomMem.opaque, sizeof(ZWRAP_CCtx));
zwc = (ZWRAP_CCtx*)calloc(1, sizeof(*zwc));
if (zwc==NULL) return NULL;
memset(zwc, 0, sizeof(ZWRAP_CCtx));
memcpy(&zwc->customMem, &defaultCustomMem, sizeof(ZSTD_customMem));
}
return zwc;
}
int ZWRAP_initializeCStream(ZWRAP_CCtx* zwc, const void* dict, size_t dictSize, unsigned long long pledgedSrcSize)
static int ZWRAP_initializeCStream(ZWRAP_CCtx* zwc, const void* dict, size_t dictSize, unsigned long long pledgedSrcSize)
{
LOG_WRAPPERC("- ZWRAP_initializeCStream=%p\n", zwc);
if (zwc == NULL || zwc->zbc == NULL) return Z_STREAM_ERROR;
if (!pledgedSrcSize) pledgedSrcSize = zwc->pledgedSrcSize;
{ ZSTD_parameters const params = ZSTD_getParams(zwc->compressionLevel, pledgedSrcSize, dictSize);
size_t errorCode;
LOG_WRAPPERC("pledgedSrcSize=%d windowLog=%d chainLog=%d hashLog=%d searchLog=%d searchLength=%d strategy=%d\n", (int)pledgedSrcSize, params.cParams.windowLog, params.cParams.chainLog, params.cParams.hashLog, params.cParams.searchLog, params.cParams.searchLength, params.cParams.strategy);
errorCode = ZSTD_initCStream_advanced(zwc->zbc, dict, dictSize, params, pledgedSrcSize);
if (ZSTD_isError(errorCode)) return Z_STREAM_ERROR; }
{ ZSTD_parameters const params = ZSTD_getParams(zwc->compressionLevel, pledgedSrcSize, dictSize);
size_t initErr;
LOG_WRAPPERC("pledgedSrcSize=%d windowLog=%d chainLog=%d hashLog=%d searchLog=%d searchLength=%d strategy=%d\n",
(int)pledgedSrcSize, params.cParams.windowLog, params.cParams.chainLog, params.cParams.hashLog, params.cParams.searchLog, params.cParams.searchLength, params.cParams.strategy);
initErr = ZSTD_initCStream_advanced(zwc->zbc, dict, dictSize, params, pledgedSrcSize);
if (ZSTD_isError(initErr)) return Z_STREAM_ERROR;
}
return Z_OK;
}
int ZWRAPC_finishWithError(ZWRAP_CCtx* zwc, z_streamp strm, int error)
static int ZWRAPC_finishWithError(ZWRAP_CCtx* zwc, z_streamp strm, int error)
{
LOG_WRAPPERC("- ZWRAPC_finishWithError=%d\n", error);
if (zwc) ZWRAP_freeCCtx(zwc);
@ -152,7 +158,7 @@ int ZWRAPC_finishWithError(ZWRAP_CCtx* zwc, z_streamp strm, int error)
}
int ZWRAPC_finishWithErrorMsg(z_streamp strm, char* message)
static int ZWRAPC_finishWithErrorMsg(z_streamp strm, char* message)
{
ZWRAP_CCtx* zwc = (ZWRAP_CCtx*) strm->state;
strm->msg = message;
@ -219,7 +225,7 @@ int ZWRAP_deflateReset_keepDict(z_streamp strm)
return deflateReset(strm);
{ ZWRAP_CCtx* zwc = (ZWRAP_CCtx*) strm->state;
if (zwc) {
if (zwc) {
zwc->streamEnd = 0;
zwc->totalInBytes = 0;
}
@ -277,34 +283,36 @@ ZEXTERN int ZEXPORT z_deflate OF((z_streamp strm, int flush))
ZWRAP_CCtx* zwc;
if (!g_ZWRAP_useZSTDcompression) {
int res;
LOG_WRAPPERC("- deflate1 flush=%d avail_in=%d avail_out=%d total_in=%d total_out=%d\n", (int)flush, (int)strm->avail_in, (int)strm->avail_out, (int)strm->total_in, (int)strm->total_out);
res = deflate(strm, flush);
return res;
LOG_WRAPPERC("- deflate1 flush=%d avail_in=%d avail_out=%d total_in=%d total_out=%d\n",
(int)flush, (int)strm->avail_in, (int)strm->avail_out, (int)strm->total_in, (int)strm->total_out);
return deflate(strm, flush);
}
zwc = (ZWRAP_CCtx*) strm->state;
if (zwc == NULL) { LOG_WRAPPERC("zwc == NULL\n"); return Z_STREAM_ERROR; }
if (zwc->zbc == NULL) {
int res;
zwc->zbc = ZSTD_createCStream_advanced(zwc->customMem);
if (zwc->zbc == NULL) return ZWRAPC_finishWithError(zwc, strm, 0);
res = ZWRAP_initializeCStream(zwc, NULL, 0, (flush == Z_FINISH) ? strm->avail_in : 0);
if (res != Z_OK) return ZWRAPC_finishWithError(zwc, strm, res);
{ int const initErr = ZWRAP_initializeCStream(zwc, NULL, 0, (flush == Z_FINISH) ? strm->avail_in : 0);
if (initErr != Z_OK) return ZWRAPC_finishWithError(zwc, strm, initErr); }
if (flush != Z_FINISH) zwc->comprState = ZWRAP_useReset;
} else {
if (zwc->totalInBytes == 0) {
if (zwc->comprState == ZWRAP_useReset) {
size_t const errorCode = ZSTD_resetCStream(zwc->zbc, (flush == Z_FINISH) ? strm->avail_in : zwc->pledgedSrcSize);
if (ZSTD_isError(errorCode)) { LOG_WRAPPERC("ERROR: ZSTD_resetCStream errorCode=%s\n", ZSTD_getErrorName(errorCode)); return ZWRAPC_finishWithError(zwc, strm, 0); }
size_t const resetErr = ZSTD_resetCStream(zwc->zbc, (flush == Z_FINISH) ? strm->avail_in : zwc->pledgedSrcSize);
if (ZSTD_isError(resetErr)) {
LOG_WRAPPERC("ERROR: ZSTD_resetCStream errorCode=%s\n",
ZSTD_getErrorName(resetErr));
return ZWRAPC_finishWithError(zwc, strm, 0);
}
} else {
int res = ZWRAP_initializeCStream(zwc, NULL, 0, (flush == Z_FINISH) ? strm->avail_in : 0);
int const res = ZWRAP_initializeCStream(zwc, NULL, 0, (flush == Z_FINISH) ? strm->avail_in : 0);
if (res != Z_OK) return ZWRAPC_finishWithError(zwc, strm, res);
if (flush != Z_FINISH) zwc->comprState = ZWRAP_useReset;
}
}
}
} /* (zwc->totalInBytes == 0) */
} /* ! (zwc->zbc == NULL) */
LOG_WRAPPERC("- deflate2 flush=%d avail_in=%d avail_out=%d total_in=%d total_out=%d\n", (int)flush, (int)strm->avail_in, (int)strm->avail_out, (int)strm->total_in, (int)strm->total_out);
if (strm->avail_in > 0) {
@ -314,9 +322,9 @@ ZEXTERN int ZEXPORT z_deflate OF((z_streamp strm, int flush))
zwc->outBuffer.dst = strm->next_out;
zwc->outBuffer.size = strm->avail_out;
zwc->outBuffer.pos = 0;
{ size_t const errorCode = ZSTD_compressStream(zwc->zbc, &zwc->outBuffer, &zwc->inBuffer);
{ size_t const cErr = ZSTD_compressStream(zwc->zbc, &zwc->outBuffer, &zwc->inBuffer);
LOG_WRAPPERC("deflate ZSTD_compressStream srcSize=%d dstCapacity=%d\n", (int)zwc->inBuffer.size, (int)zwc->outBuffer.size);
if (ZSTD_isError(errorCode)) return ZWRAPC_finishWithError(zwc, strm, 0);
if (ZSTD_isError(cErr)) return ZWRAPC_finishWithError(zwc, strm, 0);
}
strm->next_out += zwc->outBuffer.pos;
strm->total_out += zwc->outBuffer.pos;
@ -346,8 +354,12 @@ ZEXTERN int ZEXPORT z_deflate OF((z_streamp strm, int flush))
strm->next_out += zwc->outBuffer.pos;
strm->total_out += zwc->outBuffer.pos;
strm->avail_out -= zwc->outBuffer.pos;
if (bytesLeft == 0) { zwc->streamEnd = 1; LOG_WRAPPERC("Z_STREAM_END2 strm->total_in=%d strm->avail_out=%d strm->total_out=%d\n", (int)strm->total_in, (int)strm->avail_out, (int)strm->total_out); return Z_STREAM_END; }
}
if (bytesLeft == 0) {
zwc->streamEnd = 1;
LOG_WRAPPERC("Z_STREAM_END2 strm->total_in=%d strm->avail_out=%d strm->total_out=%d\n",
(int)strm->total_in, (int)strm->avail_out, (int)strm->total_out);
return Z_STREAM_END;
} }
else
if (flush == Z_SYNC_FLUSH || flush == Z_PARTIAL_FLUSH) {
size_t bytesLeft;
@ -410,12 +422,13 @@ ZEXTERN int ZEXPORT z_deflateParams OF((z_streamp strm,
/* *** Decompression *** */
/* === Decompression === */
typedef enum { ZWRAP_ZLIB_STREAM, ZWRAP_ZSTD_STREAM, ZWRAP_UNKNOWN_STREAM } ZWRAP_stream_type;
typedef struct {
ZSTD_DStream* zbd;
char headerBuf[16]; /* should be equal or bigger than ZSTD_frameHeaderSize_min */
char headerBuf[16]; /* must be >= ZSTD_frameHeaderSize_min */
int errorCount;
unsigned long long totalInBytes; /* we need it as strm->total_in can be reset by user */
ZWRAP_state_t decompState;
@ -427,10 +440,48 @@ typedef struct {
char *version;
int windowBits;
ZSTD_customMem customMem;
z_stream allocFunc; /* copy of zalloc, zfree, opaque */
z_stream allocFunc; /* just to copy zalloc, zfree, opaque */
} ZWRAP_DCtx;
static void ZWRAP_initDCtx(ZWRAP_DCtx* zwd)
{
zwd->errorCount = 0;
zwd->outBuffer.pos = 0;
zwd->outBuffer.size = 0;
}
static ZWRAP_DCtx* ZWRAP_createDCtx(z_streamp strm)
{
ZWRAP_DCtx* zwd;
MEM_STATIC_ASSERT(sizeof(zwd->headerBuf) >= ZSTD_FRAMEHEADERSIZE_MIN); /* check static buffer size condition */
if (strm->zalloc && strm->zfree) {
zwd = (ZWRAP_DCtx*)strm->zalloc(strm->opaque, 1, sizeof(ZWRAP_DCtx));
if (zwd==NULL) return NULL;
memset(zwd, 0, sizeof(ZWRAP_DCtx));
zwd->allocFunc = *strm; /* just to copy zalloc, zfree & opaque */
{ ZSTD_customMem const ZWRAP_customMem = { ZWRAP_allocFunction, ZWRAP_freeFunction, &zwd->allocFunc };
zwd->customMem = ZWRAP_customMem; }
} else {
zwd = (ZWRAP_DCtx*)calloc(1, sizeof(*zwd));
if (zwd==NULL) return NULL;
}
ZWRAP_initDCtx(zwd);
return zwd;
}
static size_t ZWRAP_freeDCtx(ZWRAP_DCtx* zwd)
{
if (zwd==NULL) return 0; /* support free on null */
ZSTD_freeDStream(zwd->zbd);
ZSTD_free(zwd->version, zwd->customMem);
ZSTD_free(zwd, zwd->customMem);
return 0;
}
int ZWRAP_isUsingZSTDdecompression(z_streamp strm)
{
if (strm == NULL) return 0;
@ -438,61 +489,17 @@ int ZWRAP_isUsingZSTDdecompression(z_streamp strm)
}
void ZWRAP_initDCtx(ZWRAP_DCtx* zwd)
{
zwd->errorCount = 0;
zwd->outBuffer.pos = 0;
zwd->outBuffer.size = 0;
}
ZWRAP_DCtx* ZWRAP_createDCtx(z_streamp strm)
{
ZWRAP_DCtx* zwd;
if (strm->zalloc && strm->zfree) {
zwd = (ZWRAP_DCtx*)strm->zalloc(strm->opaque, 1, sizeof(ZWRAP_DCtx));
if (zwd==NULL) return NULL;
memset(zwd, 0, sizeof(ZWRAP_DCtx));
memcpy(&zwd->allocFunc, strm, sizeof(z_stream));
{ ZSTD_customMem ZWRAP_customMem = { ZWRAP_allocFunction, ZWRAP_freeFunction, &zwd->allocFunc };
memcpy(&zwd->customMem, &ZWRAP_customMem, sizeof(ZSTD_customMem));
}
} else {
zwd = (ZWRAP_DCtx*)defaultCustomMem.customAlloc(defaultCustomMem.opaque, sizeof(ZWRAP_DCtx));
if (zwd==NULL) return NULL;
memset(zwd, 0, sizeof(ZWRAP_DCtx));
memcpy(&zwd->customMem, &defaultCustomMem, sizeof(ZSTD_customMem));
}
MEM_STATIC_ASSERT(sizeof(zwd->headerBuf) >= ZSTD_FRAMEHEADERSIZE_MIN); /* if compilation fails here, assertion is false */
ZWRAP_initDCtx(zwd);
return zwd;
}
size_t ZWRAP_freeDCtx(ZWRAP_DCtx* zwd)
{
if (zwd==NULL) return 0; /* support free on null */
if (zwd->zbd) ZSTD_freeDStream(zwd->zbd);
if (zwd->version) zwd->customMem.customFree(zwd->customMem.opaque, zwd->version);
zwd->customMem.customFree(zwd->customMem.opaque, zwd);
return 0;
}
int ZWRAPD_finishWithError(ZWRAP_DCtx* zwd, z_streamp strm, int error)
static int ZWRAPD_finishWithError(ZWRAP_DCtx* zwd, z_streamp strm, int error)
{
LOG_WRAPPERD("- ZWRAPD_finishWithError=%d\n", error);
if (zwd) ZWRAP_freeDCtx(zwd);
if (strm) strm->state = NULL;
ZWRAP_freeDCtx(zwd);
strm->state = NULL;
return (error) ? error : Z_STREAM_ERROR;
}
int ZWRAPD_finishWithErrorMsg(z_streamp strm, char* message)
static int ZWRAPD_finishWithErrorMsg(z_streamp strm, char* message)
{
ZWRAP_DCtx* zwd = (ZWRAP_DCtx*) strm->state;
ZWRAP_DCtx* const zwd = (ZWRAP_DCtx*) strm->state;
strm->msg = message;
if (zwd == NULL) return Z_STREAM_ERROR;
@ -504,26 +511,25 @@ ZEXTERN int ZEXPORT z_inflateInit_ OF((z_streamp strm,
const char *version, int stream_size))
{
if (g_ZWRAPdecompressionType == ZWRAP_FORCE_ZLIB) {
strm->reserved = ZWRAP_ZLIB_STREAM; /* mark as zlib stream */
strm->reserved = ZWRAP_ZLIB_STREAM;
return inflateInit(strm);
}
{
ZWRAP_DCtx* zwd = ZWRAP_createDCtx(strm);
LOG_WRAPPERD("- inflateInit\n");
if (zwd == NULL) return ZWRAPD_finishWithError(zwd, strm, 0);
{ ZWRAP_DCtx* const zwd = ZWRAP_createDCtx(strm);
LOG_WRAPPERD("- inflateInit\n");
if (zwd == NULL) return ZWRAPD_finishWithError(zwd, strm, 0);
zwd->version = zwd->customMem.customAlloc(zwd->customMem.opaque, strlen(version) + 1);
if (zwd->version == NULL) return ZWRAPD_finishWithError(zwd, strm, 0);
strcpy(zwd->version, version);
zwd->version = ZSTD_malloc(strlen(version)+1, zwd->customMem);
if (zwd->version == NULL) return ZWRAPD_finishWithError(zwd, strm, 0);
strcpy(zwd->version, version);
zwd->stream_size = stream_size;
zwd->totalInBytes = 0;
strm->state = (struct internal_state*) zwd; /* use state which in not used by user */
strm->total_in = 0;
strm->total_out = 0;
strm->reserved = ZWRAP_UNKNOWN_STREAM; /* mark as unknown steam */
strm->adler = 0;
zwd->stream_size = stream_size;
zwd->totalInBytes = 0;
strm->state = (struct internal_state*) zwd;
strm->total_in = 0;
strm->total_out = 0;
strm->reserved = ZWRAP_UNKNOWN_STREAM;
strm->adler = 0;
}
return Z_OK;
@ -537,15 +543,14 @@ ZEXTERN int ZEXPORT z_inflateInit2_ OF((z_streamp strm, int windowBits,
return inflateInit2_(strm, windowBits, version, stream_size);
}
{
int ret = z_inflateInit_ (strm, version, stream_size);
LOG_WRAPPERD("- inflateInit2 windowBits=%d\n", windowBits);
if (ret == Z_OK) {
ZWRAP_DCtx* zwd = (ZWRAP_DCtx*)strm->state;
if (zwd == NULL) return Z_STREAM_ERROR;
zwd->windowBits = windowBits;
}
return ret;
{ int const ret = z_inflateInit_ (strm, version, stream_size);
LOG_WRAPPERD("- inflateInit2 windowBits=%d\n", windowBits);
if (ret == Z_OK) {
ZWRAP_DCtx* const zwd = (ZWRAP_DCtx*)strm->state;
if (zwd == NULL) return Z_STREAM_ERROR;
zwd->windowBits = windowBits;
}
return ret;
}
}
@ -555,7 +560,7 @@ int ZWRAP_inflateReset_keepDict(z_streamp strm)
if (g_ZWRAPdecompressionType == ZWRAP_FORCE_ZLIB || !strm->reserved)
return inflateReset(strm);
{ ZWRAP_DCtx* zwd = (ZWRAP_DCtx*) strm->state;
{ ZWRAP_DCtx* const zwd = (ZWRAP_DCtx*) strm->state;
if (zwd == NULL) return Z_STREAM_ERROR;
ZWRAP_initDCtx(zwd);
zwd->decompState = ZWRAP_useReset;
@ -574,10 +579,10 @@ ZEXTERN int ZEXPORT z_inflateReset OF((z_streamp strm))
if (g_ZWRAPdecompressionType == ZWRAP_FORCE_ZLIB || !strm->reserved)
return inflateReset(strm);
{ int ret = ZWRAP_inflateReset_keepDict(strm);
{ int const ret = ZWRAP_inflateReset_keepDict(strm);
if (ret != Z_OK) return ret; }
{ ZWRAP_DCtx* zwd = (ZWRAP_DCtx*) strm->state;
{ ZWRAP_DCtx* const zwd = (ZWRAP_DCtx*) strm->state;
if (zwd == NULL) return Z_STREAM_ERROR;
zwd->decompState = ZWRAP_useInit; }
@ -592,9 +597,9 @@ ZEXTERN int ZEXPORT z_inflateReset2 OF((z_streamp strm,
if (g_ZWRAPdecompressionType == ZWRAP_FORCE_ZLIB || !strm->reserved)
return inflateReset2(strm, windowBits);
{ int ret = z_inflateReset (strm);
{ int const ret = z_inflateReset (strm);
if (ret == Z_OK) {
ZWRAP_DCtx* zwd = (ZWRAP_DCtx*)strm->state;
ZWRAP_DCtx* const zwd = (ZWRAP_DCtx*)strm->state;
if (zwd == NULL) return Z_STREAM_ERROR;
zwd->windowBits = windowBits;
}
@ -612,11 +617,10 @@ ZEXTERN int ZEXPORT z_inflateSetDictionary OF((z_streamp strm,
if (g_ZWRAPdecompressionType == ZWRAP_FORCE_ZLIB || !strm->reserved)
return inflateSetDictionary(strm, dictionary, dictLength);
{ size_t errorCode;
ZWRAP_DCtx* zwd = (ZWRAP_DCtx*) strm->state;
{ ZWRAP_DCtx* const zwd = (ZWRAP_DCtx*) strm->state;
if (zwd == NULL || zwd->zbd == NULL) return Z_STREAM_ERROR;
errorCode = ZSTD_initDStream_usingDict(zwd->zbd, dictionary, dictLength);
if (ZSTD_isError(errorCode)) return ZWRAPD_finishWithError(zwd, strm, 0);
{ size_t const initErr = ZSTD_initDStream_usingDict(zwd->zbd, dictionary, dictLength);
if (ZSTD_isError(initErr)) return ZWRAPD_finishWithError(zwd, strm, 0); }
zwd->decompState = ZWRAP_useReset;
if (zwd->totalInBytes == ZSTD_HEADERSIZE) {
@ -626,14 +630,14 @@ ZEXTERN int ZEXPORT z_inflateSetDictionary OF((z_streamp strm,
zwd->outBuffer.dst = strm->next_out;
zwd->outBuffer.size = 0;
zwd->outBuffer.pos = 0;
errorCode = ZSTD_decompressStream(zwd->zbd, &zwd->outBuffer, &zwd->inBuffer);
LOG_WRAPPERD("inflateSetDictionary ZSTD_decompressStream errorCode=%d srcSize=%d dstCapacity=%d\n", (int)errorCode, (int)zwd->inBuffer.size, (int)zwd->outBuffer.size);
if (zwd->inBuffer.pos < zwd->outBuffer.size || ZSTD_isError(errorCode)) {
LOG_WRAPPERD("ERROR: ZSTD_decompressStream %s\n", ZSTD_getErrorName(errorCode));
return ZWRAPD_finishWithError(zwd, strm, 0);
}
}
}
{ size_t const errorCode = ZSTD_decompressStream(zwd->zbd, &zwd->outBuffer, &zwd->inBuffer);
LOG_WRAPPERD("inflateSetDictionary ZSTD_decompressStream errorCode=%d srcSize=%d dstCapacity=%d\n",
(int)errorCode, (int)zwd->inBuffer.size, (int)zwd->outBuffer.size);
if (zwd->inBuffer.pos < zwd->outBuffer.size || ZSTD_isError(errorCode)) {
LOG_WRAPPERD("ERROR: ZSTD_decompressStream %s\n",
ZSTD_getErrorName(errorCode));
return ZWRAPD_finishWithError(zwd, strm, 0);
} } } }
return Z_OK;
}
@ -642,156 +646,175 @@ ZEXTERN int ZEXPORT z_inflateSetDictionary OF((z_streamp strm,
ZEXTERN int ZEXPORT z_inflate OF((z_streamp strm, int flush))
{
ZWRAP_DCtx* zwd;
int res;
if (g_ZWRAPdecompressionType == ZWRAP_FORCE_ZLIB || !strm->reserved) {
LOG_WRAPPERD("- inflate1 flush=%d avail_in=%d avail_out=%d total_in=%d total_out=%d\n", (int)flush, (int)strm->avail_in, (int)strm->avail_out, (int)strm->total_in, (int)strm->total_out);
res = inflate(strm, flush);
LOG_WRAPPERD("- inflate2 flush=%d avail_in=%d avail_out=%d total_in=%d total_out=%d res=%d\n", (int)flush, (int)strm->avail_in, (int)strm->avail_out, (int)strm->total_in, (int)strm->total_out, res);
return res;
int const result = inflate(strm, flush);
LOG_WRAPPERD("- inflate2 flush=%d avail_in=%d avail_out=%d total_in=%d total_out=%d res=%d\n",
(int)flush, (int)strm->avail_in, (int)strm->avail_out, (int)strm->total_in, (int)strm->total_out, result);
return result;
}
if (strm->avail_in <= 0) return Z_OK;
{ size_t errorCode, srcSize;
zwd = (ZWRAP_DCtx*) strm->state;
LOG_WRAPPERD("- inflate1 flush=%d avail_in=%d avail_out=%d total_in=%d total_out=%d\n", (int)flush, (int)strm->avail_in, (int)strm->avail_out, (int)strm->total_in, (int)strm->total_out);
zwd = (ZWRAP_DCtx*) strm->state;
LOG_WRAPPERD("- inflate1 flush=%d avail_in=%d avail_out=%d total_in=%d total_out=%d\n",
(int)flush, (int)strm->avail_in, (int)strm->avail_out, (int)strm->total_in, (int)strm->total_out);
if (zwd == NULL) return Z_STREAM_ERROR;
if (zwd->decompState == ZWRAP_streamEnd) return Z_STREAM_END;
if (zwd == NULL) return Z_STREAM_ERROR;
if (zwd->decompState == ZWRAP_streamEnd) return Z_STREAM_END;
if (zwd->totalInBytes < ZLIB_HEADERSIZE) {
if (zwd->totalInBytes == 0 && strm->avail_in >= ZLIB_HEADERSIZE) {
if (MEM_readLE32(strm->next_in) != ZSTD_MAGICNUMBER) {
if (zwd->windowBits)
errorCode = inflateInit2_(strm, zwd->windowBits, zwd->version, zwd->stream_size);
else
errorCode = inflateInit_(strm, zwd->version, zwd->stream_size);
strm->reserved = ZWRAP_ZLIB_STREAM; /* mark as zlib stream */
errorCode = ZWRAP_freeDCtx(zwd);
if (ZSTD_isError(errorCode)) goto error;
if (flush == Z_INFLATE_SYNC) res = inflateSync(strm);
else res = inflate(strm, flush);
LOG_WRAPPERD("- inflate3 flush=%d avail_in=%d avail_out=%d total_in=%d total_out=%d res=%d\n", (int)flush, (int)strm->avail_in, (int)strm->avail_out, (int)strm->total_in, (int)strm->total_out, res);
return res;
}
} else {
srcSize = MIN(strm->avail_in, ZLIB_HEADERSIZE - zwd->totalInBytes);
memcpy(zwd->headerBuf+zwd->totalInBytes, strm->next_in, srcSize);
strm->total_in += srcSize;
zwd->totalInBytes += srcSize;
strm->next_in += srcSize;
strm->avail_in -= srcSize;
if (zwd->totalInBytes < ZLIB_HEADERSIZE) return Z_OK;
if (MEM_readLE32(zwd->headerBuf) != ZSTD_MAGICNUMBER) {
z_stream strm2;
strm2.next_in = strm->next_in;
strm2.avail_in = strm->avail_in;
strm2.next_out = strm->next_out;
strm2.avail_out = strm->avail_out;
if (zwd->windowBits)
errorCode = inflateInit2_(strm, zwd->windowBits, zwd->version, zwd->stream_size);
else
errorCode = inflateInit_(strm, zwd->version, zwd->stream_size);
LOG_WRAPPERD("ZLIB inflateInit errorCode=%d\n", (int)errorCode);
if (errorCode != Z_OK) return ZWRAPD_finishWithError(zwd, strm, (int)errorCode);
/* inflate header */
strm->next_in = (unsigned char*)zwd->headerBuf;
strm->avail_in = ZLIB_HEADERSIZE;
strm->avail_out = 0;
errorCode = inflate(strm, Z_NO_FLUSH);
LOG_WRAPPERD("ZLIB inflate errorCode=%d strm->avail_in=%d\n", (int)errorCode, (int)strm->avail_in);
if (errorCode != Z_OK) return ZWRAPD_finishWithError(zwd, strm, (int)errorCode);
if (strm->avail_in > 0) goto error;
strm->next_in = strm2.next_in;
strm->avail_in = strm2.avail_in;
strm->next_out = strm2.next_out;
strm->avail_out = strm2.avail_out;
strm->reserved = ZWRAP_ZLIB_STREAM; /* mark as zlib stream */
errorCode = ZWRAP_freeDCtx(zwd);
if (ZSTD_isError(errorCode)) goto error;
if (flush == Z_INFLATE_SYNC) res = inflateSync(strm);
else res = inflate(strm, flush);
LOG_WRAPPERD("- inflate2 flush=%d avail_in=%d avail_out=%d total_in=%d total_out=%d res=%d\n", (int)flush, (int)strm->avail_in, (int)strm->avail_out, (int)strm->total_in, (int)strm->total_out, res);
return res;
}
}
}
strm->reserved = ZWRAP_ZSTD_STREAM; /* mark as zstd steam */
if (flush == Z_INFLATE_SYNC) { strm->msg = "inflateSync is not supported!"; goto error; }
if (!zwd->zbd) {
zwd->zbd = ZSTD_createDStream_advanced(zwd->customMem);
if (zwd->zbd == NULL) { LOG_WRAPPERD("ERROR: ZSTD_createDStream_advanced\n"); goto error; }
zwd->decompState = ZWRAP_useInit;
}
if (zwd->totalInBytes < ZSTD_HEADERSIZE)
{
if (zwd->totalInBytes == 0 && strm->avail_in >= ZSTD_HEADERSIZE) {
if (zwd->decompState == ZWRAP_useInit) {
errorCode = ZSTD_initDStream(zwd->zbd);
if (ZSTD_isError(errorCode)) { LOG_WRAPPERD("ERROR: ZSTD_initDStream errorCode=%s\n", ZSTD_getErrorName(errorCode)); goto error; }
} else {
errorCode = ZSTD_resetDStream(zwd->zbd);
if (ZSTD_isError(errorCode)) goto error;
}
} else {
srcSize = MIN(strm->avail_in, ZSTD_HEADERSIZE - zwd->totalInBytes);
memcpy(zwd->headerBuf+zwd->totalInBytes, strm->next_in, srcSize);
strm->total_in += srcSize;
zwd->totalInBytes += srcSize;
strm->next_in += srcSize;
strm->avail_in -= srcSize;
if (zwd->totalInBytes < ZSTD_HEADERSIZE) return Z_OK;
if (zwd->decompState == ZWRAP_useInit) {
errorCode = ZSTD_initDStream(zwd->zbd);
if (ZSTD_isError(errorCode)) { LOG_WRAPPERD("ERROR: ZSTD_initDStream errorCode=%s\n", ZSTD_getErrorName(errorCode)); goto error; }
} else {
errorCode = ZSTD_resetDStream(zwd->zbd);
if (ZSTD_isError(errorCode)) goto error;
if (zwd->totalInBytes < ZLIB_HEADERSIZE) {
if (zwd->totalInBytes == 0 && strm->avail_in >= ZLIB_HEADERSIZE) {
if (MEM_readLE32(strm->next_in) != ZSTD_MAGICNUMBER) {
{ int const initErr = (zwd->windowBits) ?
inflateInit2_(strm, zwd->windowBits, zwd->version, zwd->stream_size) :
inflateInit_(strm, zwd->version, zwd->stream_size);
LOG_WRAPPERD("ZLIB inflateInit errorCode=%d\n", initErr);
if (initErr != Z_OK) return ZWRAPD_finishWithError(zwd, strm, initErr);
}
zwd->inBuffer.src = zwd->headerBuf;
zwd->inBuffer.size = ZSTD_HEADERSIZE;
zwd->inBuffer.pos = 0;
zwd->outBuffer.dst = strm->next_out;
zwd->outBuffer.size = 0;
zwd->outBuffer.pos = 0;
errorCode = ZSTD_decompressStream(zwd->zbd, &zwd->outBuffer, &zwd->inBuffer);
LOG_WRAPPERD("inflate ZSTD_decompressStream1 errorCode=%d srcSize=%d dstCapacity=%d\n", (int)errorCode, (int)zwd->inBuffer.size, (int)zwd->outBuffer.size);
if (ZSTD_isError(errorCode)) {
LOG_WRAPPERD("ERROR: ZSTD_decompressStream1 %s\n", ZSTD_getErrorName(errorCode));
strm->reserved = ZWRAP_ZLIB_STREAM;
{ size_t const freeErr = ZWRAP_freeDCtx(zwd);
if (ZSTD_isError(freeErr)) goto error; }
{ int const result = (flush == Z_INFLATE_SYNC) ?
inflateSync(strm) :
inflate(strm, flush);
LOG_WRAPPERD("- inflate3 flush=%d avail_in=%d avail_out=%d total_in=%d total_out=%d res=%d\n",
(int)flush, (int)strm->avail_in, (int)strm->avail_out, (int)strm->total_in, (int)strm->total_out, result);
return result;
} }
} else { /* ! (zwd->totalInBytes == 0 && strm->avail_in >= ZLIB_HEADERSIZE) */
size_t const srcSize = MIN(strm->avail_in, ZLIB_HEADERSIZE - zwd->totalInBytes);
memcpy(zwd->headerBuf+zwd->totalInBytes, strm->next_in, srcSize);
strm->total_in += srcSize;
zwd->totalInBytes += srcSize;
strm->next_in += srcSize;
strm->avail_in -= srcSize;
if (zwd->totalInBytes < ZLIB_HEADERSIZE) return Z_OK;
if (MEM_readLE32(zwd->headerBuf) != ZSTD_MAGICNUMBER) {
z_stream strm2;
strm2.next_in = strm->next_in;
strm2.avail_in = strm->avail_in;
strm2.next_out = strm->next_out;
strm2.avail_out = strm->avail_out;
{ int const initErr = (zwd->windowBits) ?
inflateInit2_(strm, zwd->windowBits, zwd->version, zwd->stream_size) :
inflateInit_(strm, zwd->version, zwd->stream_size);
LOG_WRAPPERD("ZLIB inflateInit errorCode=%d\n", initErr);
if (initErr != Z_OK) return ZWRAPD_finishWithError(zwd, strm, initErr);
}
/* inflate header */
strm->next_in = (unsigned char*)zwd->headerBuf;
strm->avail_in = ZLIB_HEADERSIZE;
strm->avail_out = 0;
{ int const dErr = inflate(strm, Z_NO_FLUSH);
LOG_WRAPPERD("ZLIB inflate errorCode=%d strm->avail_in=%d\n",
dErr, (int)strm->avail_in);
if (dErr != Z_OK)
return ZWRAPD_finishWithError(zwd, strm, dErr);
}
if (strm->avail_in > 0) goto error;
strm->next_in = strm2.next_in;
strm->avail_in = strm2.avail_in;
strm->next_out = strm2.next_out;
strm->avail_out = strm2.avail_out;
strm->reserved = ZWRAP_ZLIB_STREAM; /* mark as zlib stream */
{ size_t const freeErr = ZWRAP_freeDCtx(zwd);
if (ZSTD_isError(freeErr)) goto error; }
{ int const result = (flush == Z_INFLATE_SYNC) ?
inflateSync(strm) :
inflate(strm, flush);
LOG_WRAPPERD("- inflate2 flush=%d avail_in=%d avail_out=%d total_in=%d total_out=%d res=%d\n",
(int)flush, (int)strm->avail_in, (int)strm->avail_out, (int)strm->total_in, (int)strm->total_out, result);
return result;
} } } /* if ! (zwd->totalInBytes == 0 && strm->avail_in >= ZLIB_HEADERSIZE) */
} /* (zwd->totalInBytes < ZLIB_HEADERSIZE) */
strm->reserved = ZWRAP_ZSTD_STREAM; /* mark as zstd stream */
if (flush == Z_INFLATE_SYNC) { strm->msg = "inflateSync is not supported!"; goto error; }
if (!zwd->zbd) {
zwd->zbd = ZSTD_createDStream_advanced(zwd->customMem);
if (zwd->zbd == NULL) { LOG_WRAPPERD("ERROR: ZSTD_createDStream_advanced\n"); goto error; }
zwd->decompState = ZWRAP_useInit;
}
if (zwd->totalInBytes < ZSTD_HEADERSIZE) {
if (zwd->totalInBytes == 0 && strm->avail_in >= ZSTD_HEADERSIZE) {
if (zwd->decompState == ZWRAP_useInit) {
size_t const initErr = ZSTD_initDStream(zwd->zbd);
if (ZSTD_isError(initErr)) {
LOG_WRAPPERD("ERROR: ZSTD_initDStream errorCode=%s\n",
ZSTD_getErrorName(initErr));
goto error;
}
if (zwd->inBuffer.pos != zwd->inBuffer.size) goto error; /* not consumed */
} else {
size_t const resetErr = ZSTD_resetDStream(zwd->zbd);
if (ZSTD_isError(resetErr)) goto error;
}
}
} else {
size_t const srcSize = MIN(strm->avail_in, ZSTD_HEADERSIZE - zwd->totalInBytes);
memcpy(zwd->headerBuf+zwd->totalInBytes, strm->next_in, srcSize);
strm->total_in += srcSize;
zwd->totalInBytes += srcSize;
strm->next_in += srcSize;
strm->avail_in -= srcSize;
if (zwd->totalInBytes < ZSTD_HEADERSIZE) return Z_OK;
zwd->inBuffer.src = strm->next_in;
zwd->inBuffer.size = strm->avail_in;
zwd->inBuffer.pos = 0;
zwd->outBuffer.dst = strm->next_out;
zwd->outBuffer.size = strm->avail_out;
zwd->outBuffer.pos = 0;
errorCode = ZSTD_decompressStream(zwd->zbd, &zwd->outBuffer, &zwd->inBuffer);
LOG_WRAPPERD("inflate ZSTD_decompressStream2 errorCode=%d srcSize=%d dstCapacity=%d\n", (int)errorCode, (int)strm->avail_in, (int)strm->avail_out);
if (ZSTD_isError(errorCode)) {
if (zwd->decompState == ZWRAP_useInit) {
size_t const initErr = ZSTD_initDStream(zwd->zbd);
if (ZSTD_isError(initErr)) {
LOG_WRAPPERD("ERROR: ZSTD_initDStream errorCode=%s\n",
ZSTD_getErrorName(initErr));
goto error;
}
} else {
size_t const resetErr = ZSTD_resetDStream(zwd->zbd);
if (ZSTD_isError(resetErr)) goto error;
}
zwd->inBuffer.src = zwd->headerBuf;
zwd->inBuffer.size = ZSTD_HEADERSIZE;
zwd->inBuffer.pos = 0;
zwd->outBuffer.dst = strm->next_out;
zwd->outBuffer.size = 0;
zwd->outBuffer.pos = 0;
{ size_t const dErr = ZSTD_decompressStream(zwd->zbd, &zwd->outBuffer, &zwd->inBuffer);
LOG_WRAPPERD("inflate ZSTD_decompressStream1 errorCode=%d srcSize=%d dstCapacity=%d\n",
(int)dErr, (int)zwd->inBuffer.size, (int)zwd->outBuffer.size);
if (ZSTD_isError(dErr)) {
LOG_WRAPPERD("ERROR: ZSTD_decompressStream1 %s\n", ZSTD_getErrorName(dErr));
goto error;
} }
if (zwd->inBuffer.pos != zwd->inBuffer.size) goto error; /* not consumed */
}
} /* (zwd->totalInBytes < ZSTD_HEADERSIZE) */
zwd->inBuffer.src = strm->next_in;
zwd->inBuffer.size = strm->avail_in;
zwd->inBuffer.pos = 0;
zwd->outBuffer.dst = strm->next_out;
zwd->outBuffer.size = strm->avail_out;
zwd->outBuffer.pos = 0;
{ size_t const dErr = ZSTD_decompressStream(zwd->zbd, &zwd->outBuffer, &zwd->inBuffer);
LOG_WRAPPERD("inflate ZSTD_decompressStream2 errorCode=%d srcSize=%d dstCapacity=%d\n",
(int)dErr, (int)strm->avail_in, (int)strm->avail_out);
if (ZSTD_isError(dErr)) {
zwd->errorCount++;
LOG_WRAPPERD("ERROR: ZSTD_decompressStream2 %s zwd->errorCount=%d\n", ZSTD_getErrorName(errorCode), zwd->errorCount);
LOG_WRAPPERD("ERROR: ZSTD_decompressStream2 %s zwd->errorCount=%d\n",
ZSTD_getErrorName(dErr), zwd->errorCount);
if (zwd->errorCount<=1) return Z_NEED_DICT; else goto error;
}
LOG_WRAPPERD("inflate inBuffer.pos=%d inBuffer.size=%d outBuffer.pos=%d outBuffer.size=%d o\n", (int)zwd->inBuffer.pos, (int)zwd->inBuffer.size, (int)zwd->outBuffer.pos, (int)zwd->outBuffer.size);
LOG_WRAPPERD("inflate inBuffer.pos=%d inBuffer.size=%d outBuffer.pos=%d outBuffer.size=%d o\n",
(int)zwd->inBuffer.pos, (int)zwd->inBuffer.size, (int)zwd->outBuffer.pos, (int)zwd->outBuffer.size);
strm->next_out += zwd->outBuffer.pos;
strm->total_out += zwd->outBuffer.pos;
strm->avail_out -= zwd->outBuffer.pos;
@ -799,13 +822,16 @@ ZEXTERN int ZEXPORT z_inflate OF((z_streamp strm, int flush))
zwd->totalInBytes += zwd->inBuffer.pos;
strm->next_in += zwd->inBuffer.pos;
strm->avail_in -= zwd->inBuffer.pos;
if (errorCode == 0) {
LOG_WRAPPERD("inflate Z_STREAM_END1 avail_in=%d avail_out=%d total_in=%d total_out=%d\n", (int)strm->avail_in, (int)strm->avail_out, (int)strm->total_in, (int)strm->total_out);
if (dErr == 0) {
LOG_WRAPPERD("inflate Z_STREAM_END1 avail_in=%d avail_out=%d total_in=%d total_out=%d\n",
(int)strm->avail_in, (int)strm->avail_out, (int)strm->total_in, (int)strm->total_out);
zwd->decompState = ZWRAP_streamEnd;
return Z_STREAM_END;
}
}
LOG_WRAPPERD("- inflate2 flush=%d avail_in=%d avail_out=%d total_in=%d total_out=%d res=%d\n", (int)flush, (int)strm->avail_in, (int)strm->avail_out, (int)strm->total_in, (int)strm->total_out, Z_OK);
} /* dErr lifetime */
LOG_WRAPPERD("- inflate2 flush=%d avail_in=%d avail_out=%d total_in=%d total_out=%d res=%d\n",
(int)flush, (int)strm->avail_in, (int)strm->avail_out, (int)strm->total_in, (int)strm->total_out, Z_OK);
return Z_OK;
error:
@ -818,13 +844,13 @@ ZEXTERN int ZEXPORT z_inflateEnd OF((z_streamp strm))
if (g_ZWRAPdecompressionType == ZWRAP_FORCE_ZLIB || !strm->reserved)
return inflateEnd(strm);
LOG_WRAPPERD("- inflateEnd total_in=%d total_out=%d\n", (int)(strm->total_in), (int)(strm->total_out));
{ size_t errorCode;
ZWRAP_DCtx* zwd = (ZWRAP_DCtx*) strm->state;
LOG_WRAPPERD("- inflateEnd total_in=%d total_out=%d\n",
(int)(strm->total_in), (int)(strm->total_out));
{ ZWRAP_DCtx* const zwd = (ZWRAP_DCtx*) strm->state;
if (zwd == NULL) return Z_OK; /* structures are already freed */
{ size_t const freeErr = ZWRAP_freeDCtx(zwd);
if (ZSTD_isError(freeErr)) return Z_STREAM_ERROR; }
strm->state = NULL;
errorCode = ZWRAP_freeDCtx(zwd);
if (ZSTD_isError(errorCode)) return Z_STREAM_ERROR;
}
return Z_OK;
}
@ -841,8 +867,6 @@ ZEXTERN int ZEXPORT z_inflateSync OF((z_streamp strm))
/* Advanced compression functions */
ZEXTERN int ZEXPORT z_deflateCopy OF((z_streamp dest,
z_streamp source))
@ -982,7 +1006,7 @@ ZEXTERN uLong ZEXPORT z_zlibCompileFlags OF((void)) { return zlibCompileFlags();
/* utility functions */
/* === utility functions === */
#ifndef Z_SOLO
ZEXTERN int ZEXPORT z_compress OF((Bytef *dest, uLongf *destLen,
@ -991,11 +1015,14 @@ ZEXTERN int ZEXPORT z_compress OF((Bytef *dest, uLongf *destLen,
if (!g_ZWRAP_useZSTDcompression)
return compress(dest, destLen, source, sourceLen);
{ size_t dstCapacity = *destLen;
size_t const errorCode = ZSTD_compress(dest, dstCapacity, source, sourceLen, ZWRAP_DEFAULT_CLEVEL);
LOG_WRAPPERD("z_compress sourceLen=%d dstCapacity=%d\n", (int)sourceLen, (int)dstCapacity);
if (ZSTD_isError(errorCode)) return Z_STREAM_ERROR;
*destLen = errorCode;
{ size_t dstCapacity = *destLen;
size_t const cSize = ZSTD_compress(dest, dstCapacity,
source, sourceLen,
ZWRAP_DEFAULT_CLEVEL);
LOG_WRAPPERD("z_compress sourceLen=%d dstCapacity=%d\n",
(int)sourceLen, (int)dstCapacity);
if (ZSTD_isError(cSize)) return Z_STREAM_ERROR;
*destLen = cSize;
}
return Z_OK;
}
@ -1009,9 +1036,9 @@ ZEXTERN int ZEXPORT z_compress2 OF((Bytef *dest, uLongf *destLen,
return compress2(dest, destLen, source, sourceLen, level);
{ size_t dstCapacity = *destLen;
size_t const errorCode = ZSTD_compress(dest, dstCapacity, source, sourceLen, level);
if (ZSTD_isError(errorCode)) return Z_STREAM_ERROR;
*destLen = errorCode;
size_t const cSize = ZSTD_compress(dest, dstCapacity, source, sourceLen, level);
if (ZSTD_isError(cSize)) return Z_STREAM_ERROR;
*destLen = cSize;
}
return Z_OK;
}
@ -1029,13 +1056,13 @@ ZEXTERN uLong ZEXPORT z_compressBound OF((uLong sourceLen))
ZEXTERN int ZEXPORT z_uncompress OF((Bytef *dest, uLongf *destLen,
const Bytef *source, uLong sourceLen))
{
if (sourceLen < 4 || MEM_readLE32(source) != ZSTD_MAGICNUMBER)
if (!ZSTD_isFrame(source, sourceLen))
return uncompress(dest, destLen, source, sourceLen);
{ size_t dstCapacity = *destLen;
size_t const errorCode = ZSTD_decompress(dest, dstCapacity, source, sourceLen);
if (ZSTD_isError(errorCode)) return Z_STREAM_ERROR;
*destLen = errorCode;
size_t const dSize = ZSTD_decompress(dest, dstCapacity, source, sourceLen);
if (ZSTD_isError(dSize)) return Z_STREAM_ERROR;
*destLen = dSize;
}
return Z_OK;
}

View File

@ -27,7 +27,7 @@
.\" OpenBSD: tsearch.3,v 1.2 1998/06/21 22:13:49 millert Exp
.\" $FreeBSD$
.\"
.Dd October 9, 2016
.Dd June 4, 2017
.Dt TSEARCH 3
.Os
.Sh NAME
@ -130,6 +130,64 @@ is NULL or the datum cannot be found.
The
.Fn twalk
function returns no value.
.Sh EXAMPLES
This example uses
.Fn tsearch
to search for four strings in
.Dv root .
Because the strings are not already present, they are added.
.Fn tsearch
is called twice on the fourth string to demonstrate that a string is not added when it is already present.
.Fn tfind
is used to find the single instance of the fourth string, and
.Fn tdelete
removes it.
Finally,
.Fn twalk
is used to traverse and display the resulting binary search tree.
.Bd -literal
#include <stdio.h>
#include <search.h>
#include <string.h>
int
comp(const void *a, const void *b)
{
return strcmp(a, b);
}
void
printwalk(const posix_tnode * node, VISIT v, int __unused0)
{
if (v == postorder || v == leaf) {
printf("node: %s\en", *(char **)node);
}
}
int
main(void)
{
posix_tnode *root = NULL;
char one[] = "blah1";
char two[] = "blah-2";
char three[] = "blah-3";
char four[] = "blah-4";
tsearch(one, &root, comp);
tsearch(two, &root, comp);
tsearch(three, &root, comp);
tsearch(four, &root, comp);
tsearch(four, &root, comp);
printf("four: %s\en", *(char **)tfind(four, &root, comp));
tdelete(four, &root, comp);
twalk(root, printwalk);
return 0;
}
.Ed
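.Pp
When compiled and run, the program is expected to print the string located by
.Fn tfind
followed by the remaining nodes in
.Fn strcmp
order, producing output similar to the following:
.Bd -literal
four: blah-4
node: blah-2
node: blah-3
node: blah1
.Ed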
.Sh SEE ALSO
.Xr bsearch 3 ,
.Xr hsearch 3 ,

View File

@ -641,6 +641,7 @@ static struct densities {
{ 0x53, 13452, 341681, "3592A3 (unencrypted)" },
{ 0x54, 19686, 500024, "3592A4 (unencrypted)" },
{ 0x55, 20670, 525018, "3592A5 (unencrypted)" },
{ 0x56, 20670, 525018, "3592B5 (unencrypted)" },
{ 0x58, 15142, 384607, "LTO-5" },
{ 0x5A, 15142, 384607, "LTO-6" },
{ 0x5C, 19107, 485318, "LTO-7" },
@ -650,6 +651,7 @@ static struct densities {
{ 0x73, 13452, 341681, "3592A3 (encrypted)" },
{ 0x74, 19686, 500024, "3592A4 (encrypted)" },
{ 0x75, 20670, 525018, "3592A5 (encrypted)" },
{ 0x76, 20670, 525018, "3592B5 (encrypted)" },
{ 0x8c, 1789, 45434, "EXB-8500c" },
{ 0x90, 1703, 43245, "EXB-8200c" },
{ 0, 0, 0, NULL }

View File

@ -100,7 +100,7 @@ _pthread_barrier_init(pthread_barrier_t *barrier,
pthread_barrier_t bar;
int pshared;
if (barrier == NULL || count <= 0)
if (barrier == NULL || count == 0)
return (EINVAL);
if (attr == NULL || *attr == NULL ||

View File

@ -309,7 +309,7 @@ The syntax of the direct invocation is
.Op Ar image arguments
.Ed
.Pp
The options are as follows:
The options are:
.Bl -tag -width indent
.It Fl f Ar fd
File descriptor
@ -339,21 +339,22 @@ Ends the
options.
The argument following
.Fl -
is interpreted as the path of binary to execute.
is interpreted as the path of the binary to execute.
.El
.Pp
To conform to user expectation to not break some naively restricted
execution environments, in the direct execution mode
In the direct execution mode,
.Nm
emulates verification of the binary execute permission
for current user.
emulates verification of the binary execute permission for the
current user.
This is done to avoid breaking user expectations in naively restricted
execution environments.
The verification only uses Unix
.Dv DACs ,
ignores
.Dv ACLs
and is racy by its nature.
The environments which rely on such restrictions are weak
and breakable on its own.
and is naturally prone to race conditions.
Environments which rely on such restrictions are weak
and breakable on their own.
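.Pp
For example, assuming the dynamic linker is installed at the usual
.Pa /libexec/ld-elf.so.1
path, the following direct invocation should be equivalent to running
.Dq ls -l
directly:
.Bd -literal -offset indent
/libexec/ld-elf.so.1 -- /bin/ls -l
.Ed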
.Sh FILES
.Bl -tag -width ".Pa /var/run/ld-elf32.so.hints" -compact
.It Pa /var/run/ld-elf.so.hints

View File

@ -24,7 +24,7 @@
.\"
.\" $FreeBSD$
.\"
.Dd December 4, 2015
.Dd July 14, 2017
.Dt ARCMSR 4
.Os
.Sh NAME
@ -147,6 +147,8 @@ ARC-1880
ARC-1882
.It
ARC-1883
.It
ARC-1884
.El
.Sh FILES
.Bl -tag -width ".Pa /dev/arcmsr?" -compact

View File

@ -3,8 +3,8 @@
#
# List of PCI ID's
#
# Version: 2017.05.25
# Date: 2017-05-25 03:15:02
# Version: 2017.07.13
# Date: 2017-07-13 03:15:01
#
# Maintained by Albert Pool, Martin Mares, and other volunteers from
# the PCI ID Project at http://pci-ids.ucw.cz/.
@ -561,6 +561,7 @@
00af SAS3408 Fusion-MPT Tri-Mode I/O Controller Chip (IOC)
1d49 0200 ThinkSystem 430-8i SAS/SATA 12Gb HBA
1d49 0202 ThinkSystem 430-8e SAS/SATA 12Gb HBA
1d49 0204 ThinkSystem 430-8i SAS/SATA 12Gb Dense HBA
00be SAS3504 Fusion-MPT Tri-Mode RAID On Chip (ROC)
00bf SAS3404 Fusion-MPT Tri-Mode I/O Controller Chip (IOC)
00c0 SAS3324 PCI-Express Fusion-MPT SAS-3
@ -2139,13 +2140,12 @@
# Radeon HD 7970 X2
1787 2317 Radeon HD 7990
1787 3000 Tahiti XT2 [Radeon HD 7970 GHz Edition]
6799 New Zealand [Radeon HD 7900 Series]
679a Tahiti PRO [Radeon HD 7950/8950 OEM / R9 280]
1002 0b01 Radeon HD 8950 OEM
1002 3000 Tahiti PRO2 [Radeon HD 7950 Boost]
1462 3000 Radeon HD 8950 OEM
174b a003 Radeon R9 280
679b Malta [Radeon HD 7990]
679b Malta [Radeon HD 7990/8990 OEM]
1002 0b28 Radeon HD 8990 OEM
1002 0b2a Radeon HD 7990
1462 8036 Radeon HD 8990 OEM
@ -2205,28 +2205,34 @@
174b e324 Sapphire Nitro R9 390
67b9 Vesuvius [Radeon R9 295X2]
67be Hawaii LE
67c0 Ellesmere [Polaris10]
67c0 Ellesmere [Radeon Pro WX 7100]
67c4 Ellesmere [Radeon Pro WX 7100]
1002 0336 Radeon Pro Duo
1002 1336 Radeon Pro Duo
67c7 Ellesmere [Radeon Pro WX 5100]
67ca Ellesmere [Polaris10]
67cc Ellesmere [Polaris10]
67cf Ellesmere [Polaris10]
67df Ellesmere [Radeon RX 470/480]
67df Ellesmere [Radeon RX 470/480/570/580]
1002 0b37 Radeon RX 480
1043 04a8 Radeon RX 480
1043 04b0 Radeon RX 470
1043 04fb Radeon RX 480
1043 04fd Radeon RX 480 8GB
1458 22f0 Radeon RX 570
1462 3411 Radeon RX 470
1462 3413 Radeon RX 480
148c 2372 Radeon RX 480
148c 2373 Radeon RX 470
1682 9470 Radeon RX 470
1682 9480 Radeon RX 480
1682 9588 Radeon RX 580 XTR
174b e347 Radeon RX 470/480
174b e349 Radeon RX 470
1787 a470 Radeon RX 470
1787 a480 Radeon RX 480
1da2 e353 Sapphire Radeon RX 580 Pulse 8GB
1da2 e366 Radeon RX 570
67e0 Baffin [Polaris11]
67e1 Baffin [Polaris11]
67e3 Baffin [Radeon Pro WX 4100]
@ -2234,7 +2240,7 @@
67e9 Baffin [Polaris11]
67eb Baffin [Polaris11]
67ef Baffin [Radeon RX 460]
67ff Baffin [Polaris11]
67ff Baffin [Radeon RX 560]
6800 Wimbledon XT [Radeon HD 7970M]
1002 0124 Radeon HD 7970M
8086 2110 Radeon HD 7970M
@ -2430,6 +2436,7 @@
144d c0c7 Radeon HD 7550M
6842 Thames LE [Radeon HD 7000M Series]
6843 Thames [Radeon HD 7670M]
6863 Vega 10 [Radeon Vega Frontier Edition]
687f Vega [Radeon RX Vega]
6888 Cypress XT [FirePro V8800]
6889 Cypress PRO [FirePro V7800]
@ -2940,9 +2947,10 @@
174b e308 Radeon R9 380 Nitro 4G D5
6980 Polaris12
6981 Polaris12
6985 Polaris12
6985 Lexa XT [Radeon PRO WX 3100]
6986 Polaris12
6987 Polaris12
6995 Lexa XT [Radeon PRO WX 2100]
699f Lexa PRO [Radeon RX 550]
700f RS100 AGP Bridge
7010 RS200/RS250 AGP Bridge
@ -3804,6 +3812,7 @@
1014 0338 PCI-X DDR Auxiliary Cache Adapter (575C)
0302 Winnipeg PCI-X Host Bridge
0308 CalIOC2 PCI-E Root Port
0311 FC 5740/1954 4-Port 10/100/1000 Base-TX PCI-X Adapter for POWER
0314 ZISC 036 Neural accelerator card
032d Axon - Cell Companion Chip
1014 03a1 PCIe PowerXCell 8i Cell Accelerator Board
@ -3834,6 +3843,7 @@
1014 04c7 PCIe3 x 8 Cache SAS RAID Internal Adapter 6GB(2CCA)
1014 04c8 PCIe3 x 8 Cache SAS RAID Internal Adapter 6GB(2CD2)
1014 04c9 PCIe3 x 8 Cache SAS RAID Internal Adapter 6GB(2CCD)
03dc POWER8 Host Bridge (PHB3)
044b GenWQE Accelerator Adapter
04aa Flash Adapter 90 (PCIe2 0.9TB)
04da PCI-E IPR SAS+ Adapter (ASIC)
@ -4499,6 +4509,8 @@
0533 MGA G200EH
103c 3381 iLO4
0534 G200eR2
0538 G200eH
1590 00e4 iLO5 VGA
0540 M91XX
102b 2080 M9140 LP PCIe x16
102b 20c0 Xenia
@ -5011,6 +5023,8 @@
12ee PCI-X 2.0 Local Bus Adapter
1302 RMP-3 Shared Memory Driver
1303 RMP-3 (Remote Management Processor)
22f6 iLO5 Virtual USB Controller
1590 00e4 iLO5 Standard Virtual USB Controller
2910 E2910A PCIBus Exerciser
2925 E2925A 32 Bit, 33 MHzPCI Exerciser & Analyzer
3206 Adaptec Embedded Serial ATA HostRAID
@ -5398,6 +5412,7 @@
1546 803c FWB-PCIE1X11B
8240 XIO2001 PCI Express-to-PCI Bridge
8241 TUSB73x0 SuperSpeed USB 3.0 xHCI Host Controller
1014 04b2 S824 (8286-42A)
8400 ACX 100 22Mbps Wireless Interface
1186 3b00 DWL-650+ PC Card cardbus 22Mbs Wireless Adapter [AirPlus]
1186 3b01 DWL-520+ 22Mbps PCI Wireless Adapter
@ -6015,6 +6030,7 @@
1077 02a7 QL45212-DE 25GbE Adapter
1077 e4f6 FastLinQ QL45211H 25GbE Adapter
1077 e4f7 FastLinQ QL45212H 25GbE Adapter
1590 0223 Synergy 6810C 25/50Gb Ethernet Adapter
165c FastLinQ QL45000 Series 40GbE Controller (FCoE)
1077 e4f1 FastLinQ QL45462H 40GbE FCoE Adapter
1077 e4f2 FastLinQ QL45461H 40GbE FCoE Adapter
@ -6068,8 +6084,12 @@
2432 ISP2432-based 4Gb Fibre Channel to PCI Express HBA
103c 7040 FC1142SR 4Gb 1-port PCIe Fibre Channel Host Bus Adapter [HPAE311A]
2532 ISP2532-based 8Gb Fibre Channel to PCI Express HBA
1014 041e FC EN0Y/EN12 PCIe2 LP 8 Gb 4-port Fibre Channel Adapter for POWER
103c 3262 StorageWorks 81Q
103c 3263 StorageWorks 82Q
1077 015c QLE2560 PCI Express to 8Gb FC Single Channel
1077 015d QLE2562 PCI Express to 8Gb FC Dual Channel
1077 015e QLE2564 PCI Express to 8Gb FC Quad Channel
1077 0167 QME2572 Dual Port FC8 HBA Mezzanine
1590 00fc StoreFabric 84Q 8Gb Quad Port Fibre Channel Host Bus Adapter
3022 ISP4022-based Ethernet NIC
@ -6084,6 +6104,7 @@
7322 IBA7322 QDR InfiniBand HCA
8000 10GbE Converged Network Adapter (TCP/IP Networking)
8001 10GbE Converged Network Adapter (FCoE)
1014 03af FC 5708/5270 10 Gb FCoE PCIe Dual Port Adapter for POWER
8020 cLOM8214 1/10GbE Controller
1028 1f64 QMD8262-k 10G DP bNDC KR
103c 3346 CN1000Q Dual Port Converged Network Adapter
@ -6107,15 +6128,27 @@
8031 8300 Series 10GbE Converged Network Adapter (FCoE)
8032 8300 Series 10GbE Converged Network Adapter (iSCSI)
8070 FastLinQ QL41000 Series 10/25/40/50GbE Controller
1077 0001 10GE 2P QL41162HxRJ-DE Adapter
1077 0002 10GE 2P QL41112HxCU-DE Adapter
1077 000b 25GE 2P QL41262HxCU-DE Adapter
1077 0011 FastLinQ QL41212H 25GbE Adapter
1077 0012 FastLinQ QL41112H 10GbE Adapter
8080 FastLinQ QL41000 Series 10/25/40/50GbE Controller (FCoE)
1077 0001 10GE 2P QL41162HxRJ-DE Adapter
1077 0002 10GE 2P QL41112HxCU-DE Adapter
1077 000b 25GE 2P QL41262HxCU-DE Adapter
1077 000d FastLinQ QL41262H 25GbE FCoE Adapter
1077 000e FastLinQ QL41162H 10GbE FCoE Adapter
8084 FastLinQ QL41000 Series 10/25/40/50GbE Controller (iSCSI)
1077 0001 10GE 2P QL41162HxRJ-DE Adapter
1077 0002 10GE 2P QL41112HxCU-DE Adapter
1077 000b 25GE 2P QL41262HxCU-DE Adapter
1077 000d FastLinQ QL41262H 25GbE iSCSI Adapter
1077 000e FastLinQ QL41162H 10GbE iSCSI Adapter
8090 FastLinQ QL41000 Series Gigabit Ethernet Controller (SR-IOV VF)
1077 0001 25GE 2P QL41262HxCU-DE Adapter
1077 0002 10GE 2P QL41112HxCU-DE Adapter
1077 000b 25GE 2P QL41262HxCU-DE Adapter
1077 000d FastLinQ QL41262H 25GbE FCoE Adapter (SR-IOV VF)
1077 000e FastLinQ QL41162H 10GbE iSCSI Adapter (SR-IOV VF)
1077 0011 FastLinQ QL41212H 25GbE Adapter (SR-IOV VF)
@ -9947,6 +9980,8 @@
0f02 GF108 [GeForce GT 730]
0f06 GF108 [GeForce GT 730]
0fb0 GM200 High Definition Audio
0fb8 GP108 High Definition Audio Controller
0fb9 GP107GL High Definition Audio Controller
0fbb GM204 High Definition Audio Controller
0fc0 GK107 [GeForce GT 640 OEM]
0fc1 GK107 [GeForce GT 640]
@ -10620,6 +10655,7 @@
134f GM108M [GeForce 920MX]
137a GM108GLM [Quadro K620M / Quadro M500M]
17aa 505a Quadro M500M
137b GM108GLM [Quadro M520 Mobile]
137d GM108M [GeForce 940A]
1380 GM107 [GeForce GTX 750 Ti]
1381 GM107 [GeForce GTX 750]
@ -10665,6 +10701,7 @@
13d8 GM204M [GeForce GTX 970M]
13d9 GM204M [GeForce GTX 965M]
13da GM204M [GeForce GTX 980]
13e7 GM204 [GeForce GTX 980 Engineering Sample]
13f0 GM204GL [Quadro M5000]
13f1 GM204GL [Quadro M4000]
13f2 GM204GL [Tesla M60]
@ -10690,7 +10727,7 @@
1617 GM204M [GeForce GTX 980M]
1618 GM204M [GeForce GTX 970M]
1619 GM204M [GeForce GTX 965M]
161a GM204M [GeForce GTX 980]
161a GM204M [GeForce GTX 980 Mobile]
1667 GM204M [GeForce GTX 965M]
1725 GP100
172e GP100
@ -10744,10 +10781,11 @@
1ca7 GP107GL
1ca8 GP107GL
1caa GP107GL
1cb1 GP107GL [Quardo P1000]
1cb2 GP107GL [Quardo P600]
1cb3 GP107GL [Quardo P400]
1cb1 GP107GL [Quadro P1000]
1cb2 GP107GL [Quadro P600]
1cb3 GP107GL [Quadro P400]
1d01 GP108 [GeForce GT 1030]
1d10 GP108 [GeForce MX150]
1d81 GV100
10df Emulex Corporation
0720 OneConnect NIC (Skyhawk)
@ -10771,7 +10809,8 @@
e100 Proteus-X: LightPulse IOV Fibre Channel Host Adapter
e131 LightPulse 8Gb/s PCIe Shared I/O Fibre Channel Adapter
e180 Proteus-X: LightPulse IOV Fibre Channel Host Adapter
e200 Lancer-X: LightPulse Fibre Channel Host Adapter
e200 LightPulse LPe16002
1014 03f1 PCIe2 16 Gb 2-port Fibre Channel Adapter (FC EL5B; CCIN 577F)
e208 LightPulse 16Gb Fibre Channel Host Adapter (Lancer-VF)
e220 OneConnect NIC (Lancer)
17aa 1054 ThinkServer LPm16002B-M6-L AnyFabric
@ -10800,6 +10839,7 @@
f0e5 Zephyr LightPulse Fibre Channel Host Adapter
f0f5 Neptune LightPulse Fibre Channel Host Adapter
f100 Saturn-X: LightPulse Fibre Channel Host Adapter
1014 038a 8Gb PCI Express Dual Port FC Adapter for POWER
103c 3282 8Gb Dual-port PCI-e FC HBA
f111 Saturn-X LightPulse Fibre Channel Host Adapter
f112 Saturn-X LightPulse Fibre Channel Host Adapter
@ -10822,6 +10862,8 @@
fc40 Saturn-X: LightPulse Fibre Channel Host Adapter
fc50 Proteus-X: LightPulse IOV Fibre Channel Host Adapter
fd00 Helios-X LightPulse Fibre Channel Host Adapter
# Also IBM FC 5759 / FC 1910 for POWER
10df fd02 LightPulse LP11002 Dual-port 4Gigabit PCI Fibre Channel Adapter
fd11 Helios-X LightPulse Fibre Channel Host Adapter
fd12 Helios-X LightPulse Fibre Channel Host Adapter
fe00 Zephyr-X LightPulse Fibre Channel Host Adapter
@ -10923,6 +10965,7 @@
524a RTS524A PCI Express Card Reader
5250 RTS5250 PCI Express Card Reader
525a RTS525A PCI Express Card Reader
17aa 224f ThinkPad X1 Carbon 5th Gen
5286 RTS5286 PCI Express Card Reader
5287 RTL8411B PCI Express Card Reader
5288 RTS5288 PCI Express Card Reader
@ -11028,6 +11071,7 @@
1775 11cc CC11/CL11
1849 8168 Motherboard (one of many)
7470 3468 TG-3468 Gigabit PCI Express Network Adapter
8086 2055 NUC Kit DN2820FYKH
8086 d615 Desktop Board D510MO/D525MW
8169 RTL8169 PCI Gigabit Ethernet Controller
1025 0079 Aspire 5024WLMi
@ -14773,7 +14817,7 @@
12d6 Analogic Corp
12d7 Biotronic SRL
12d8 Pericom Semiconductor
01a7 PI7C21P100 PCI to PCI Bridge
01a7 7C21P100 2-port PCI-X to PCI-X Bridge
400a PI7C9X442SL PCI Express Bridge Port
400e PI7C9X442SL USB OHCI Controller
400f PI7C9X442SL USB EHCI Controller
@ -15081,6 +15125,8 @@
5160 RealSSD P420h
5161 RealSSD P420m
5163 RealSSD P425m
5180 9100 PRO NVMe SSD
5181 9100 MAX NVMe SSD
1345 Arescom Inc
1347 Odetics
1349 Sumitomo Electric Industries, Ltd.
@ -16081,6 +16127,7 @@
509f T540-509F Unified Wire Ethernet Controller
50a0 T540-50A0 Unified Wire Ethernet Controller
50a1 T540-50A1 Unified Wire Ethernet Controller
50a2 T580-50A2 Unified Wire Ethernet Controller
5401 T520-CR Unified Wire Ethernet Controller
5402 T522-CR Unified Wire Ethernet Controller
5403 T540-CR Unified Wire Ethernet Controller
@ -16133,6 +16180,7 @@
549f T540-509F Unified Wire Ethernet Controller
54a0 T540-50A0 Unified Wire Ethernet Controller
54a1 T540-50A1 Unified Wire Ethernet Controller
54a2 T580-50A2 Unified Wire Ethernet Controller
5501 T520-CR Unified Wire Storage Controller
5502 T522-CR Unified Wire Storage Controller
5503 T540-CR Unified Wire Storage Controller
@ -16185,6 +16233,7 @@
559f T540-509F Unified Wire Storage Controller
55a0 T540-50A0 Unified Wire Storage Controller
55a1 T540-50A1 Unified Wire Storage Controller
55a2 T580-50A2 Unified Wire Storage Controller
5601 T520-CR Unified Wire Storage Controller
5602 T522-CR Unified Wire Storage Controller
5603 T540-CR Unified Wire Storage Controller
@ -16237,6 +16286,7 @@
569f T540-509F Unified Wire Storage Controller
56a0 T540-50A0 Unified Wire Storage Controller
56a1 T540-50A1 Unified Wire Storage Controller
56a2 T580-50A2 Unified Wire Storage Controller
5701 T520-CR Unified Wire Ethernet Controller
5702 T522-CR Unified Wire Ethernet Controller
5703 T540-CR Unified Wire Ethernet Controller
@ -16328,6 +16378,7 @@
589f T540-509F Unified Wire Ethernet Controller [VF]
58a0 T540-50A0 Unified Wire Ethernet Controller [VF]
58a1 T540-50A1 Unified Wire Ethernet Controller [VF]
58a2 T580-50A2 Unified Wire Ethernet Controller [VF]
6001 T6225-CR Unified Wire Ethernet Controller
6002 T6225-SO-CR Unified Wire Ethernet Controller
6003 T6425-CR Unified Wire Ethernet Controller
@ -16343,6 +16394,9 @@
6015 T6201-BT Unified Wire Ethernet Controller
6080 T6225-6080 Unified Wire Ethernet Controller
6081 T62100-6081 Unified Wire Ethernet Controller
6082 T6225-6082 Unified Wire Ethernet Controller
6083 T62100-6083 Unified Wire Ethernet Controller
6084 T64100-6084 Unified Wire Ethernet Controller
6401 T6225-CR Unified Wire Ethernet Controller
6402 T6225-SO-CR Unified Wire Ethernet Controller
6403 T6425-CR Unified Wire Ethernet Controller
@ -16358,6 +16412,9 @@
6415 T6201-BT Unified Wire Ethernet Controller
6480 T6225-6080 Unified Wire Ethernet Controller
6481 T62100-6081 Unified Wire Ethernet Controller
6482 T6225-6082 Unified Wire Ethernet Controller
6483 T62100-6083 Unified Wire Ethernet Controller
6484 T64100-6084 Unified Wire Ethernet Controller
6501 T6225-CR Unified Wire Storage Controller
6502 T6225-SO-CR Unified Wire Storage Controller
6503 T6425-CR Unified Wire Storage Controller
@ -16373,6 +16430,9 @@
6515 T6201-BT Unified Wire Storage Controller
6580 T6225-6080 Unified Wire Storage Controller
6581 T62100-6081 Unified Wire Storage Controller
6582 T6225-6082 Unified Wire Storage Controller
6583 T62100-6083 Unified Wire Storage Controller
6584 T64100-6084 Unified Wire Storage Controller
6601 T6225-CR Unified Wire Storage Controller
6602 T6225-SO-CR Unified Wire Storage Controller
6603 T6425-CR Unified Wire Storage Controller
@ -16388,6 +16448,9 @@
6615 T6201-BT Unified Wire Storage Controller
6680 T6225-6080 Unified Wire Storage Controller
6681 T62100-6081 Unified Wire Storage Controller
6682 T6225-6082 Unified Wire Storage Controller
6683 T62100-6083 Unified Wire Storage Controller
6684 T64100-6084 Unified Wire Storage Controller
6801 T6225-CR Unified Wire Ethernet Controller [VF]
6802 T6225-SO-CR Unified Wire Ethernet Controller [VF]
6803 T6425-CR Unified Wire Ethernet Controller [VF]
@ -16403,6 +16466,9 @@
6815 T6201-BT Unified Wire Ethernet Controller [VF]
6880 T6225-6080 Unified Wire Ethernet Controller [VF]
6881 T62100-6081 Unified Wire Ethernet Controller [VF]
6882 T6225-6082 Unified Wire Ethernet Controller [VF]
6883 T62100-6083 Unified Wire Ethernet Controller [VF]
6884 T64100-6084 Unified Wire Ethernet Controller [VF]
a000 PE10K Unified Wire Ethernet Controller
1426 Storage Technology Corp.
1427 Better On-Line Solutions
@ -17003,6 +17069,7 @@
1028 1f68 BCM57800 1-Gigabit Ethernet
168d NetXtreme II BCM57840 10/20 Gigabit Ethernet
168e NetXtreme II BCM57810 10 Gigabit Ethernet
1014 0492 PCIe2 2-port 10 GbE BaseT RJ45 Adapter (FC EN0W; CCIN 2CC4)
103c 1798 Flex-10 10Gb 2-port 530FLB Adapter [Meru]
103c 17a5 Flex-10 10Gb 2-port 530M Adapter
103c 18d3 Ethernet 10Gb 2-port 530T Adapter
@ -17929,7 +17996,8 @@
0001 SOC-it 101 System Controller
1540 PROVIDEO MULTIMEDIA Co Ltd
1541 MACHONE Communications
1542 Concurrent Computer Corporation
# nee VIVID Technology Inc.
1542 Concurrent Real-Time
9260 RCIM-II Real-Time Clock & Interrupt Module
9271 RCIM-III Real-Time Clock & Interrupt Module (PCIe)
9272 Pulse Width Modulator Card
@ -18133,6 +18201,7 @@
103c 18d6 InfiniBand FDR/EN 10/40Gb Dual Port 544QSFP Adapter
15b3 0025 ConnectX-3 IB QDR Dual Port Mezzanine Card
15b3 0026 ConnectX-3 IB FDR Dual Port Mezzanine Card
15b3 0028 ConnectX-3 VPI Dual QSFP+ Port QDR Infiniband 40Gb/s or 10Gb Ethernet
15b3 0059 ConnectX-3 VPI IB FDR/40 GbE Single Port QSFP+ Mezzanine Card
15b3 0065 ConnectX-3 VPI IB FDR/40 GbE Dual Port QSFP+ Adapter
15b3 0066 ConnectX-3 IB FDR10 Dual Port Mezzanine Card
@ -18209,6 +18278,7 @@
6372 MT25408 [ConnectX EN 10GigE 10GBaseT, PCIe 2.0 2.5GT/s]
6732 MT26418 [ConnectX VPI PCIe 2.0 5GT/s - IB DDR / 10GigE]
673c MT26428 [ConnectX VPI PCIe 2.0 5GT/s - IB QDR / 10GigE]
1014 0487 GX++ 1-port 4X IB QDR Adapter for Power 795
103c 1782 4X QDR InfiniBand Mezzanine HCA for c-Class BladeSystem
15b3 0021 HP InfiniBand 4X QDR CX-2 PCI-e G2 Dual Port HCA
6746 MT26438 [ConnectX VPI PCIe 2.0 5GT/s - IB QDR / 10GigE Virtualization+]
@ -18217,6 +18287,8 @@
6750 MT26448 [ConnectX EN 10GigE, PCIe 2.0 5GT/s]
1014 0461 2-Port 10 GbE RoCE SR LP PCIe2 (rev b0)
15b3 0018 HP 10 GbE PCI-e G2 Dual-Port NIC (rev C1)
# FC EC26
15b3 6572 IBM Flex System EN4132 2-port 10Gb RoCE Adapter
675a MT25408 [ConnectX EN 10GigE 10GBaseT, PCIe Gen2 5GT/s]
6764 MT26468 [ConnectX EN 10GigE, PCIe 2.0 5GT/s Virtualization+]
103c 3313 NC542m Dual Port Flex-10 10GbE BLc Adapter
@ -19746,15 +19818,15 @@
1924 800e SFN7x42Q-R2 Flareon Ultra 7000 Series 10/40G Adapter
1924 800f SFN7xx4F-R1 Flareon Ultra 7000 Series 10G Adapter
0a03 SFC9220 10/40G Ethernet Controller
1924 8011 SFN8022-R1 Flareon 8000 Series 10G Adapter
1924 8012 SFN8522-R1 Flareon Ultra 8000 Series 10G Adapter
1924 8013 SFN8042-R1 Flareon 8000 Series 10/40G Adapter
1924 8014 SFN8542-R1 Flareon Ultra 8000 Series 10/40G Adapter
1924 8016 SFN8022-R2 Flareon 8000 Series 10G Adapter
1924 8017 SFN8522-R2 Flareon Ultra 8000 Series 10G Adapter
1924 8018 SFN8042-R2 Flareon 8000 Series 10/40G Adapter
1924 8019 SFN8542-R2 Flareon Ultra 8000 Series 10/40G Adapter
1924 801a SFN8722-R1 Flareon Ultra 8000 Series OCP 10G Adapter
1924 8011 SFN8022-R1 8000 Series 10G Adapter
1924 8012 SFN8522-R1 8000 Series 10G Adapter
1924 8013 SFN8042-R1 8000 Series 10/40G Adapter
1924 8014 SFN8542-R1 8000 Series 10/40G Adapter
1924 8016 SFN8022-R2 8000 Series 10G Adapter
1924 8017 SFN8522-R2 8000 Series 10G Adapter
1924 8018 SFN8042-R2 8000 Series 10/40G Adapter
1924 8019 SFN8542-R2 8000 Series 10/40G Adapter
1924 801a SFN8722-R1 8000 Series OCP 10G Adapter
1803 SFC9020 10G Ethernet Controller (Virtual Function)
1813 SFL9021 10GBASE-T Ethernet Controller (Virtual Function)
1903 SFC9120 10G Ethernet Controller (Virtual Function)
@ -20008,20 +20080,29 @@
0212 BladeEngine2 10Gb Gen2 PCIe iSCSI Adapter
0221 BladeEngine3 10Gb Gen2 PCIe Network Adapter
0222 BladeEngine3 10Gb Gen2 PCIe iSCSI Adapter
0700 OneConnect 10Gb NIC
0700 OneConnect OCe10100/OCe10102 Series 10 GbE
103c 1747 NC550SFP DualPort 10GbE Server Adapter
103c 1749 NC550SFP Dual Port Server Adapter
103c 174a NC551m Dual Port FlexFabric 10Gb Adapter
103c 174b StorageWorks NC550 DualPort Converged Network Adapter
103c 3314 NC551i Dual Port FlexFabric 10Gb Adapter
0702 OneConnect 10Gb iSCSI Initiator
0704 OneConnect 10Gb FCoE Initiator
0704 OneConnect OCe10100/OCe10102 Series 10 GbE CNA
10df e602 OneConnect OCe10100 10Gb CNA
10df e630 OneConnect OCe10102-FM-E / OCe10102-FX-E for EMC VNX Symmetrix
0710 OneConnect 10Gb NIC (be3)
# FC 5287 / FC 5284; CCIN 5287
1014 03d0 PCIe2 2-port 10GbE SR Adapter for POWER
# FC 5288 / FC 5286; CCIN 5288
1014 03d1 PCIe2 2-port 10GbE SFP+ Copper Adapter for POWER
1014 0409 Integrated Multifunction Card with Dual 10GbE SR Optical + Dual 1GbE for Power 750/760
1014 040a Integrated Multifunction Card with Dual 10GbE SR Copper + Dual 1GbE for Power 750/760
103c 3315 NC553i 10Gb 2-port FlexFabric Converged Network Adapter
103c 3340 NC552SFP 2-port 10Gb Server Adapter
103c 3341 NC552m 10Gb 2-port FlexFabric Converged Network Adapter
103c 3345 NC553m 10Gb 2-port FlexFabric Converged Network Adapter
103c 337b NC554FLB 10Gb 2-port FlexFabric Converged Network Adapter
10df e733 Flex System EN4054 4-port 10Gb Ethernet Mezzanine Adapter
0712 OneConnect 10Gb iSCSI Initiator (be3)
0714 OneConnect 10Gb FCoE Initiator (be3)
103c 3315 NC553i 10Gb 2-port FlexFabric Converged Network Adapter
@ -20297,6 +20378,7 @@
001e ADQ208
001f DSU
0020 ADQ14
0023 ADQ7
2014 TX320
2019 S6000
# now owned by HGST (a Western Digital subsidiary)
@ -20505,6 +20587,7 @@
0004 ExaNIC X10-GM
0005 ExaNIC X40
0006 ExaNIC X10-HPT
0007 ExaNIC X40
1cf7 Subspace Dynamics
1d00 Pure Storage
1d18 RME
@ -20549,10 +20632,11 @@
4200 A5PL-E1-10GETI [10 GbE Ethernet Traffic Instrument]
1d78 DERA
1d7c Aerotech, Inc.
1d87 Rockchip Inc. RK3399 PCI Express Root Port
1d87 Fuzhou Rockchip Electronics Co., Ltd
1d8f Enyx
1d95 Graphcore Ltd
1da1 Teko Telecom S.r.l.
1da2 Sapphire Technology Limited
1de1 Tekram Technology Co.,Ltd.
0391 TRM-S1040 [DC-315 / DC-395 series]
2020 DC-390
@ -20605,6 +20689,7 @@
1fc9 3015 Ethernet Adapter
4026 TN9610 10GbE SFP+ Ethernet Adapter
4027 TN9710P 10GBase-T/NBASE-T Ethernet Adapter
1154 0368 LGY-PCIE-MG
1432 8104 10 Gigabit Ethernet PCI Express Adapter
1fc9 3015 Ethernet Adapter
4527 TN9710Q 5GBase-T/NBASE-T Ethernet Adapter
@ -22513,6 +22598,7 @@
10ba 80003ES2LAN Gigabit Ethernet Controller (Copper)
10bb 80003ES2LAN Gigabit Ethernet Controller (Serdes)
10bc 82571EB Gigabit Ethernet Controller (Copper)
1014 0368 4-Port 10/100/1000 Base-TX PCI Express Adapter for POWER
103c 704b NC364T PCI Express Quad Port Gigabit Server Adapter
108e 11bc x4 PCI-Express Quad Gigabit Ethernet UTP Low Profile Adapter
8086 10bc PRO/1000 PT Quad Port LP Server Adapter
@ -23016,6 +23102,7 @@
1529 82599 10 Gigabit Dual Port Network Connection with FCoE
152a 82599 10 Gigabit Dual Port Backplane Connection with FCoE
152e 82599 Virtual Function
152f I350 Virtual Function
1530 X540 Virtual Function
1533 I210 Gigabit Network Connection
103c 0003 Ethernet I210-T1 GbE NIC
@ -23074,6 +23161,8 @@
18d4 0c08 X550 10Gb 2-port RJ45 OCP Mezz Card MOP81-I-10GT2
8086 0001 Ethernet Converged Network Adapter X550-T2
8086 001a Ethernet Converged Network Adapter X550-T2
8086 001b Ethernet Server Adapter X550-T2 for OCP
8086 001d Ethernet 10G 2P X550-t Adapter
8086 0022 Ethernet Converged Network Adapter X550-T2
1564 X550 Virtual Function
1565 X550 Virtual Function
@ -23216,22 +23305,38 @@
15be Ethernet Connection (6) I219-V
15bf JHL6240 Thunderbolt 3 NHI (Low Power) [Alpine Ridge LP 2016]
15c0 JHL6240 Thunderbolt 3 Bridge (Low Power) [Alpine Ridge LP 2016]
15c2 Ethernet Connection X553 Backplane
15c3 Ethernet Connection X553 Backplane
15c4 Ethernet Connection X553 10 GbE SFP+
15c5 X553 Virtual Function
15c6 Ethernet Connection X553 1GbE
15c7 Ethernet Connection X553 1GbE
15c8 Ethernet Connection X553/X557-AT 10GBASE-T
15ce Ethernet Connection X553 10 GbE SFP+
15d0 Ethernet SDI Adapter FM10420-100GbE-QDA2
15d1 Ethernet Controller 10G X550T
8086 0002 Ethernet Converged Network Adapter X550-T1
8086 001b Ethernet Server Adapter X550-T1 for OCP
8086 0021 Ethernet Converged Network Adapter X550-T1
8086 00a2 Ethernet Converged Network Adapter X550-T1
15d2 JHL6540 Thunderbolt 3 NHI (C step) [Alpine Ridge 4C 2016]
15d3 JHL6540 Thunderbolt 3 Bridge (C step) [Alpine Ridge 4C 2016]
15d4 JHL6540 Thunderbolt 3 USB Controller (C step) [Alpine Ridge 4C 2016]
15d5 Ethernet SDI Adapter FM10420-25GbE-DA2
8086 0001 Intel(R) Ethernet SDI Adapter FM10420-25GbE-DA2
15d6 Ethernet Connection (5) I219-V
15d7 Ethernet Connection (4) I219-LM
15d8 Ethernet Connection (4) I219-V
17aa 224f ThinkPad X1 Carbon 5th Gen
15d9 JHL6340 Thunderbolt 3 NHI (C step) [Alpine Ridge 2C 2016]
15da JHL6340 Thunderbolt 3 Bridge (C step) [Alpine Ridge 2C 2016]
15df Ethernet Connection (8) I219-LM
15e0 Ethernet Connection (8) I219-V
15e1 Ethernet Connection (9) I219-LM
15e2 Ethernet Connection (9) I219-V
15e3 Ethernet Connection (5) I219-LM
15e4 Ethernet Connection X553 1GbE
15e5 Ethernet Connection X553 1GbE
1600 Broadwell-U Host Bridge -OPI
1601 Broadwell-U PCI Express x16 Controller
1602 Broadwell-U Integrated Graphics
@ -23271,31 +23376,32 @@
163d Broadwell-U Integrated Graphics
163e Broadwell-U Integrated Graphics
1889 Ethernet Adaptive Virtual Function
1900 Skylake Host Bridge/DRAM Registers
1901 Skylake PCIe Controller (x16)
1900 Xeon E3-1200 v5/E3-1500 v5/6th Gen Core Processor Host Bridge/DRAM Registers
1901 Xeon E3-1200 v5/E3-1500 v5/6th Gen Core Processor PCIe Controller (x16)
1902 HD Graphics 510
1903 Skylake Processor Thermal Subsystem
1904 Skylake Host Bridge/DRAM Registers
1903 Xeon E3-1200 v5/E3-1500 v5/6th Gen Core Processor Thermal Subsystem
1904 Xeon E3-1200 v5/E3-1500 v5/6th Gen Core Processor Host Bridge/DRAM Registers
1028 06f3 Latitude 3570
17aa 382a B51-80 Laptop
1905 Skylake PCIe Controller (x8)
1905 Xeon E3-1200 v5/E3-1500 v5/6th Gen Core Processor PCIe Controller (x8)
1906 HD Graphics 510
17aa 382a B51-80 Laptop
1908 Skylake Host Bridge/DRAM Registers
1909 Skylake PCIe Controller (x4)
190c Skylake Host Bridge/DRAM Registers
190f Skylake Host Bridge/DRAM Registers
1910 Skylake Host Bridge/DRAM Registers
1911 Skylake Gaussian Mixture Model
1908 Xeon E3-1200 v5/E3-1500 v5/6th Gen Core Processor Host Bridge/DRAM Registers
1909 Xeon E3-1200 v5/E3-1500 v5/6th Gen Core Processor PCIe Controller (x4)
190c Xeon E3-1200 v5/E3-1500 v5/6th Gen Core Processor Host Bridge/DRAM Registers
190f Xeon E3-1200 v5/E3-1500 v5/6th Gen Core Processor Host Bridge/DRAM Registers
1910 Xeon E3-1200 v5/E3-1500 v5/6th Gen Core Processor Host Bridge/DRAM Registers
1911 Xeon E3-1200 v5/v6 / E3-1500 v5 / 6th/7th Gen Core Processor Gaussian Mixture Model
17aa 224f ThinkPad X1 Carbon 5th Gen
1912 HD Graphics 530
1916 HD Graphics 520
1028 06f3 Latitude 3570
1918 Skylake Host Bridge/DRAM Registers
1919 Skylake Imaging Unit
1918 Xeon E3-1200 v5/E3-1500 v5/6th Gen Core Processor Host Bridge/DRAM Registers
1919 Xeon E3-1200 v5/E3-1500 v5/6th Gen Core Processor Imaging Unit
191b HD Graphics 530
191d HD Graphics P530
191e HD Graphics 515
191f Skylake Host Bridge/DRAM Registers
191f Xeon E3-1200 v5/E3-1500 v5/6th Gen Core Processor Host Bridge/DRAM Registers
1921 HD Graphics 520
1926 Iris Graphics 540
1927 Iris Graphics 550
@ -24531,6 +24637,7 @@
24fd Wireless 8265 / 8275
# Windstorm Peak
8086 0010 Dual Band Wireless-AC 8265
8086 1130 Dual Band Wireless-AC 8265
2500 82820 820 (Camino) Chipset Host Bridge (MCH)
1028 0095 Precision Workstation 220 Chipset
1043 801c P3C-2000 system chipset
@ -26676,6 +26783,7 @@
17aa 4021 Intel Ethernet Connection X722 for 10G SFP+
17aa 4022 Ethernet Connection X722 for 10GbE SFP+
37d1 Ethernet Connection X722 for 1GbE
14cd 0010 88E1514 Ethernet OCP 2x1G RJ45 Phy Card [USI-1514-1GbaseT]
1590 0216 Ethernet 1Gb 2-port 368i Adapter
1590 0217 Ethernet 1Gb 2-port 368FLR-MMT Adapter
1590 0247 Ethernet 1Gb 4-port 369i Adapter
@ -27229,9 +27337,13 @@
5845 QEMU NVM Express Controller
1af4 1100 QEMU Virtual Machine
5902 HD Graphics 610
5904 Xeon E3-1200 v6/7th Gen Core Processor Host Bridge/DRAM Registers
17aa 224f ThinkPad X1 Carbon 5th Gen
590f Xeon E3-1200 v6/7th Gen Core Processor Host Bridge/DRAM Registers
5910 Xeon E3-1200 v6/7th Gen Core Processor Host Bridge/DRAM Registers
5912 HD Graphics 630
5916 HD Graphics 620
17aa 224f ThinkPad X1 Carbon 5th Gen
5a84 Celeron N3350/Pentium N4200/Atom E3900 Series Integrated Graphics Controller
5a88 Celeron N3350/Pentium N4200/Atom E3900 Series Imaging Unit
5a98 Celeron N3350/Pentium N4200/Atom E3900 Series Audio Cluster
@ -27908,6 +28020,8 @@
9d03 Sunrise Point-LP SATA Controller [AHCI mode]
1028 06f3 Latitude 3570
17aa 382a B51-80 Laptop
9d10 Sunrise Point-LP PCI Express Root Port #1
9d12 Sunrise Point-LP PCI Express Root Port #3
9d14 Sunrise Point-LP PCI Express Root Port #5
17aa 382a B51-80 Laptop
9d15 Sunrise Point-LP PCI Express Root Port #6
@ -27918,9 +28032,11 @@
17aa 382a B51-80 Laptop
9d21 Sunrise Point-LP PMC
1028 06f3 Latitude 3570
17aa 224f ThinkPad X1 Carbon 5th Gen
17aa 382a B51-80 Laptop
9d23 Sunrise Point-LP SMBus
1028 06f3 Latitude 3570
17aa 224f ThinkPad X1 Carbon 5th Gen
17aa 382a B51-80 Laptop
9d27 Sunrise Point-LP Serial IO UART Controller #0
9d28 Sunrise Point-LP Serial IO UART Controller #1
@ -27932,14 +28048,18 @@
17aa 382a B51-80 Laptop
9d31 Sunrise Point-LP Thermal subsystem
1028 06f3 Latitude 3570
17aa 224f ThinkPad X1 Carbon 5th Gen
17aa 382a B51-80 Laptop
9d3a Sunrise Point-LP CSME HECI #1
1028 06f3 Latitude 3570
17aa 224f ThinkPad X1 Carbon 5th Gen
17aa 382a B51-80 Laptop
9d43 Sunrise Point-LP LPC Controller
17aa 382a B51-80 Laptop
9d48 Sunrise Point-LP LPC Controller
1028 06f3 Latitude 3570
9d58 Sunrise Point-LP LPC Controller
17aa 224f ThinkPad X1 Carbon 5th Gen
9d60 Sunrise Point-LP Serial IO I2C Controller #0
1028 06f3 Latitude 3570
8086 9d60 100 Series PCH/Sunrise Point PCH I2C0 [Skylake/Kaby Lake LPSS I2C]
@ -28520,6 +28640,20 @@
9005 0552 Series 8 - ASR-8805 - 8 internal 0 external 12G SAS Port/PCIe 3.0
9005 0553 Series 8 - ASR-8085 - 0 internal 8 external 12G SAS Port/PCIe 3.0
9005 0554 Series 8 - ASR-8885 - 8 internal 8 external 12G SAS Port/PCIe 3.0
028f Smart Storage PQI 12G SAS/PCIe 3
103c 0600 Smart Array P408i-p SR Gen10
103c 0601 Smart Array P408e-p SR Gen10
103c 0602 Smart Array P408i-a SR Gen10
103c 0603 Smart Array P408i-c SR Gen10
103c 0650 Smart Array E208i-p SR Gen10
103c 0651 Smart Array E208e-p SR Gen10
103c 0652 Smart Array E208i-c SR Gen10
103c 0654 Smart Array E208i-a SR Gen10
103c 0655 Smart Array P408e-m SR Gen10
103c 0700 Smart Array P204i-c SR Gen10
103c 0701 Smart Array P204i-b SR Gen10
103c 1100 Smart Array P816i-a SR Gen10
103c 1101 Smart Array P416ie-m SR G10
0410 AIC-9410W SAS (Razor HBA RAID)
9005 0410 ASC-48300(Spirit RAID)
9005 0411 ASC-58300 (Oakmont RAID)

View File

@ -207,7 +207,7 @@ typedef enum {
/* Serial Management Protocol */
XPT_NVME_IO = 0x1c | XPT_FC_DEV_QUEUED,
/* Execiute the requestred NVMe I/O operation */
/* Execute the requested NVMe I/O operation */
XPT_MMC_IO = 0x1d | XPT_FC_DEV_QUEUED,
/* Placeholder for MMC / SD / SDIO I/O stuff */
@ -216,6 +216,9 @@ typedef enum {
| XPT_FC_XPT_ONLY,
/* Scan Target */
XPT_NVME_ADMIN = 0x1f | XPT_FC_DEV_QUEUED,
/* Execute the requested NVMe Admin operation */
/* HBA engine commands 0x20->0x2F */
XPT_ENG_INQ = 0x20 | XPT_FC_XPT_ONLY,
/* HBA engine feature inquiry */
@ -819,7 +822,7 @@ struct ccb_relsim {
};
/*
* NVMe I/O Request CCB used for the XPT_NVME_IO function code.
* NVMe I/O Request CCB used for the XPT_NVME_IO and XPT_NVME_ADMIN function codes.
*/
struct ccb_nvmeio {
struct ccb_hdr ccb_h;
@ -1515,6 +1518,21 @@ cam_fill_nvmeio(struct ccb_nvmeio *nvmeio, u_int32_t retries,
nvmeio->data_ptr = data_ptr;
nvmeio->dxfer_len = dxfer_len;
}
static __inline void
cam_fill_nvmeadmin(struct ccb_nvmeio *nvmeio, u_int32_t retries,
void (*cbfcnp)(struct cam_periph *, union ccb *),
u_int32_t flags, u_int8_t *data_ptr, u_int32_t dxfer_len,
u_int32_t timeout)
{
nvmeio->ccb_h.func_code = XPT_NVME_ADMIN;
nvmeio->ccb_h.flags = flags;
nvmeio->ccb_h.retry_count = retries;
nvmeio->ccb_h.cbfcnp = cbfcnp;
nvmeio->ccb_h.timeout = timeout;
nvmeio->data_ptr = data_ptr;
nvmeio->dxfer_len = dxfer_len;
}
__END_DECLS
#endif /* _CAM_CAM_CCB_H */
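The new cam_fill_nvmeadmin() helper mirrors cam_fill_nvmeio() but tags the CCB as XPT_NVME_ADMIN. As a rough userland sketch of how it might be exercised through pass(4) once the periph changes later in this merge are in place (illustrative only; NVME_OPC_IDENTIFY and the CNS value in cdw10 come from <dev/nvme/nvme.h>, and error handling is minimal):

#include <fcntl.h>
#include <stdint.h>
#include <string.h>
#include <camlib.h>
#include <dev/nvme/nvme.h>

/*
 * Illustrative only: push an IDENTIFY CONTROLLER admin command through a
 * pass(4) device with the new helper.  Returns 0 on success, -1 on error.
 */
static int
identify_controller(const char *pass_dev, void *buf, uint32_t len)
{
	struct cam_device *dev;
	union ccb *ccb;
	int error = -1;

	if ((dev = cam_open_device(pass_dev, O_RDWR)) == NULL)
		return (-1);
	if ((ccb = cam_getccb(dev)) == NULL)
		goto out;
	/* Clear everything below the CCB header before filling it in. */
	memset(&(&ccb->ccb_h)[1], 0, sizeof(*ccb) - sizeof(ccb->ccb_h));
	cam_fill_nvmeadmin(&ccb->nvmeio, /*retries*/ 1, /*cbfcnp*/ NULL,
	    CAM_DIR_IN, buf, len, /*timeout, ms*/ 10 * 1000);
	ccb->nvmeio.cmd.opc = NVME_OPC_IDENTIFY;	/* admin opcode 0x06 */
	ccb->nvmeio.cmd.cdw10 = 1;			/* CNS=1: identify controller */
	if (cam_send_ccb(dev, ccb) >= 0 &&
	    (ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP)
		error = 0;
	cam_freeccb(ccb);
out:
	cam_close_device(dev);
	return (error);
}

The asynchronous CAMIOQUEUE path added to pass(4) below accepts the same CCBs, subject to the single-virtual-address restriction noted there.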

View File

@ -848,6 +848,17 @@ cam_periph_mapmem(union ccb *ccb, struct cam_periph_map_info *mapinfo,
dirs[1] = CAM_DIR_IN;
numbufs = 2;
break;
case XPT_NVME_IO:
case XPT_NVME_ADMIN:
if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_NONE)
return (0);
if ((ccb->ccb_h.flags & CAM_DATA_MASK) != CAM_DATA_VADDR)
return (EINVAL);
data_ptrs[0] = &ccb->nvmeio.data_ptr;
lengths[0] = ccb->nvmeio.dxfer_len;
dirs[0] = ccb->ccb_h.flags & CAM_DIR_MASK;
numbufs = 1;
break;
case XPT_DEV_ADVINFO:
if (ccb->cdai.bufsiz == 0)
return (0);
@ -1014,6 +1025,11 @@ cam_periph_unmapmem(union ccb *ccb, struct cam_periph_map_info *mapinfo)
numbufs = min(mapinfo->num_bufs_used, 1);
data_ptrs[0] = (uint8_t **)&ccb->cdai.buf;
break;
case XPT_NVME_IO:
case XPT_NVME_ADMIN:
data_ptrs[0] = &ccb->nvmeio.data_ptr;
numbufs = min(mapinfo->num_bufs_used, 1);
break;
default:
/* allow ourselves to be swapped once again */
PRELE(curproc);

View File

@ -2689,8 +2689,8 @@ xpt_action_default(union ccb *start_ccb)
start_ccb->ataio.resid = 0;
/* FALLTHROUGH */
case XPT_NVME_IO:
if (start_ccb->ccb_h.func_code == XPT_NVME_IO)
start_ccb->nvmeio.resid = 0;
/* FALLTHROUGH */
case XPT_NVME_ADMIN:
/* FALLTHROUGH */
case XPT_MMC_IO:
/* XXX just like nmve_io? */
@ -5548,6 +5548,7 @@ static struct kv map[] = {
{ XPT_MMC_IO, "XPT_MMC_IO" },
{ XPT_SMP_IO, "XPT_SMP_IO" },
{ XPT_SCAN_TGT, "XPT_SCAN_TGT" },
{ XPT_NVME_ADMIN, "XPT_NVME_ADMIN" },
{ XPT_ENG_INQ, "XPT_ENG_INQ" },
{ XPT_ENG_EXEC, "XPT_ENG_EXEC" },
{ XPT_EN_LUN, "XPT_EN_LUN" },

View File

@ -1146,6 +1146,11 @@ passiocleanup(struct pass_softc *softc, struct pass_io_req *io_req)
numbufs = min(io_req->num_bufs, 1);
data_ptrs[0] = (uint8_t **)&ccb->cdai.buf;
break;
case XPT_NVME_IO:
case XPT_NVME_ADMIN:
data_ptrs[0] = &ccb->nvmeio.data_ptr;
numbufs = min(io_req->num_bufs, 1);
break;
default:
/* allow ourselves to be swapped once again */
return;
@ -1384,6 +1389,25 @@ passmemsetup(struct cam_periph *periph, struct pass_io_req *io_req)
dirs[0] = CAM_DIR_IN;
numbufs = 1;
break;
case XPT_NVME_ADMIN:
case XPT_NVME_IO:
if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_NONE)
return (0);
io_req->data_flags = ccb->ccb_h.flags & CAM_DATA_MASK;
/*
* We only support a single virtual address for NVMe
*/
if ((ccb->ccb_h.flags & CAM_DATA_MASK) != CAM_DATA_VADDR)
return (EINVAL);
data_ptrs[0] = &ccb->nvmeio.data_ptr;
lengths[0] = ccb->nvmeio.dxfer_len;
dirs[0] = ccb->ccb_h.flags & CAM_DIR_MASK;
numbufs = 1;
maxmap = softc->maxio;
break;
default:
return(EINVAL);
break; /* NOTREACHED */
@ -1957,7 +1981,8 @@ passdoioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flag, struct thread
*/
if ((fc == XPT_SCSI_IO) || (fc == XPT_ATA_IO)
|| (fc == XPT_SMP_IO) || (fc == XPT_DEV_MATCH)
|| (fc == XPT_DEV_ADVINFO)) {
|| (fc == XPT_DEV_ADVINFO)
|| (fc == XPT_NVME_ADMIN) || (fc == XPT_NVME_IO)) {
error = passmemsetup(periph, io_req);
if (error != 0)
goto camioqueue_error;

View File

@ -79,24 +79,31 @@ SYSCALL32_MODULE(syscallname, \
& syscallname##_syscall32, & syscallname##_sysent32,\
NULL, NULL);
#define SYSCALL32_INIT_HELPER(syscallname) { \
#define SYSCALL32_INIT_HELPER_F(syscallname, flags) { \
.new_sysent = { \
.sy_narg = (sizeof(struct syscallname ## _args ) \
/ sizeof(register_t)), \
.sy_call = (sy_call_t *)& syscallname, \
.sy_flags = (flags) \
}, \
.syscall_no = FREEBSD32_SYS_##syscallname \
}
#define SYSCALL32_INIT_HELPER_COMPAT(syscallname) { \
#define SYSCALL32_INIT_HELPER_COMPAT_F(syscallname, flags) { \
.new_sysent = { \
.sy_narg = (sizeof(struct syscallname ## _args ) \
/ sizeof(register_t)), \
.sy_call = (sy_call_t *)& sys_ ## syscallname, \
.sy_flags = (flags) \
}, \
.syscall_no = FREEBSD32_SYS_##syscallname \
}
#define SYSCALL32_INIT_HELPER(syscallname) \
SYSCALL32_INIT_HELPER_F(syscallname, 0)
#define SYSCALL32_INIT_HELPER_COMPAT(syscallname) \
SYSCALL32_INIT_HELPER_COMPAT_F(syscallname, 0)
int syscall32_register(int *offset, struct sysent *new_sysent,
struct sysent *old_sysent, int flags);
int syscall32_deregister(int *offset, struct sysent *old_sysent);
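For a concrete picture of the new _F variants, the freebsd32 message-queue table updated later in this merge (sys/kern/uipc_mqueue.c) marks the capability-safe entries roughly like this sketch:

static struct syscall_helper_data mq32_syscalls[] = {
	SYSCALL32_INIT_HELPER(freebsd32_kmq_open),
	SYSCALL32_INIT_HELPER_F(freebsd32_kmq_setattr, SYF_CAPENABLED),
	SYSCALL32_INIT_HELPER_F(freebsd32_kmq_timedsend, SYF_CAPENABLED),
	SYSCALL32_INIT_HELPER_F(freebsd32_kmq_timedreceive, SYF_CAPENABLED),
	SYSCALL32_INIT_HELPER_F(freebsd32_kmq_notify, SYF_CAPENABLED),
	SYSCALL32_INIT_HELPER_COMPAT(kmq_unlink),
	SYSCALL_INIT_LAST
};

The plain SYSCALL32_INIT_HELPER and SYSCALL32_INIT_HELPER_COMPAT forms remain as zero-flag wrappers, so existing tables compile unchanged.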

View File

@ -253,7 +253,7 @@ dev/ixl/ixl_pf_main.c optional ixl pci \
compile-with "${NORMAL_C} -I$S/dev/ixl"
dev/ixl/ixl_pf_qmgr.c optional ixl pci \
compile-with "${NORMAL_C} -I$S/dev/ixl"
dev/ixl/ixl_pf_iov.c optional ixl pci \
dev/ixl/ixl_pf_iov.c optional ixl pci pci_iov \
compile-with "${NORMAL_C} -I$S/dev/ixl"
dev/ixl/ixl_pf_i2c.c optional ixl pci \
compile-with "${NORMAL_C} -I$S/dev/ixl"

File diff suppressed because it is too large

View File

@ -51,6 +51,7 @@
#define ARCMSR_MAX_HBB_POSTQUEUE 264 /* (ARCMSR_MAX_OUTSTANDING_CMD+8) */
#define ARCMSR_MAX_HBD_POSTQUEUE 256
#define ARCMSR_TIMEOUT_DELAY 60 /* in sec */
#define ARCMSR_NUM_MSIX_VECTORS 4
/*
*********************************************************************
*/
@ -116,10 +117,12 @@
#define PCI_DEVICE_ID_ARECA_1680 0x1680 /* Device ID */
#define PCI_DEVICE_ID_ARECA_1681 0x1681 /* Device ID */
#define PCI_DEVICE_ID_ARECA_1880 0x1880 /* Device ID */
#define PCI_DEVICE_ID_ARECA_1884 0x1884 /* Device ID */
#define ARECA_SUB_DEV_ID_1880 0x1880 /* Subsystem Device ID */
#define ARECA_SUB_DEV_ID_1882 0x1882 /* Subsystem Device ID */
#define ARECA_SUB_DEV_ID_1883 0x1883 /* Subsystem Device ID */
#define ARECA_SUB_DEV_ID_1884 0x1884 /* Subsystem Device ID */
#define ARECA_SUB_DEV_ID_1212 0x1212 /* Subsystem Device ID */
#define ARECA_SUB_DEV_ID_1213 0x1213 /* Subsystem Device ID */
#define ARECA_SUB_DEV_ID_1222 0x1222 /* Subsystem Device ID */
@ -152,6 +155,7 @@
#define PCIDevVenIDARC1681 0x168117D3 /* Vendor Device ID */
#define PCIDevVenIDARC1880 0x188017D3 /* Vendor Device ID */
#define PCIDevVenIDARC1882 0x188217D3 /* Vendor Device ID */
#define PCIDevVenIDARC1884 0x188417D3 /* Vendor Device ID */
#ifndef PCIR_BARS
#define PCIR_BARS 0x10
@ -460,6 +464,26 @@ struct CMD_MESSAGE_FIELD {
/*ARCMSR_HBAMU_MESSAGE_FIRMWARE_OK*/
#define ARCMSR_HBDMU_MESSAGE_FIRMWARE_OK 0x80000000
/*
*******************************************************************************
** SPEC. for Areca HBE adapter
*******************************************************************************
*/
#define ARCMSR_SIGNATURE_1884 0x188417D3
#define ARCMSR_HBEMU_OUTBOUND_DOORBELL_ISR 0x00000001
#define ARCMSR_HBEMU_OUTBOUND_POSTQUEUE_ISR 0x00000008
#define ARCMSR_HBEMU_ALL_INTMASKENABLE 0x00000009 /* disable all ISR */
#define ARCMSR_HBEMU_DRV2IOP_DATA_WRITE_OK 0x00000002
#define ARCMSR_HBEMU_DRV2IOP_DATA_READ_OK 0x00000004
#define ARCMSR_HBEMU_DRV2IOP_MESSAGE_CMD_DONE 0x00000008 /* inbound message 0 ready */
#define ARCMSR_HBEMU_IOP2DRV_DATA_WRITE_OK 0x00000002
#define ARCMSR_HBEMU_IOP2DRV_DATA_READ_OK 0x00000004
#define ARCMSR_HBEMU_IOP2DRV_MESSAGE_CMD_DONE 0x00000008 /* outbound message 0 ready */
#define ARCMSR_HBEMU_MESSAGE_FIRMWARE_OK 0x80000000 /* ARCMSR_HBCMU_MESSAGE_FIRMWARE_OK */
/* ARC-1884 doorbell sync */
#define ARCMSR_HBEMU_DOORBELL_SYNC 0x100
#define ARCMSR_ARC188X_RESET_ADAPTER 0x00000004
/*
*********************************************************************
** Message Unit structure
@ -687,6 +711,92 @@ struct HBD_MessageUnit0 {
uint16_t doneq_index;
struct HBD_MessageUnit *phbdmu;
};
/*
*********************************************************************
**
*********************************************************************
*/
struct HBE_MessageUnit {
u_int32_t iobound_doorbell; /*0000 0003*/
u_int32_t write_sequence_3xxx; /*0004 0007*/
u_int32_t host_diagnostic_3xxx; /*0008 000B*/
u_int32_t posted_outbound_doorbell; /*000C 000F*/
u_int32_t master_error_attribute; /*0010 0013*/
u_int32_t master_error_address_low; /*0014 0017*/
u_int32_t master_error_address_high; /*0018 001B*/
u_int32_t hcb_size; /*001C 001F*/
u_int32_t inbound_doorbell; /*0020 0023*/
u_int32_t diagnostic_rw_data; /*0024 0027*/
u_int32_t diagnostic_rw_address_low; /*0028 002B*/
u_int32_t diagnostic_rw_address_high; /*002C 002F*/
u_int32_t host_int_status; /*0030 0033 host interrupt status*/
u_int32_t host_int_mask; /*0034 0037 host interrupt mask*/
u_int32_t dcr_data; /*0038 003B*/
u_int32_t dcr_address; /*003C 003F*/
u_int32_t inbound_queueport; /*0040 0043 port32 host inbound queue port*/
u_int32_t outbound_queueport; /*0044 0047 port32 host outbound queue port*/
u_int32_t hcb_pci_address_low; /*0048 004B*/
u_int32_t hcb_pci_address_high; /*004C 004F*/
u_int32_t iop_int_status; /*0050 0053*/
u_int32_t iop_int_mask; /*0054 0057*/
u_int32_t iop_inbound_queue_port; /*0058 005B*/
u_int32_t iop_outbound_queue_port; /*005C 005F*/
u_int32_t inbound_free_list_index; /*0060 0063*/
u_int32_t inbound_post_list_index; /*0064 0067*/
u_int32_t outbound_free_list_index; /*0068 006B*/
u_int32_t outbound_post_list_index; /*006C 006F*/
u_int32_t inbound_doorbell_clear; /*0070 0073*/
u_int32_t i2o_message_unit_control; /*0074 0077*/
u_int32_t last_used_message_source_address_low; /*0078 007B*/
u_int32_t last_used_message_source_address_high; /*007C 007F*/
u_int32_t pull_mode_data_byte_count[4]; /*0080 008F*/
u_int32_t message_dest_address_index; /*0090 0093*/
u_int32_t done_queue_not_empty_int_counter_timer; /*0094 0097*/
u_int32_t utility_A_int_counter_timer; /*0098 009B*/
u_int32_t outbound_doorbell; /*009C 009F*/
u_int32_t outbound_doorbell_clear; /*00A0 00A3*/
u_int32_t message_source_address_index; /*00A4 00A7*/
u_int32_t message_done_queue_index; /*00A8 00AB*/
u_int32_t reserved0; /*00AC 00AF*/
u_int32_t inbound_msgaddr0; /*00B0 00B3 scratchpad0*/
u_int32_t inbound_msgaddr1; /*00B4 00B7 scratchpad1*/
u_int32_t outbound_msgaddr0; /*00B8 00BB scratchpad2*/
u_int32_t outbound_msgaddr1; /*00BC 00BF scratchpad3*/
u_int32_t inbound_queueport_low; /*00C0 00C3 port64 host inbound queue port low*/
u_int32_t inbound_queueport_high; /*00C4 00C7 port64 host inbound queue port high*/
u_int32_t outbound_queueport_low; /*00C8 00CB port64 host outbound queue port low*/
u_int32_t outbound_queueport_high; /*00CC 00CF port64 host outbound queue port high*/
u_int32_t iop_inbound_queue_port_low; /*00D0 00D3*/
u_int32_t iop_inbound_queue_port_high; /*00D4 00D7*/
u_int32_t iop_outbound_queue_port_low; /*00D8 00DB*/
u_int32_t iop_outbound_queue_port_high; /*00DC 00DF*/
u_int32_t message_dest_queue_port_low; /*00E0 00E3*/
u_int32_t message_dest_queue_port_high; /*00E4 00E7*/
u_int32_t last_used_message_dest_address_low; /*00E8 00EB*/
u_int32_t last_used_message_dest_address_high; /*00EC 00EF*/
u_int32_t message_done_queue_base_address_low; /*00F0 00F3*/
u_int32_t message_done_queue_base_address_high; /*00F4 00F7*/
u_int32_t host_diagnostic; /*00F8 00FB*/
u_int32_t write_sequence; /*00FC 00FF*/
u_int32_t reserved1[46]; /*0100 01B7*/
u_int32_t reply_post_producer_index; /*01B8 01BB*/
u_int32_t reply_post_consumer_index; /*01BC 01BF*/
u_int32_t reserved2[1936]; /*01C0 1FFF*/
u_int32_t message_wbuffer[32]; /*2000 207F*/
u_int32_t reserved3[32]; /*2080 20FF*/
u_int32_t message_rbuffer[32]; /*2100 217F*/
u_int32_t reserved4[32]; /*2180 21FF*/
u_int32_t msgcode_rwbuffer[256]; /*2200 23FF*/
};
typedef struct deliver_completeQ {
u_int16_t cmdFlag;
u_int16_t cmdSMID;
u_int16_t cmdLMID; // reserved (0)
u_int16_t cmdFlag2; // reserved (0)
} DeliverQ, CompletionQ, *pDeliver_Q, *pCompletion_Q;
#define COMPLETION_Q_POOL_SIZE (sizeof(struct deliver_completeQ) * 512 + 128)
/*
*********************************************************************
@ -700,6 +810,7 @@ struct MessageUnit_UNION
struct HBB_MessageUnit hbbmu;
struct HBC_MessageUnit hbcmu;
struct HBD_MessageUnit0 hbdmu;
struct HBE_MessageUnit hbemu;
} muu;
};
/*
@ -1089,6 +1200,7 @@ struct CommandControlBlock {
u_int16_t srb_state; /* 538-539 */
u_int32_t cdb_phyaddr_high; /* 540-543 */
struct callout ccb_callout;
u_int32_t smid;
/* ========================================================== */
};
/* srb_flags */
@ -1121,10 +1233,11 @@ struct CommandControlBlock {
** Adapter Control Block
*********************************************************************
*/
#define ACB_ADAPTER_TYPE_A 0x00000001 /* hba I IOP */
#define ACB_ADAPTER_TYPE_B 0x00000002 /* hbb M IOP */
#define ACB_ADAPTER_TYPE_C 0x00000004 /* hbc L IOP */
#define ACB_ADAPTER_TYPE_D 0x00000008 /* hbd M IOP */
#define ACB_ADAPTER_TYPE_A 0x00000000 /* hba I IOP */
#define ACB_ADAPTER_TYPE_B 0x00000001 /* hbb M IOP */
#define ACB_ADAPTER_TYPE_C 0x00000002 /* hbc L IOP */
#define ACB_ADAPTER_TYPE_D 0x00000003 /* hbd M IOP */
#define ACB_ADAPTER_TYPE_E 0x00000004 /* hbd L IOP */
struct AdapterControlBlock {
u_int32_t adapter_type; /* adapter A,B..... */
@ -1144,8 +1257,9 @@ struct AdapterControlBlock {
int pci_unit;
struct resource *sys_res_arcmsr[2];
struct resource *irqres;
void *ih; /* interrupt handle */
struct resource *irqres[ARCMSR_NUM_MSIX_VECTORS];
void *ih[ARCMSR_NUM_MSIX_VECTORS]; /* interrupt handle */
int irq_id[ARCMSR_NUM_MSIX_VECTORS];
/* Hooks into the CAM XPT */
struct cam_sim *psim;
@ -1206,6 +1320,13 @@ struct AdapterControlBlock {
u_int32_t adapter_bus_speed;
u_int32_t maxOutstanding;
u_int16_t sub_device_id;
u_int32_t doneq_index;
u_int32_t in_doorbell;
u_int32_t out_doorbell;
u_int32_t completionQ_entry;
pCompletion_Q pCompletionQ;
int msix_vectors;
int rid;
};/* HW_DEVICE_EXTENSION */
/* acb_flags */
#define ACB_F_SCSISTOPADAPTER 0x0001
@ -1221,6 +1342,7 @@ struct AdapterControlBlock {
#define ACB_F_CAM_DEV_QFRZN 0x0400
#define ACB_F_BUS_HANG_ON 0x0800 /* need hardware reset bus */
#define ACB_F_SRB_FUNCTION_POWER 0x1000
#define ACB_F_MSIX_ENABLED 0x2000
/* devstate */
#define ARECA_RAID_GONE 0x55
#define ARECA_RAID_GOOD 0xaa

View File

@ -538,6 +538,4 @@ enum i40e_status_code i40e_read_phy_register(struct i40e_hw *hw,
enum i40e_status_code i40e_write_phy_register(struct i40e_hw *hw,
u8 page, u16 reg, u8 phy_addr, u16 value);
u8 i40e_get_phy_address(struct i40e_hw *hw, u8 dev_num);
enum i40e_status_code i40e_blink_phy_link_led(struct i40e_hw *hw,
u32 time, u32 interval);
#endif /* _I40E_PROTOTYPE_H_ */

View File

@ -1074,7 +1074,7 @@ ixlv_setup_vc(struct ixlv_sc *sc)
if (error == ETIMEDOUT) {
if (!send_api_ver_retried) {
/* Resend message, one more time */
send_api_ver_retried++;
send_api_ver_retried = true;
device_printf(dev,
"%s: Timeout while verifying API version on first"
" try!\n", __func__);

View File

@ -664,8 +664,7 @@ struct ixl_sysctl_info {
char *description;
};
static uint8_t ixl_bcast_addr[ETHER_ADDR_LEN] =
{0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
extern const uint8_t ixl_bcast_addr[ETHER_ADDR_LEN];
/*********************************************************************
* TXRX Function prototypes

View File

@ -168,16 +168,9 @@ struct ixl_pf {
"\nExecutes a \"Get Link Status\" command on the Admin Queue, and displays" \
" the response." \
static char *ixl_fc_string[6] = {
"None",
"Rx",
"Tx",
"Full",
"Priority",
"Default"
};
extern const char * const ixl_fc_string[6];
static MALLOC_DEFINE(M_IXL, "ixl", "ixl driver allocations");
MALLOC_DECLARE(M_IXL);
/*** Functions / Macros ***/
/* Adjust the level here to 10 or over to print stats messages */
@ -299,8 +292,8 @@ int ixl_rebuild_hw_structs_after_reset(struct ixl_pf *);
void ixl_set_queue_rx_itr(struct ixl_queue *);
void ixl_set_queue_tx_itr(struct ixl_queue *);
void ixl_add_filter(struct ixl_vsi *, u8 *, s16 vlan);
void ixl_del_filter(struct ixl_vsi *, u8 *, s16 vlan);
void ixl_add_filter(struct ixl_vsi *, const u8 *, s16 vlan);
void ixl_del_filter(struct ixl_vsi *, const u8 *, s16 vlan);
void ixl_reconfigure_filters(struct ixl_vsi *vsi);
int ixl_disable_rings(struct ixl_vsi *);
@ -331,7 +324,7 @@ void ixl_init_filters(struct ixl_vsi *);
void ixl_add_hw_filters(struct ixl_vsi *, int, int);
void ixl_del_hw_filters(struct ixl_vsi *, int);
struct ixl_mac_filter *
ixl_find_filter(struct ixl_vsi *, u8 *, s16);
ixl_find_filter(struct ixl_vsi *, const u8 *, s16);
void ixl_add_mc_filter(struct ixl_vsi *, u8 *);
void ixl_free_mac_filters(struct ixl_vsi *vsi);
void ixl_update_vsi_stats(struct ixl_vsi *);

View File

@ -89,6 +89,20 @@ static int ixl_sysctl_qrx_tail_handler(SYSCTL_HANDLER_ARGS);
extern int ixl_enable_iwarp;
#endif
const uint8_t ixl_bcast_addr[ETHER_ADDR_LEN] =
{0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
const char * const ixl_fc_string[6] = {
"None",
"Rx",
"Tx",
"Full",
"Priority",
"Default"
};
MALLOC_DEFINE(M_IXL, "ixl", "ixl driver allocations");
void
ixl_debug_core(struct ixl_pf *pf, enum ixl_dbg_mask mask, char *fmt, ...)
{
@ -3193,7 +3207,7 @@ ixl_reconfigure_filters(struct ixl_vsi *vsi)
** This routine adds macvlan filters
*/
void
ixl_add_filter(struct ixl_vsi *vsi, u8 *macaddr, s16 vlan)
ixl_add_filter(struct ixl_vsi *vsi, const u8 *macaddr, s16 vlan)
{
struct ixl_mac_filter *f, *tmp;
struct ixl_pf *pf;
@ -3239,7 +3253,7 @@ ixl_add_filter(struct ixl_vsi *vsi, u8 *macaddr, s16 vlan)
}
void
ixl_del_filter(struct ixl_vsi *vsi, u8 *macaddr, s16 vlan)
ixl_del_filter(struct ixl_vsi *vsi, const u8 *macaddr, s16 vlan)
{
struct ixl_mac_filter *f;
@ -3264,7 +3278,7 @@ ixl_del_filter(struct ixl_vsi *vsi, u8 *macaddr, s16 vlan)
** Find the filter with both matching mac addr and vlan id
*/
struct ixl_mac_filter *
ixl_find_filter(struct ixl_vsi *vsi, u8 *macaddr, s16 vlan)
ixl_find_filter(struct ixl_vsi *vsi, const u8 *macaddr, s16 vlan)
{
struct ixl_mac_filter *f;
bool match = FALSE;

View File

@ -63,8 +63,6 @@ static inline void ixl_rx_input(struct rx_ring *, struct ifnet *,
struct mbuf *, u8);
static inline bool ixl_tso_detect_sparse(struct mbuf *mp);
static int ixl_tx_setup_offload(struct ixl_queue *que,
struct mbuf *mp, u32 *cmd, u32 *off);
static inline u32 ixl_get_tx_head(struct ixl_queue *que);
#ifdef DEV_NETMAP
@ -1578,6 +1576,18 @@ ixl_rxeof(struct ixl_queue *que, int count)
else
vtag = 0;
/* Remove device access to the rx buffers. */
if (rbuf->m_head != NULL) {
bus_dmamap_sync(rxr->htag, rbuf->hmap,
BUS_DMASYNC_POSTREAD);
bus_dmamap_unload(rxr->htag, rbuf->hmap);
}
if (rbuf->m_pack != NULL) {
bus_dmamap_sync(rxr->ptag, rbuf->pmap,
BUS_DMASYNC_POSTREAD);
bus_dmamap_unload(rxr->ptag, rbuf->pmap);
}
/*
** Make sure bad packets are discarded,
** note that only EOP descriptor has valid

View File

@ -110,7 +110,10 @@ nvme_sim_nvmeio(struct cam_sim *sim, union ccb *ccb)
memcpy(&req->cmd, &ccb->nvmeio.cmd, sizeof(ccb->nvmeio.cmd));
nvme_ctrlr_submit_io_request(ctrlr, req);
if (ccb->ccb_h.func_code == XPT_NVME_IO)
nvme_ctrlr_submit_io_request(ctrlr, req);
else
nvme_ctrlr_submit_admin_request(ctrlr, req);
ccb->ccb_h.status |= CAM_SIM_QUEUED;
}
@ -225,6 +228,7 @@ nvme_sim_action(struct cam_sim *sim, union ccb *ccb)
ccb->ccb_h.status = CAM_REQ_CMP;
break;
case XPT_NVME_IO: /* Execute the requested I/O operation */
case XPT_NVME_ADMIN: /* or Admin operation */
nvme_sim_nvmeio(sim, ccb);
return; /* no done */
default:

View File

@ -350,6 +350,8 @@ kill
##
## Allow message queue operations on file descriptors, subject to capability
## rights.
## NOTE: Corresponding sysents are initialized in sys/kern/uipc_mqueue.c with
## SYF_CAPENABLED.
##
kmq_notify
kmq_setattr
@ -545,6 +547,8 @@ sched_yield
##
## Allow I/O-related file descriptors, subject to capability rights.
## NOTE: Corresponding sysents are initialized in sys/netinet/sctp_syscalls.c
## with SYF_CAPENABLED.
##
sctp_generic_recvmsg
sctp_generic_sendmsg

View File

@ -218,6 +218,16 @@ _bus_dmamap_load_ccb(bus_dma_tag_t dmat, bus_dmamap_t map, union ccb *ccb,
sglist_cnt = 0;
break;
}
case XPT_NVME_IO:
case XPT_NVME_ADMIN: {
struct ccb_nvmeio *nvmeio;
nvmeio = &ccb->nvmeio;
data_ptr = nvmeio->data_ptr;
dxfer_len = nvmeio->dxfer_len;
sglist_cnt = 0;
break;
}
default:
panic("_bus_dmamap_load_ccb: Unsupported func code %d",
ccb_h->func_code);

View File

@ -2708,10 +2708,10 @@ static struct vfsconf mqueuefs_vfsconf = {
static struct syscall_helper_data mq_syscalls[] = {
SYSCALL_INIT_HELPER(kmq_open),
SYSCALL_INIT_HELPER(kmq_setattr),
SYSCALL_INIT_HELPER(kmq_timedsend),
SYSCALL_INIT_HELPER(kmq_timedreceive),
SYSCALL_INIT_HELPER(kmq_notify),
SYSCALL_INIT_HELPER_F(kmq_setattr, SYF_CAPENABLED),
SYSCALL_INIT_HELPER_F(kmq_timedsend, SYF_CAPENABLED),
SYSCALL_INIT_HELPER_F(kmq_timedreceive, SYF_CAPENABLED),
SYSCALL_INIT_HELPER_F(kmq_notify, SYF_CAPENABLED),
SYSCALL_INIT_HELPER(kmq_unlink),
SYSCALL_INIT_LAST
};
@ -2870,10 +2870,10 @@ freebsd32_kmq_notify(struct thread *td, struct freebsd32_kmq_notify_args *uap)
static struct syscall_helper_data mq32_syscalls[] = {
SYSCALL32_INIT_HELPER(freebsd32_kmq_open),
SYSCALL32_INIT_HELPER(freebsd32_kmq_setattr),
SYSCALL32_INIT_HELPER(freebsd32_kmq_timedsend),
SYSCALL32_INIT_HELPER(freebsd32_kmq_timedreceive),
SYSCALL32_INIT_HELPER(freebsd32_kmq_notify),
SYSCALL32_INIT_HELPER_F(freebsd32_kmq_setattr, SYF_CAPENABLED),
SYSCALL32_INIT_HELPER_F(freebsd32_kmq_timedsend, SYF_CAPENABLED),
SYSCALL32_INIT_HELPER_F(freebsd32_kmq_timedreceive, SYF_CAPENABLED),
SYSCALL32_INIT_HELPER_F(freebsd32_kmq_notify, SYF_CAPENABLED),
SYSCALL32_INIT_HELPER_COMPAT(kmq_unlink),
SYSCALL_INIT_LAST
};

View File

@ -3,10 +3,11 @@
.PATH: ${SRCTOP}/sys/dev/ixl
KMOD = if_ixl
SRCS = device_if.h bus_if.h pci_if.h pci_iov_if.h
SRCS = device_if.h bus_if.h pci_if.h
SRCS += opt_inet.h opt_inet6.h opt_rss.h opt_ixl.h
SRCS += if_ixl.c ixl_pf_main.c ixl_pf_qmgr.c ixl_txrx.c ixl_pf_i2c.c i40e_osdep.c
SRCS += ixl_pf_iov.c ixl_iw.c
SRCS += ixl_iw.c
SRCS.PCI_IOV= pci_iov_if.h ixl_pf_iov.c
# Shared source
SRCS += i40e_common.c i40e_nvm.c i40e_adminq.c i40e_lan_hmc.c i40e_hmc.c
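Net effect of the two ixl build-glue changes in this merge: ixl_pf_iov.c is compiled only when PCI SR-IOV support is configured, instead of unconditionally. Presumably (assumption, not shown in these hunks) that means a kernel configuration along the lines of:

device		pci_iov		# PCI SR-IOV support

Without it, the files(5) entry's pci_iov qualifier and the Makefile's SRCS.PCI_IOV list simply leave the IOV sources out of the build.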

View File

@ -82,10 +82,10 @@ __FBSDID("$FreeBSD$");
#include <netinet/sctp_peeloff.h>
static struct syscall_helper_data sctp_syscalls[] = {
SYSCALL_INIT_HELPER(sctp_peeloff),
SYSCALL_INIT_HELPER(sctp_generic_sendmsg),
SYSCALL_INIT_HELPER(sctp_generic_sendmsg_iov),
SYSCALL_INIT_HELPER(sctp_generic_recvmsg),
SYSCALL_INIT_HELPER_F(sctp_peeloff, SYF_CAPENABLED),
SYSCALL_INIT_HELPER_F(sctp_generic_sendmsg, SYF_CAPENABLED),
SYSCALL_INIT_HELPER_F(sctp_generic_sendmsg_iov, SYF_CAPENABLED),
SYSCALL_INIT_HELPER_F(sctp_generic_recvmsg, SYF_CAPENABLED),
SYSCALL_INIT_LAST
};

View File

@ -230,24 +230,30 @@ struct syscall_helper_data {
int syscall_no;
int registered;
};
#define SYSCALL_INIT_HELPER(syscallname) { \
#define SYSCALL_INIT_HELPER_F(syscallname, flags) { \
.new_sysent = { \
.sy_narg = (sizeof(struct syscallname ## _args ) \
/ sizeof(register_t)), \
.sy_call = (sy_call_t *)& sys_ ## syscallname, \
.sy_auevent = SYS_AUE_##syscallname \
.sy_auevent = SYS_AUE_##syscallname, \
.sy_flags = (flags) \
}, \
.syscall_no = SYS_##syscallname \
}
#define SYSCALL_INIT_HELPER_COMPAT(syscallname) { \
#define SYSCALL_INIT_HELPER_COMPAT_F(syscallname, flags) { \
.new_sysent = { \
.sy_narg = (sizeof(struct syscallname ## _args ) \
/ sizeof(register_t)), \
.sy_call = (sy_call_t *)& syscallname, \
.sy_auevent = SYS_AUE_##syscallname \
.sy_auevent = SYS_AUE_##syscallname, \
.sy_flags = (flags) \
}, \
.syscall_no = SYS_##syscallname \
}
#define SYSCALL_INIT_HELPER(syscallname) \
SYSCALL_INIT_HELPER_F(syscallname, 0)
#define SYSCALL_INIT_HELPER_COMPAT(syscallname) \
SYSCALL_INIT_HELPER_COMPAT_F(syscallname, 0)
#define SYSCALL_INIT_LAST { \
.syscall_no = NO_SYSCALL \
}

View File

@ -1962,7 +1962,7 @@ vm_map_pmap_enter(vm_map_t map, vm_offset_t addr, vm_prot_t prot,
(pagesizes[p->psind] - 1)) == 0) {
mask = atop(pagesizes[p->psind]) - 1;
if (tmpidx + mask < psize &&
vm_page_ps_is_valid(p)) {
vm_page_ps_test(p, PS_ALL_VALID, NULL)) {
p += mask;
threshold += mask;
}

View File

@ -3472,12 +3472,11 @@ vm_page_is_valid(vm_page_t m, int base, int size)
}
/*
* vm_page_ps_is_valid:
*
* Returns TRUE if the entire (super)page is valid and FALSE otherwise.
* Returns true if all of the specified predicates are true for the entire
* (super)page and false otherwise.
*/
boolean_t
vm_page_ps_is_valid(vm_page_t m)
bool
vm_page_ps_test(vm_page_t m, int flags, vm_page_t skip_m)
{
int i, npages;
@ -3490,10 +3489,25 @@ vm_page_ps_is_valid(vm_page_t m)
* occupy adjacent entries in vm_page_array[].
*/
for (i = 0; i < npages; i++) {
if (m[i].valid != VM_PAGE_BITS_ALL)
return (FALSE);
if (&m[i] == skip_m)
continue;
if ((flags & PS_NONE_BUSY) != 0 && vm_page_busied(&m[i]))
return (false);
if ((flags & PS_ALL_DIRTY) != 0) {
/*
* Calling vm_page_test_dirty() or pmap_is_modified()
* might stop this case from spuriously returning
* "false". However, that would require a write lock
* on the object containing "m[i]".
*/
if (m[i].dirty != VM_PAGE_BITS_ALL)
return (false);
}
if ((flags & PS_ALL_VALID) != 0 &&
m[i].valid != VM_PAGE_BITS_ALL)
return (false);
}
return (TRUE);
return (true);
}
/*

View File

@ -438,6 +438,18 @@ malloc2vm_flags(int malloc_flags)
}
#endif
/*
* Predicates supported by vm_page_ps_test():
*
* PS_ALL_DIRTY is true only if the entire (super)page is dirty.
* However, it can be spuriously false when the (super)page has become
* dirty in the pmap but that information has not been propagated to the
* machine-independent layer.
*/
#define PS_ALL_DIRTY 0x1
#define PS_ALL_VALID 0x2
#define PS_NONE_BUSY 0x4
void vm_page_busy_downgrade(vm_page_t m);
void vm_page_busy_sleep(vm_page_t m, const char *msg, bool nonshared);
void vm_page_flash(vm_page_t m);
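The vm_map.c hunk above shows the simplest use, vm_page_ps_test(p, PS_ALL_VALID, NULL). As a hedged sketch of the richer form (illustrative, not taken from this merge), a caller that wants to treat a superpage as one unit only when it is fully dirty and no constituent page is busied, except for a page it already holds busy itself, could write:

	/*
	 * 'm' is the first base page of the superpage run; 'held' is a page
	 * the caller has already busied and therefore wants exempted.
	 */
	if (vm_page_ps_test(m, PS_ALL_DIRTY | PS_NONE_BUSY, held)) {
		/* ... operate on the whole pagesizes[m->psind] range ... */
	}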
@ -469,7 +481,7 @@ vm_page_t vm_page_next(vm_page_t m);
int vm_page_pa_tryrelock(pmap_t, vm_paddr_t, vm_paddr_t *);
struct vm_pagequeue *vm_page_pagequeue(vm_page_t m);
vm_page_t vm_page_prev(vm_page_t m);
boolean_t vm_page_ps_is_valid(vm_page_t m);
bool vm_page_ps_test(vm_page_t m, int flags, vm_page_t skip_m);
void vm_page_putfake(vm_page_t m);
void vm_page_readahead_finish(vm_page_t m);
bool vm_page_reclaim_contig(int req, u_long npages, vm_paddr_t low,

View File

@ -3,11 +3,12 @@
TESTSDIR= ${TESTSBASE}/sys/aio
ATF_TESTS_C+= aio_test
TEST_METADATA.aio_test+= timeout="30"
PLAIN_TESTS_C+= aio_kqueue_test
PLAIN_TESTS_C+= lio_kqueue_test
LIBADD.aio_test+= util
LIBADD.aio_test+= util rt
CFLAGS+= -I${.CURDIR:H:H}

View File

@ -32,10 +32,11 @@
* size buffer with pseudo-random data, writing it to one fd using AIO, then
* reading it from a second descriptor using AIO. For some targets, the same
* fd is used for write and read (i.e., file, md device), but for others the
* operation is performed on a peer (pty, socket, fifo, etc). A timeout is
* initiated to detect undue blocking. This test does not attempt to exercise
* error cases or more subtle asynchronous behavior, just make sure that the
* basic operations work on some basic object types.
* operation is performed on a peer (pty, socket, fifo, etc). For each file
* descriptor type, several completion methods are tested. This test program
* does not attempt to exercise error cases or more subtle asynchronous
* behavior, just make sure that the basic operations work on some basic object
* types.
*/
#include <sys/param.h>
@ -51,6 +52,7 @@
#include <fcntl.h>
#include <libutil.h>
#include <limits.h>
#include <semaphore.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
@ -85,41 +87,10 @@ struct aio_context {
char ac_buffer[GLOBAL_MAX];
int ac_buflen;
int ac_seconds;
void (*ac_cleanup)(void *arg);
void *ac_cleanup_arg;
};
static int aio_timedout;
static sem_t completions;
/*
* Each test run specifies a timeout in seconds. Use the somewhat obsoleted
* signal(3) and alarm(3) APIs to set this up.
*/
static void
aio_timeout_signal(int sig __unused)
{
aio_timedout = 1;
}
static void
aio_timeout_start(int seconds)
{
aio_timedout = 0;
ATF_REQUIRE_MSG(signal(SIGALRM, aio_timeout_signal) != SIG_ERR,
"failed to set SIGALRM handler: %s", strerror(errno));
alarm(seconds);
}
static void
aio_timeout_stop(void)
{
ATF_REQUIRE_MSG(signal(SIGALRM, NULL) != SIG_ERR,
"failed to reset SIGALRM handler to default: %s", strerror(errno));
alarm(0);
}
/*
* Fill a buffer given a seed that can be fed into srandom() to initialize
@ -163,8 +134,7 @@ aio_test_buffer(char *buffer, int len, long seed)
*/
static void
aio_context_init(struct aio_context *ac, int read_fd,
int write_fd, int buflen, int seconds, void (*cleanup)(void *),
void *cleanup_arg)
int write_fd, int buflen)
{
ATF_REQUIRE_MSG(buflen <= BUFFER_MAX,
@ -179,9 +149,6 @@ aio_context_init(struct aio_context *ac, int read_fd,
aio_fill_buffer(ac->ac_buffer, buflen, ac->ac_seed);
ATF_REQUIRE_MSG(aio_test_buffer(ac->ac_buffer, buflen,
ac->ac_seed) != 0, "aio_test_buffer: internal error");
ac->ac_seconds = seconds;
ac->ac_cleanup = cleanup;
ac->ac_cleanup_arg = cleanup_arg;
}
static ssize_t
@ -189,7 +156,7 @@ poll(struct aiocb *aio)
{
int error;
while ((error = aio_error(aio)) == EINPROGRESS && !aio_timedout)
while ((error = aio_error(aio)) == EINPROGRESS)
usleep(25000);
switch (error) {
case EINPROGRESS:
@ -202,6 +169,70 @@ poll(struct aiocb *aio)
}
}
static void
sigusr1_handler(int sig __unused)
{
ATF_REQUIRE_EQ(0, sem_post(&completions));
}
static void
thr_handler(union sigval sv __unused)
{
ATF_REQUIRE_EQ(0, sem_post(&completions));
}
static ssize_t
poll_signaled(struct aiocb *aio)
{
int error;
ATF_REQUIRE_EQ(0, sem_wait(&completions));
error = aio_error(aio);
switch (error) {
case EINPROGRESS:
errno = EINTR;
return (-1);
case 0:
return (aio_return(aio));
default:
return (error);
}
}
/*
* Setup a signal handler for signal delivery tests
* This isn't thread safe, but it's ok since ATF runs each testcase in a
* separate process
*/
static struct sigevent*
setup_signal(void)
{
static struct sigevent sev;
ATF_REQUIRE_EQ(0, sem_init(&completions, false, 0));
sev.sigev_notify = SIGEV_SIGNAL;
sev.sigev_signo = SIGUSR1;
ATF_REQUIRE(SIG_ERR != signal(SIGUSR1, sigusr1_handler));
return (&sev);
}
/*
* Setup a thread for thread delivery tests
* This isn't thread safe, but it's ok since ATF runs each testcase in a
* separate process
*/
static struct sigevent*
setup_thread()
{
static struct sigevent sev;
ATF_REQUIRE_EQ(0, sem_init(&completions, false, 0));
sev.sigev_notify = SIGEV_THREAD;
sev.sigev_notify_function = thr_handler;
sev.sigev_notify_attributes = NULL;
return (&sev);
}
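Since the completion method and the sigevent setup are now passed in as a pair, additional delivery mechanisms slot in without touching the per-descriptor tests. A hypothetical kqueue-based pair (not part of this change; assumes <sys/event.h> is included and mirrors the standalone aio_kqueue_test) might look like:

static int kq = -1;

static struct sigevent *
setup_kqueue(void)
{
	static struct sigevent sev;

	kq = kqueue();
	ATF_REQUIRE_MSG(kq >= 0, "kqueue failed: %s", strerror(errno));
	sev.sigev_notify = SIGEV_KEVENT;
	sev.sigev_notify_kqueue = kq;
	sev.sigev_value.sival_ptr = NULL;
	return (&sev);
}

static ssize_t
poll_kqueue(struct aiocb *aio)
{
	struct kevent kev;

	/*
	 * The AIO request registers its own EVFILT_AIO event when submitted
	 * with SIGEV_KEVENT; wait for that single event to fire.
	 */
	ATF_REQUIRE_EQ(1, kevent(kq, NULL, 0, &kev, 1, NULL));
	ATF_REQUIRE_EQ((uintptr_t)aio, kev.ident);
	return (aio_return(aio));
}

A test body would then call, for example, aio_file_test(poll_kqueue, setup_kqueue()).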
static ssize_t
suspend(struct aiocb *aio)
{
@ -226,29 +257,12 @@ waitcomplete(struct aiocb *aio)
return (ret);
}
/*
* Each tester can register a callback to clean up in the event the test
* fails. Preserve the value of errno so that subsequent calls to errx()
* work properly.
*/
static void
aio_cleanup(struct aio_context *ac)
{
int error;
if (ac->ac_cleanup == NULL)
return;
error = errno;
(ac->ac_cleanup)(ac->ac_cleanup_arg);
errno = error;
}
/*
* Perform a simple write test of our initialized data buffer to the provided
* file descriptor.
*/
static void
aio_write_test(struct aio_context *ac, completion comp)
aio_write_test(struct aio_context *ac, completion comp, struct sigevent *sev)
{
struct aiocb aio;
ssize_t len;
@ -258,38 +272,18 @@ aio_write_test(struct aio_context *ac, completion comp)
aio.aio_nbytes = ac->ac_buflen;
aio.aio_fildes = ac->ac_write_fd;
aio.aio_offset = 0;
if (sev)
aio.aio_sigevent = *sev;
aio_timeout_start(ac->ac_seconds);
if (aio_write(&aio) < 0) {
if (errno == EINTR) {
if (aio_timedout) {
aio_cleanup(ac);
atf_tc_fail("aio_write timed out");
}
}
aio_cleanup(ac);
if (aio_write(&aio) < 0)
atf_tc_fail("aio_write failed: %s", strerror(errno));
}
len = comp(&aio);
if (len < 0) {
if (errno == EINTR) {
if (aio_timedout) {
aio_cleanup(ac);
atf_tc_fail("aio timed out");
}
}
aio_cleanup(ac);
if (len < 0)
atf_tc_fail("aio failed: %s", strerror(errno));
}
aio_timeout_stop();
if (len != ac->ac_buflen) {
aio_cleanup(ac);
if (len != ac->ac_buflen)
atf_tc_fail("aio short write (%jd)", (intmax_t)len);
}
}
/*
@ -297,7 +291,7 @@ aio_write_test(struct aio_context *ac, completion comp)
* provided file descriptor.
*/
static void
aio_read_test(struct aio_context *ac, completion comp)
aio_read_test(struct aio_context *ac, completion comp, struct sigevent *sev)
{
struct aiocb aio;
ssize_t len;
@ -308,44 +302,21 @@ aio_read_test(struct aio_context *ac, completion comp)
aio.aio_nbytes = ac->ac_buflen;
aio.aio_fildes = ac->ac_read_fd;
aio.aio_offset = 0;
if (sev)
aio.aio_sigevent = *sev;
aio_timeout_start(ac->ac_seconds);
if (aio_read(&aio) < 0) {
if (errno == EINTR) {
if (aio_timedout) {
aio_cleanup(ac);
atf_tc_fail("aio_read timed out");
}
}
aio_cleanup(ac);
if (aio_read(&aio) < 0)
atf_tc_fail("aio_read failed: %s", strerror(errno));
}
len = comp(&aio);
if (len < 0) {
if (errno == EINTR) {
if (aio_timedout) {
aio_cleanup(ac);
atf_tc_fail("aio timed out");
}
}
aio_cleanup(ac);
if (len < 0)
atf_tc_fail("aio failed: %s", strerror(errno));
}
aio_timeout_stop();
ATF_REQUIRE_EQ_MSG(len, ac->ac_buflen,
"aio short read (%jd)", (intmax_t)len);
if (len != ac->ac_buflen) {
aio_cleanup(ac);
atf_tc_fail("aio short read (%jd)",
(intmax_t)len);
}
if (aio_test_buffer(ac->ac_buffer, ac->ac_buflen, ac->ac_seed) == 0) {
aio_cleanup(ac);
if (aio_test_buffer(ac->ac_buffer, ac->ac_buflen, ac->ac_seed) == 0)
atf_tc_fail("buffer mismatched");
}
}
/*
@ -360,25 +331,10 @@ aio_read_test(struct aio_context *ac, completion comp)
*/
#define FILE_LEN GLOBAL_MAX
#define FILE_PATHNAME "testfile"
#define FILE_TIMEOUT 30
struct aio_file_arg {
int afa_fd;
};
static void
aio_file_cleanup(void *arg)
aio_file_test(completion comp, struct sigevent *sev)
{
struct aio_file_arg *afa;
afa = arg;
close(afa->afa_fd);
unlink(FILE_PATHNAME);
}
static void
aio_file_test(completion comp)
{
struct aio_file_arg arg;
struct aio_context ac;
int fd;
@ -388,60 +344,49 @@ aio_file_test(completion comp)
fd = open(FILE_PATHNAME, O_RDWR | O_CREAT, 0600);
ATF_REQUIRE_MSG(fd != -1, "open failed: %s", strerror(errno));
arg.afa_fd = fd;
aio_context_init(&ac, fd, fd, FILE_LEN,
FILE_TIMEOUT, aio_file_cleanup, &arg);
aio_write_test(&ac, comp);
aio_read_test(&ac, comp);
aio_file_cleanup(&arg);
aio_context_init(&ac, fd, fd, FILE_LEN);
aio_write_test(&ac, comp, sev);
aio_read_test(&ac, comp, sev);
close(fd);
}
ATF_TC_WITHOUT_HEAD(file_poll);
ATF_TC_BODY(file_poll, tc)
{
aio_file_test(poll);
aio_file_test(poll, NULL);
}
ATF_TC_WITHOUT_HEAD(file_signal);
ATF_TC_BODY(file_signal, tc)
{
aio_file_test(poll_signaled, setup_signal());
}
ATF_TC_WITHOUT_HEAD(file_suspend);
ATF_TC_BODY(file_suspend, tc)
{
aio_file_test(suspend);
aio_file_test(suspend, NULL);
}
ATF_TC_WITHOUT_HEAD(file_thread);
ATF_TC_BODY(file_thread, tc)
{
aio_file_test(poll_signaled, setup_thread());
}
ATF_TC_WITHOUT_HEAD(file_waitcomplete);
ATF_TC_BODY(file_waitcomplete, tc)
{
aio_file_test(waitcomplete);
aio_file_test(waitcomplete, NULL);
}
#define FIFO_LEN 256
#define FIFO_PATHNAME "testfifo"
#define FIFO_TIMEOUT 30
struct aio_fifo_arg {
int afa_read_fd;
int afa_write_fd;
};
static void
aio_fifo_cleanup(void *arg)
{
struct aio_fifo_arg *afa;
afa = arg;
if (afa->afa_read_fd != -1)
close(afa->afa_read_fd);
if (afa->afa_write_fd != -1)
close(afa->afa_write_fd);
unlink(FIFO_PATHNAME);
}
static void
aio_fifo_test(completion comp)
aio_fifo_test(completion comp, struct sigevent *sev)
{
int error, read_fd = -1, write_fd = -1;
struct aio_fifo_arg arg;
struct aio_context ac;
ATF_REQUIRE_KERNEL_MODULE("aio");
@ -449,75 +394,65 @@ aio_fifo_test(completion comp)
ATF_REQUIRE_MSG(mkfifo(FIFO_PATHNAME, 0600) != -1,
"mkfifo failed: %s", strerror(errno));
arg.afa_read_fd = -1;
arg.afa_write_fd = -1;
read_fd = open(FIFO_PATHNAME, O_RDONLY | O_NONBLOCK);
if (read_fd == -1) {
error = errno;
aio_fifo_cleanup(&arg);
errno = error;
atf_tc_fail("read_fd open failed: %s",
strerror(errno));
}
arg.afa_read_fd = read_fd;
write_fd = open(FIFO_PATHNAME, O_WRONLY);
if (write_fd == -1) {
error = errno;
aio_fifo_cleanup(&arg);
errno = error;
atf_tc_fail("write_fd open failed: %s",
strerror(errno));
}
arg.afa_write_fd = write_fd;
aio_context_init(&ac, read_fd, write_fd, FIFO_LEN,
FIFO_TIMEOUT, aio_fifo_cleanup, &arg);
aio_write_test(&ac, comp);
aio_read_test(&ac, comp);
aio_context_init(&ac, read_fd, write_fd, FIFO_LEN);
aio_write_test(&ac, comp, sev);
aio_read_test(&ac, comp, sev);
aio_fifo_cleanup(&arg);
close(read_fd);
close(write_fd);
}
ATF_TC_WITHOUT_HEAD(fifo_poll);
ATF_TC_BODY(fifo_poll, tc)
{
aio_fifo_test(poll);
aio_fifo_test(poll, NULL);
}
ATF_TC_WITHOUT_HEAD(fifo_signal);
ATF_TC_BODY(fifo_signal, tc)
{
aio_fifo_test(poll_signaled, setup_signal());
}
ATF_TC_WITHOUT_HEAD(fifo_suspend);
ATF_TC_BODY(fifo_suspend, tc)
{
aio_fifo_test(waitcomplete);
aio_fifo_test(suspend, NULL);
}
ATF_TC_WITHOUT_HEAD(fifo_thread);
ATF_TC_BODY(fifo_thread, tc)
{
aio_fifo_test(poll_signaled, setup_thread());
}
ATF_TC_WITHOUT_HEAD(fifo_waitcomplete);
ATF_TC_BODY(fifo_waitcomplete, tc)
{
aio_fifo_test(waitcomplete);
}
struct aio_unix_socketpair_arg {
int asa_sockets[2];
};
static void
aio_unix_socketpair_cleanup(void *arg)
{
struct aio_unix_socketpair_arg *asa;
asa = arg;
close(asa->asa_sockets[0]);
close(asa->asa_sockets[1]);
aio_fifo_test(waitcomplete, NULL);
}
#define UNIX_SOCKETPAIR_LEN 256
#define UNIX_SOCKETPAIR_TIMEOUT 30
static void
aio_unix_socketpair_test(completion comp)
aio_unix_socketpair_test(completion comp, struct sigevent *sev)
{
struct aio_unix_socketpair_arg arg;
struct aio_context ac;
struct rusage ru_before, ru_after;
int sockets[2];
@ -527,42 +462,51 @@ aio_unix_socketpair_test(completion comp)
ATF_REQUIRE_MSG(socketpair(PF_UNIX, SOCK_STREAM, 0, sockets) != -1,
"socketpair failed: %s", strerror(errno));
arg.asa_sockets[0] = sockets[0];
arg.asa_sockets[1] = sockets[1];
aio_context_init(&ac, sockets[0],
sockets[1], UNIX_SOCKETPAIR_LEN, UNIX_SOCKETPAIR_TIMEOUT,
aio_unix_socketpair_cleanup, &arg);
aio_context_init(&ac, sockets[0], sockets[1], UNIX_SOCKETPAIR_LEN);
ATF_REQUIRE_MSG(getrusage(RUSAGE_SELF, &ru_before) != -1,
"getrusage failed: %s", strerror(errno));
aio_write_test(&ac, comp);
aio_write_test(&ac, comp, sev);
ATF_REQUIRE_MSG(getrusage(RUSAGE_SELF, &ru_after) != -1,
"getrusage failed: %s", strerror(errno));
ATF_REQUIRE(ru_after.ru_msgsnd == ru_before.ru_msgsnd + 1);
ru_before = ru_after;
aio_read_test(&ac, comp);
aio_read_test(&ac, comp, sev);
ATF_REQUIRE_MSG(getrusage(RUSAGE_SELF, &ru_after) != -1,
"getrusage failed: %s", strerror(errno));
ATF_REQUIRE(ru_after.ru_msgrcv == ru_before.ru_msgrcv + 1);
aio_unix_socketpair_cleanup(&arg);
close(sockets[0]);
close(sockets[1]);
}
ATF_TC_WITHOUT_HEAD(socket_poll);
ATF_TC_BODY(socket_poll, tc)
{
aio_unix_socketpair_test(poll);
aio_unix_socketpair_test(poll, NULL);
}
ATF_TC_WITHOUT_HEAD(socket_signal);
ATF_TC_BODY(socket_signal, tc)
{
aio_unix_socketpair_test(poll_signaled, setup_signal());
}
ATF_TC_WITHOUT_HEAD(socket_suspend);
ATF_TC_BODY(socket_suspend, tc)
{
aio_unix_socketpair_test(suspend);
aio_unix_socketpair_test(suspend, NULL);
}
ATF_TC_WITHOUT_HEAD(socket_thread);
ATF_TC_BODY(socket_thread, tc)
{
aio_unix_socketpair_test(poll_signaled, setup_thread());
}
ATF_TC_WITHOUT_HEAD(socket_waitcomplete);
ATF_TC_BODY(socket_waitcomplete, tc)
{
aio_unix_socketpair_test(waitcomplete);
aio_unix_socketpair_test(waitcomplete, NULL);
}
struct aio_pty_arg {
@ -570,22 +514,10 @@ struct aio_pty_arg {
int apa_write_fd;
};
static void
aio_pty_cleanup(void *arg)
{
struct aio_pty_arg *apa;
apa = arg;
close(apa->apa_read_fd);
close(apa->apa_write_fd);
};
#define PTY_LEN 256
#define PTY_TIMEOUT 30
static void
aio_pty_test(completion comp)
aio_pty_test(completion comp, struct sigevent *sev)
{
struct aio_pty_arg arg;
struct aio_context ac;
int read_fd, write_fd;
struct termios ts;
@ -597,62 +529,60 @@ aio_pty_test(completion comp)
ATF_REQUIRE_MSG(openpty(&read_fd, &write_fd, NULL, NULL, NULL) == 0,
"openpty failed: %s", strerror(errno));
arg.apa_read_fd = read_fd;
arg.apa_write_fd = write_fd;
if (tcgetattr(write_fd, &ts) < 0) {
error = errno;
aio_pty_cleanup(&arg);
errno = error;
atf_tc_fail("tcgetattr failed: %s", strerror(errno));
}
cfmakeraw(&ts);
if (tcsetattr(write_fd, TCSANOW, &ts) < 0) {
error = errno;
aio_pty_cleanup(&arg);
errno = error;
atf_tc_fail("tcsetattr failed: %s", strerror(errno));
}
aio_context_init(&ac, read_fd, write_fd, PTY_LEN,
PTY_TIMEOUT, aio_pty_cleanup, &arg);
aio_context_init(&ac, read_fd, write_fd, PTY_LEN);
aio_write_test(&ac, comp);
aio_read_test(&ac, comp);
aio_write_test(&ac, comp, sev);
aio_read_test(&ac, comp, sev);
aio_pty_cleanup(&arg);
close(read_fd);
close(write_fd);
}
ATF_TC_WITHOUT_HEAD(pty_poll);
ATF_TC_BODY(pty_poll, tc)
{
aio_pty_test(poll);
aio_pty_test(poll, NULL);
}
ATF_TC_WITHOUT_HEAD(pty_signal);
ATF_TC_BODY(pty_signal, tc)
{
aio_pty_test(poll_signaled, setup_signal());
}
ATF_TC_WITHOUT_HEAD(pty_suspend);
ATF_TC_BODY(pty_suspend, tc)
{
aio_pty_test(suspend);
aio_pty_test(suspend, NULL);
}
ATF_TC_WITHOUT_HEAD(pty_thread);
ATF_TC_BODY(pty_thread, tc)
{
aio_pty_test(poll_signaled, setup_thread());
}
ATF_TC_WITHOUT_HEAD(pty_waitcomplete);
ATF_TC_BODY(pty_waitcomplete, tc)
{
aio_pty_test(waitcomplete);
}
static void
aio_pipe_cleanup(void *arg)
{
int *pipes = arg;
close(pipes[0]);
close(pipes[1]);
aio_pty_test(waitcomplete, NULL);
}
#define PIPE_LEN 256
#define PIPE_TIMEOUT 30
static void
aio_pipe_test(completion comp)
aio_pipe_test(completion comp, struct sigevent *sev)
{
struct aio_context ac;
int pipes[2];
@ -663,76 +593,83 @@ aio_pipe_test(completion comp)
ATF_REQUIRE_MSG(pipe(pipes) != -1,
"pipe failed: %s", strerror(errno));
aio_context_init(&ac, pipes[0], pipes[1], PIPE_LEN,
PIPE_TIMEOUT, aio_pipe_cleanup, pipes);
aio_write_test(&ac, comp);
aio_read_test(&ac, comp);
aio_context_init(&ac, pipes[0], pipes[1], PIPE_LEN);
aio_write_test(&ac, comp, sev);
aio_read_test(&ac, comp, sev);
aio_pipe_cleanup(pipes);
close(pipes[0]);
close(pipes[1]);
}
ATF_TC_WITHOUT_HEAD(pipe_poll);
ATF_TC_BODY(pipe_poll, tc)
{
aio_pipe_test(poll);
aio_pipe_test(poll, NULL);
}
ATF_TC_WITHOUT_HEAD(pipe_signal);
ATF_TC_BODY(pipe_signal, tc)
{
aio_pipe_test(poll_signaled, setup_signal());
}
ATF_TC_WITHOUT_HEAD(pipe_suspend);
ATF_TC_BODY(pipe_suspend, tc)
{
aio_pipe_test(suspend);
aio_pipe_test(suspend, NULL);
}
ATF_TC_WITHOUT_HEAD(pipe_thread);
ATF_TC_BODY(pipe_thread, tc)
{
aio_pipe_test(poll_signaled, setup_thread());
}
ATF_TC_WITHOUT_HEAD(pipe_waitcomplete);
ATF_TC_BODY(pipe_waitcomplete, tc)
{
aio_pipe_test(waitcomplete);
}
struct aio_md_arg {
int ama_mdctl_fd;
int ama_unit;
int ama_fd;
};
static void
aio_md_cleanup(void *arg)
{
struct aio_md_arg *ama;
struct md_ioctl mdio;
int error;
ama = arg;
if (ama->ama_fd != -1)
close(ama->ama_fd);
if (ama->ama_unit != -1) {
bzero(&mdio, sizeof(mdio));
mdio.md_version = MDIOVERSION;
mdio.md_unit = ama->ama_unit;
if (ioctl(ama->ama_mdctl_fd, MDIOCDETACH, &mdio) == -1) {
error = errno;
close(ama->ama_mdctl_fd);
errno = error;
atf_tc_fail("ioctl MDIOCDETACH failed: %s",
strerror(errno));
}
}
close(ama->ama_mdctl_fd);
aio_pipe_test(waitcomplete, NULL);
}
#define MD_LEN GLOBAL_MAX
#define MD_TIMEOUT 30
#define MDUNIT_LINK "mdunit_link"
static void
aio_md_test(completion comp)
aio_md_cleanup(void)
{
struct md_ioctl mdio;
int mdctl_fd, error, n, unit;
char buf[80];
mdctl_fd = open("/dev/" MDCTL_NAME, O_RDWR, 0);
ATF_REQUIRE(mdctl_fd >= 0);
n = readlink(MDUNIT_LINK, buf, sizeof(buf));
if (n > 0) {
if (sscanf(buf, "%d", &unit) == 1 && unit >= 0) {
bzero(&mdio, sizeof(mdio));
mdio.md_version = MDIOVERSION;
mdio.md_unit = unit;
if (ioctl(mdctl_fd, MDIOCDETACH, &mdio) == -1) {
error = errno;
close(mdctl_fd);
errno = error;
atf_tc_fail("ioctl MDIOCDETACH failed: %s",
strerror(errno));
}
}
}
close(mdctl_fd);
}
static void
aio_md_test(completion comp, struct sigevent *sev)
{
int error, fd, mdctl_fd, unit;
char pathname[PATH_MAX];
struct aio_md_arg arg;
struct aio_context ac;
struct md_ioctl mdio;
char buf[80];
ATF_REQUIRE_KERNEL_MODULE("aio");
ATF_REQUIRE_UNSAFE_AIO();
@ -748,32 +685,30 @@ aio_md_test(completion comp)
mdio.md_mediasize = GLOBAL_MAX;
mdio.md_sectorsize = 512;
arg.ama_mdctl_fd = mdctl_fd;
arg.ama_unit = -1;
arg.ama_fd = -1;
if (ioctl(mdctl_fd, MDIOCATTACH, &mdio) < 0) {
error = errno;
aio_md_cleanup(&arg);
errno = error;
atf_tc_fail("ioctl MDIOCATTACH failed: %s", strerror(errno));
}
close(mdctl_fd);
arg.ama_unit = unit = mdio.md_unit;
/* Store the md unit number in a symlink for future cleanup */
unit = mdio.md_unit;
snprintf(buf, sizeof(buf), "%d", unit);
ATF_REQUIRE_EQ(0, symlink(buf, MDUNIT_LINK));
snprintf(pathname, PATH_MAX, "/dev/md%d", unit);
fd = open(pathname, O_RDWR);
ATF_REQUIRE_MSG(fd != -1,
"opening %s failed: %s", pathname, strerror(errno));
arg.ama_fd = fd;
aio_context_init(&ac, fd, fd, MD_LEN, MD_TIMEOUT,
aio_md_cleanup, &arg);
aio_write_test(&ac, comp);
aio_read_test(&ac, comp);
aio_md_cleanup(&arg);
aio_context_init(&ac, fd, fd, MD_LEN);
aio_write_test(&ac, comp, sev);
aio_read_test(&ac, comp, sev);
close(fd);
}
ATF_TC(md_poll);
ATF_TC_WITH_CLEANUP(md_poll);
ATF_TC_HEAD(md_poll, tc)
{
@ -781,10 +716,29 @@ ATF_TC_HEAD(md_poll, tc)
}
ATF_TC_BODY(md_poll, tc)
{
aio_md_test(poll);
aio_md_test(poll, NULL);
}
ATF_TC_CLEANUP(md_poll, tc)
{
aio_md_cleanup();
}
ATF_TC(md_suspend);
ATF_TC_WITH_CLEANUP(md_signal);
ATF_TC_HEAD(md_signal, tc)
{
atf_tc_set_md_var(tc, "require.user", "root");
}
ATF_TC_BODY(md_signal, tc)
{
aio_md_test(poll_signaled, setup_signal());
}
ATF_TC_CLEANUP(md_signal, tc)
{
aio_md_cleanup();
}
ATF_TC_WITH_CLEANUP(md_suspend);
ATF_TC_HEAD(md_suspend, tc)
{
@ -792,10 +746,29 @@ ATF_TC_HEAD(md_suspend, tc)
}
ATF_TC_BODY(md_suspend, tc)
{
aio_md_test(suspend);
aio_md_test(suspend, NULL);
}
ATF_TC_CLEANUP(md_suspend, tc)
{
aio_md_cleanup();
}
ATF_TC(md_waitcomplete);
ATF_TC_WITH_CLEANUP(md_thread);
ATF_TC_HEAD(md_thread, tc)
{
atf_tc_set_md_var(tc, "require.user", "root");
}
ATF_TC_BODY(md_thread, tc)
{
aio_md_test(poll_signaled, setup_thread());
}
ATF_TC_CLEANUP(md_thread, tc)
{
aio_md_cleanup();
}
ATF_TC_WITH_CLEANUP(md_waitcomplete);
ATF_TC_HEAD(md_waitcomplete, tc)
{
@ -803,7 +776,11 @@ ATF_TC_HEAD(md_waitcomplete, tc)
}
ATF_TC_BODY(md_waitcomplete, tc)
{
aio_md_test(waitcomplete);
aio_md_test(waitcomplete, NULL);
}
ATF_TC_CLEANUP(md_waitcomplete, tc)
{
aio_md_cleanup();
}
ATF_TC_WITHOUT_HEAD(aio_large_read_test);
@ -1152,28 +1129,40 @@ ATF_TP_ADD_TCS(tp)
{
ATF_TP_ADD_TC(tp, file_poll);
ATF_TP_ADD_TC(tp, file_signal);
ATF_TP_ADD_TC(tp, file_suspend);
ATF_TP_ADD_TC(tp, file_thread);
ATF_TP_ADD_TC(tp, file_waitcomplete);
ATF_TP_ADD_TC(tp, fifo_poll);
ATF_TP_ADD_TC(tp, fifo_signal);
ATF_TP_ADD_TC(tp, fifo_suspend);
ATF_TP_ADD_TC(tp, fifo_thread);
ATF_TP_ADD_TC(tp, fifo_waitcomplete);
ATF_TP_ADD_TC(tp, socket_poll);
ATF_TP_ADD_TC(tp, socket_signal);
ATF_TP_ADD_TC(tp, socket_suspend);
ATF_TP_ADD_TC(tp, socket_thread);
ATF_TP_ADD_TC(tp, socket_waitcomplete);
ATF_TP_ADD_TC(tp, pty_poll);
ATF_TP_ADD_TC(tp, pty_signal);
ATF_TP_ADD_TC(tp, pty_suspend);
ATF_TP_ADD_TC(tp, pty_thread);
ATF_TP_ADD_TC(tp, pty_waitcomplete);
ATF_TP_ADD_TC(tp, pipe_poll);
ATF_TP_ADD_TC(tp, pipe_signal);
ATF_TP_ADD_TC(tp, pipe_suspend);
ATF_TP_ADD_TC(tp, pipe_thread);
ATF_TP_ADD_TC(tp, pipe_waitcomplete);
ATF_TP_ADD_TC(tp, md_poll);
ATF_TP_ADD_TC(tp, md_signal);
ATF_TP_ADD_TC(tp, md_suspend);
ATF_TP_ADD_TC(tp, md_thread);
ATF_TP_ADD_TC(tp, md_waitcomplete);
ATF_TP_ADD_TC(tp, aio_fsync_test);
ATF_TP_ADD_TC(tp, aio_large_read_test);
ATF_TP_ADD_TC(tp, aio_socket_two_reads);
ATF_TP_ADD_TC(tp, aio_socket_blocking_short_write);
ATF_TP_ADD_TC(tp, aio_socket_short_write_cancel);
ATF_TP_ADD_TC(tp, aio_fsync_test);
return (atf_no_error());
}

View File

@ -29,7 +29,7 @@
.\" @(#)mt.1 8.1 (Berkeley) 6/6/93
.\" $FreeBSD$
.\"
.Dd May 11, 2017
.Dd July 14, 2017
.Dt MT 1
.Os
.Sh NAME
@ -518,6 +518,7 @@ Value Width Tracks Density Code Type Reference Note
0x53 12.7 (0.5) 1152 13,452 (341,681) C 3592A3 (unencrypted)
0x54 12.7 (0.5) 2560 19,686 (500,024) C 3592A4 (unencrypted)
0x55 12.7 (0.5) 5120 20,670 (525,018) C 3592A5 (unencrypted)
0x56 12.7 (0.5) 7680 20,670 (525,018) C 3592B5 (unencrypted)
0x58 12.7 (0.5) 1280 15,142 (384,607) C LTO-5
0x5A 12.7 (0.5) 2176 15,142 (384,607) C LTO-6
0x5C 12.7 (0.5) 3584 19,107 (485,318) C LTO-7
@ -527,6 +528,7 @@ Value Width Tracks Density Code Type Reference Note
0x73 12.7 (0.5) 1152 13,452 (341,681) C 3592A3 (encrypted)
0x74 12.7 (0.5) 2560 19,686 (500,024) C 3592A4 (encrypted)
0x75 12.7 (0.5) 5120 20,670 (525,018) C 3592A5 (encrypted)
0x76 12.7 (0.5) 7680 20,670 (525,018) C 3592B5 (encrypted)
0x8c 8.0 (0.315) 1 1,789 (45,434) RLL CS EXB-8500c 5,9
0x90 8.0 (0.315) 1 1,703 (43,245) RLL CS EXB-8200c 5,9
.Ed