Mirror of https://github.com/ScrelliCopter/tmx2gba.git

Merge branch 'refs/heads/tmxlite'
8 .github/workflows/cmake.yml vendored
@@ -3,6 +3,7 @@ name: CMake
on:
push:
paths:
- ".github/workflows/cmake.yml"
- "src/**"
- "ext/**"
- "CMakeLists.txt"
@@ -22,7 +23,7 @@ jobs:
- { name: "Windows MSVC x86", os: windows-latest, artifact: windows-x86, arch: x86 }
- { name: "Windows MSVC x64", os: windows-latest, artifact: windows-x64 }
- { name: "Windows MSVC ARM64", os: windows-latest, artifact: windows-arm64, arch: amd64_arm64 }
- { name: "Ubuntu", artifact: "linux", os: ubuntu-latest }
- { name: "Ubuntu", artifact: "linux", os: ubuntu-latest, extra: "-DUSE_BUNDLED_ZSTD:BOOL=OFF -DUSE_BUNDLED_PUGIXML:BOOL=OFF" }
runs-on: ${{matrix.config.os}}

steps:
@@ -34,6 +35,11 @@ jobs:
if: ${{startsWith(matrix.config.os, 'windows')}}
with:
arch: ${{matrix.config.arch && matrix.config.arch || 'x64'}}
- uses: awalsh128/cache-apt-pkgs-action@latest
if: ${{matrix.config.artifact == 'linux'}}
with:
packages: libzstd-dev libpugixml-dev
version: 1.0

- name: Configure CMake
run: >-
CMakeLists.txt

@@ -1,19 +1,48 @@
cmake_minimum_required(VERSION "3.15" FATAL_ERROR)
project(tmx2gba VERSION "0.3")
project(tmx2gba
VERSION "0.7"
DESCRIPTION "Simple CLI utility for converting Tiled (.tmx) maps to GBA-friendly charmaps."
HOMEPAGE_URL "https://github.com/ScrelliCopter/tmx2gba")

# Options
option(TMX2GBA_DKP_INSTALL "Install into DEVKITPRO prefix" OFF)
option(ASAN "Enable address sanitiser" OFF)
option(USE_ZLIB "Use zlib instead of bundled miniz" "${UNIX}")
option(USE_BUNDLED_PUGIXML "Use bundled PUGIXML" ON)
option(USE_BUNDLED_ZSTD "Use bundled libzstd" ON)
option(USE_BUNDLED_TMXLITE "Use bundled tmxlite" ON)

if (ASAN)
option(TMX2GBA_DKP_INSTALL "Install into DEVKITPRO prefix" OFF)

option(ENABLE_ASAN "Enable address sanitiser" OFF)

if (ENABLE_ASAN)
add_compile_options(-fsanitize=address -fno-omit-frame-pointer)
add_link_options(-fsanitize=address -shared-libasan)
endif()

list(APPEND CMAKE_MODULE_PATH "${CMAKE_SOURCE_DIR}/cmake/modules")

# Libraries
if (USE_BUNDLED_PUGIXML)
add_subdirectory(ext/pugixml)
else()
find_package(pugixml REQUIRED CONFIG)
endif()

if (USE_ZLIB)
find_package(ZLIB REQUIRED)
else()
add_subdirectory(ext/miniz)
endif()

if (USE_BUNDLED_ZSTD)
add_subdirectory(ext/zstd)
else()
find_package(Zstd REQUIRED)
endif()

add_subdirectory(ext/base64)
add_subdirectory(ext/miniz)
add_subdirectory(ext/rapidxml)

add_subdirectory(ext/tmxlite)

# Main tmx2gba sources
add_subdirectory(src)
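Taken together, the options above are toggled at configure time; an illustrative command (not part of the commit) exercising the renamed sanitiser switch and the new system-library switches would be:

```bash
cmake -B build -DENABLE_ASAN:BOOL=ON \
    -DUSE_BUNDLED_ZSTD:BOOL=OFF -DUSE_BUNDLED_PUGIXML:BOOL=OFF
```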
85 README.md
@@ -1,43 +1,39 @@
# tmx2gba #
tmx2gba is a simple command line utility that converts [Tiled](http://www.mapeditor.org/) .tmx maps to Game Boy Advance formatted charmaps.
Originally developed for my own personal use, I've thrown it up in case this is of use to anyone else.

If you find a bug, please open an issue.

Enjoy!

### Features ###
* Exports to raw binary that can be easily memcpy'd into VRAM.
* Export raw charmaps that can be easily memcpy'd into VRAM.
* Preserves tile flipping.
* Supports per-tile palette specification.
* Custom collision layer support.
* Support for objects with id mapping.

### How do I use it? ###
## Usage ##
```
tmx2gba [-h] [-r offset] [-lyc name] [-p 0-15] <-i inpath> <-o outpath>
tmx2gba [-hv] [-r offset] [-lyc name] [-p 0-15] [-m name;id] <-i inpath> <-o outpath>
```

| Command      | Required | Notes                                                                  |
|--------------|----------|------------------------------------------------------------------------|
| -h           | N/A      | Display help & command info.                                           |
| -v           | No       | Display version & quit.                                                |
| -l (name)    | No       | Name of layer to use (default first layer in TMX).                     |
| -y (name)    | No       | Layer for palette mappings.                                            |
| -c (name)    | No       | Output a separate 8bit collision map of the specified layer.           |
| -r (offset)  | No       | Offset tile indices (default 0).                                       |
| -p (0-15)    | No       | Select which palette to use for 4-bit tilesets.                        |
| -m (name;id) | No       | Map an object name to an ID, will enable object exports.               |
| -i (path)    | *Yes*    | Path to input TMX file.                                                |
| -o (path)    | *Yes*    | Path to output files.                                                  |
| -f <file>    | No       | Command line instructions list for easy integration with buildscripts  |

| Command      | Required | Notes                                                                                |
|--------------|----------|--------------------------------------------------------------------------------------|
| -h           | N/A      | Display help & command info                                                          |
| -v           | No       | Display version & quit                                                               |
| -l (name)    | No       | Name of layer to use (default first layer in TMX)                                    |
| -y (name)    | No       | Layer for palette mappings                                                           |
| -c (name)    | No       | Output a separate 8bit collision map of the specified layer                          |
| -r (offset)  | No       | Offset tile indices (default 0)                                                      |
| -p (0-15)    | No       | Select which palette to use for 4-bit tilesets                                       |
| -m (name;id) | No       | Map an object name to an ID, will enable object exports                              |
| -i (path)    | *Yes*    | Path to input TMX file                                                               |
| -o (path)    | *Yes*    | Path to output files                                                                 |
| -f <file>    | No       | Flag file containing command-line arguments for easy integration with buildscripts  |
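As a concrete illustration of the table above (file and layer names invented), a typical invocation might be:

```
tmx2gba -i level1.tmx -o level1 -l tiles -c collision -p 2 -r 32
```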
### How do I build it? ###
## Building ##

Dependencies for building are CMake 3.x and a C++11 compliant compiler,
Dependencies for building are CMake 3.15 and a C++20 compliant compiler,
all other dependencies are in-tree so you should be able to build with:
```bash
cmake -B build -DCMAKE_BUILD_TYPE=RelWithDebInfo
make -C build -j$(nproc --all)
cmake --build build
```

Optionally, you may install it to use it system wide:
@@ -45,10 +41,8 @@ Optionally, you may install it to use it system wide:
sudo cmake --install build
```
Which will copy the tmx2gba executable to /usr/local/bin/tmx2gba by default,
if you prefer to use /usr for some reason you may specify a prefix like so:
```bash
sudo cmake --install build --prefix /usr
```
`--prefix /usr` can be used to override install location.

If you're a devkitPro user and would prefer to keep all your development tools compartmentalised
you may optionally install to the tools directory with the `TMX2GBA_DKP_INSTALL` option (OFF by default).
The build scripts will respect your `DEVKITPRO` environment variable but if not set will install to
@@ -63,32 +57,11 @@ sudo cmake --install build
* Add support for multi-SBB prepared charmaps.
* Check if this works for NDS as well.
* Compression support.
* Support for less common TMX formats.

### License ###
[tmx2gba](https://github.com/ScrelliCopter/tmx2gba) is licensed under the zlib license.
[RapidXML](http://rapidxml.sourceforge.net/) is licensed under the Boost & MIT licenses.
[René Nyffenegger's base64.cpp](https://github.com/ReneNyffenegger/cpp-base64) is licensed under the zlib license.
[miniz](https://github.com/richgel999/miniz) is public domain software.
[ultragetopt](https://github.com/kevinoid/ultragetopt) is licensed under the MIT license.

```
Copyright (C) 2015-2023 a dinosaur

This software is provided 'as-is', without any express or implied
warranty. In no event will the authors be held liable for any damages
arising from the use of this software.

Permission is granted to anyone to use this software for any purpose,
including commercial applications, and to alter it and redistribute it
freely, subject to the following restrictions:

1. The origin of this software must not be misrepresented; you must not
claim that you wrote the original software. If you use this software
in a product, an acknowledgment in the product documentation would be
appreciated but is not required.
2. Altered source versions must be plainly marked as such, and must not be
misrepresented as being the original software.
3. This notice may not be removed or altered from any source distribution.

```
## License ##
[tmx2gba](https://github.com/ScrelliCopter/tmx2gba) is licensed under the [Zlib license](COPYING.txt).
- A modified [tmxlite](https://github.com/fallahn/tmxlite) is licensed under the [Zlib license](ext/tmxlite/LICENSE).
- [pugixml](https://pugixml.org/) is licensed under the [MIT license](ext/pugixml/LICENSE.md).
- [René Nyffenegger's base64.cpp](https://github.com/ReneNyffenegger/cpp-base64) is licensed under the [Zlib license](ext/base64/LICENSE).
- [miniz](https://github.com/richgel999/miniz) is licensed under the [MIT license](ext/miniz/LICENSE).
- [ZStandard](https://facebook.github.io/zstd/) is licensed under the [BSD 3-clause license](ext/zstd/LICENSE).
171 cmake/modules/FindZstd.cmake Normal file
@@ -0,0 +1,171 @@
# SPDX-License-Identifier: Zlib
# SPDX-FileCopyrightText: 2024 a dinosaur

#[=======================================================================[.rst:
FindZstd
--------

Find the Facebook Zstd library.

Imported Targets
^^^^^^^^^^^^^^^^

.. variable:: Zstd::Zstd

:prop_tgt:`IMPORTED` target for using Zstd, if Zstd is found.

Result Variables
^^^^^^^^^^^^^^^^

This module defines the following variables:
.. variable:: Zstd_FOUND

True if Zstd was found.

.. variable:: Zstd_INCLUDE_DIRS

Path to the directory containing the Zstd headers (zstd.h, etc.)

.. variable:: Zstd_LIBRARIES

Location of the Zstd library.

.. variable:: Zstd_VERSION

The version of Zstd found.

Legacy Variables
^^^^^^^^^^^^^^^^

The following variables are defined by the official Zstd CMakeLists.txt:

.. variable:: zstd_VERSION_MAJOR

The major version of Zstd.

.. variable:: zstd_VERSION_MINOR

The minor version of Zstd.

.. variable:: zstd_VERSION_PATCH

The patch/release version of Zstd.

The following variables are provided for compatibility with old find modules:

.. variable:: ZSTD_INCLUDE_DIR

Directory containing the Zstd header. (use ``Zstd_INCLUDE_DIRS`` instead)

.. variable:: ZSTD_LIBRARY

The Zstd library. (use ``Zstd_LIBRARIES`` instead)

Hints
^^^^^

.. variable:: Zstd_PREFER_STATIC_LIBS

Set to ``ON`` to prefer static libraries. Defaults to ``OFF``

Cache Variables
^^^^^^^^^^^^^^^

The following cache variables may also be set,
these are transitory and should not be relied upon:

.. variable:: Zstd_INCLUDE_DIR

Directory containing the Zstd header.

.. variable:: Zstd_LIBRARY_DEBUG

The Zstd debug library if found.

.. variable:: Zstd_LIBRARY_RELEASE

The Zstd release library if found.

.. variable:: Zstd_LIBRARY

The Zstd library.

#]=======================================================================]

#TODO: define Zstd::static & Zstd::shared and alias Zstd::Zstd based on preference

find_path(Zstd_INCLUDE_DIR NAMES zstd.h)

mark_as_advanced(Zstd_INCLUDE_DIR)

if (Zstd_PREFER_STATIC_LIBS)
find_library(Zstd_LIBRARY_DEBUG NAMES zstd_staticd zstdd)
find_library(Zstd_LIBRARY_RELEASE NAMES zstd_static zstd)
else()
find_library(Zstd_LIBRARY_DEBUG NAMES zstdd zstd_staticd)
find_library(Zstd_LIBRARY_RELEASE NAMES zstd zstd_static)
endif()

include(SelectLibraryConfigurations)
select_library_configurations(Zstd)

mark_as_advanced(Zstd_LIBRARY Zstd_LIBRARY_DEBUG Zstd_LIBRARY_RELEASE)

if (Zstd_INCLUDE_DIR AND EXISTS "${Zstd_INCLUDE_DIR}/zstd.h")
function (_zstd_read_define _variable _define)
set(_file "${Zstd_INCLUDE_DIR}/zstd.h")
set(_regex "#define[ \t]+${_define}[ \t]+([0-9]+)")
file(STRINGS "${_file}" _line LIMIT_COUNT 1 REGEX "${_regex}")
if (CMAKE_VERSION VERSION_LESS "3.29")
string(REGEX MATCH "${_regex}" _line "${_line}")
endif()
set(${_variable} ${CMAKE_MATCH_1} PARENT_SCOPE)
endfunction()

_zstd_read_define(zstd_VERSION_MAJOR "ZSTD_VERSION_MAJOR")
_zstd_read_define(zstd_VERSION_MINOR "ZSTD_VERSION_MINOR")
_zstd_read_define(zstd_VERSION_PATCH "ZSTD_VERSION_RELEASE")
set(Zstd_VERSION "${zstd_VERSION_MAJOR}.${zstd_VERSION_MINOR}.${zstd_VERSION_PATCH}")

mark_as_advanced(zstd_VERSION_MAJOR zstd_VERSION_MINOR zstd_VERSION_PATCH Zstd_VERSION)
endif()

include(FindPackageHandleStandardArgs)
find_package_handle_standard_args(Zstd
REQUIRED_VARS Zstd_LIBRARY Zstd_INCLUDE_DIR
VERSION_VAR Zstd_VERSION)

mark_as_advanced(Zstd_FOUND)

if (Zstd_FOUND)
set(Zstd_INCLUDE_DIRS ${Zstd_INCLUDE_DIR})
set(Zstd_LIBRARIES ${Zstd_LIBRARY})

# Legacy variables
set(ZSTD_INCLUDE_DIR ${Zstd_INCLUDE_DIR})
set(ZSTD_LIBRARY ${Zstd_LIBRARY})
mark_as_advanced(ZSTD_INCLUDE_DIR ZSTD_LIBRARY)

if (NOT TARGET Zstd::Zstd)
add_library(Zstd::Zstd UNKNOWN IMPORTED)
set_property(TARGET Zstd::Zstd PROPERTY
INTERFACE_INCLUDE_DIRECTORIES "${Zstd_INCLUDE_DIR}")
endif()

if (NOT Zstd_LIBRARY_DEBUG AND NOT Zstd_LIBRARY_RELEASE)
set_property(TARGET Zstd::Zstd PROPERTY
IMPORTED_LOCATION "${Zstd_LIBRARY}")
endif()
if (Zstd_LIBRARY_DEBUG)
set_property(TARGET Zstd::Zstd APPEND PROPERTY
IMPORTED_CONFIGURATIONS DEBUG)
set_property(TARGET Zstd::Zstd PROPERTY
IMPORTED_LOCATION_DEBUG "${Zstd_LIBRARY_DEBUG}")
endif()
if (Zstd_LIBRARY_RELEASE)
set_property(TARGET Zstd::Zstd APPEND PROPERTY
IMPORTED_CONFIGURATIONS RELEASE)
set_property(TARGET Zstd::Zstd PROPERTY
IMPORTED_LOCATION_RELEASE "${Zstd_LIBRARY_RELEASE}")
endif()
endif()
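A usage sketch (not part of the commit) of how a project consumes this module: the module-path append and the `find_package(Zstd REQUIRED)` call mirror the CMakeLists.txt changes above, while the `target_link_libraries` line is an assumed consumer.

```cmake
list(APPEND CMAKE_MODULE_PATH "${CMAKE_SOURCE_DIR}/cmake/modules")
find_package(Zstd REQUIRED)                        # runs cmake/modules/FindZstd.cmake
target_link_libraries(tmx2gba PRIVATE Zstd::Zstd)  # imported target defined by the module
```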
ext/base64/CMakeLists.txt

@@ -1,5 +1,5 @@
add_library(base64
base64.cpp base64.h)
add_library(External::base64 ALIAS base64)
add_library(base64::base64 ALIAS base64)
target_include_directories(base64
PUBLIC ${CMAKE_CURRENT_SOURCE_DIR})
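Moving from the ad-hoc `External::base64` alias to the conventional `base64::base64` namespace (and likewise `miniz::miniz` below) lets consumers link these targets the usual modern-CMake way; an illustrative consumer line, assuming the `tmx2gba` target in src/:

```cmake
target_link_libraries(tmx2gba PRIVATE base64::base64 miniz::miniz)
```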
19 ext/base64/LICENSE Normal file
@@ -0,0 +1,19 @@
Copyright © 2004-2017 by René Nyffenegger

This source code is provided 'as-is', without any express or implied
warranty. In no event will the author be held liable for any damages
arising from the use of this software.

Permission is granted to anyone to use this software for any purpose,
including commercial applications, and to alter it and redistribute it
freely, subject to the following restrictions:

1. The origin of this source code must not be misrepresented; you must not
claim that you wrote the original source code. If you use this source code
in a product, an acknowledgment in the product documentation would be
appreciated but is not required.

2. Altered source versions must be plainly marked as such, and must not be
misrepresented as being the original source code.

3. This notice may not be removed or altered from any source distribution.
ext/base64/base64.cpp

@@ -5,9 +5,9 @@
More information at
https://renenyffenegger.ch/notes/development/Base64/Encoding-and-decoding-base-64-with-cpp

Version: 2.rc.08 (release candidate)
Version: 2.rc.09 (release candidate)

Copyright (C) 2004-2017, 2020, 2021 René Nyffenegger
Copyright (C) 2004-2017, 2020-2022 René Nyffenegger

This source code is provided 'as-is', without any express or implied
warranty. In no event will the author be held liable for any damages
@@ -34,14 +34,19 @@
#include "base64.h"

#include <algorithm>

#if defined(__cpp_exceptions)
#include <stdexcept>
#else
#include <cassert>
#endif

//
// Depending on the url parameter in base64_chars, one of
// two sets of base64 characters needs to be chosen.
// They differ in their last two characters.
//
static const char* base64_chars[2] = {
static const char* to_base64_chars[2] = {
"ABCDEFGHIJKLMNOPQRSTUVWXYZ"
"abcdefghijklmnopqrstuvwxyz"
"0123456789"
@@ -52,22 +57,40 @@ static const char* base64_chars[2] = {
"0123456789"
"-_"};

static const unsigned char from_base64_chars[256] = {
64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64,
64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64,
64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 62, 64, 62, 64, 63,
52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 64, 64, 64, 64, 64, 64,
64, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 64, 64, 64, 64, 63,
64, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40,
41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 64, 64, 64, 64, 64,
64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64,
64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64,
64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64,
64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64,
64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64,
64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64,
64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64,
64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64
};

static unsigned int pos_of_char(const unsigned char chr) {
//
// Return the position of chr within base64_encode()
//

if (chr >= 'A' && chr <= 'Z') return chr - 'A';
else if (chr >= 'a' && chr <= 'z') return chr - 'a' + ('Z' - 'A') + 1;
else if (chr >= '0' && chr <= '9') return chr - '0' + ('Z' - 'A') + ('z' - 'a') + 2;
else if (chr == '+' || chr == '-') return 62; // Be liberal with input and accept both url ('-') and non-url ('+') base 64 characters (
else if (chr == '/' || chr == '_') return 63; // Ditto for '/' and '_'
else
if (from_base64_chars[chr] != 64) return from_base64_chars[chr];
//
// 2020-10-23: Throw std::exception rather than const char*
//(Pablo Martin-Gomez, https://github.com/Bouska)
//
#if defined(__cpp_exceptions)
throw std::runtime_error("Input is not valid base64-encoded data.");
#else
assert(!"Input is not valid base64-encoded data.");
#endif
}

static std::string insert_linebreaks(std::string str, size_t distance) {
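The new `from_base64_chars` table replaces the branch chain that follows it in the hunk; a self-contained sanity sketch (illustrative, not from the commit) re-deriving the same mapping with the retired arithmetic:

```cpp
#include <cassert>

// Same mapping the table encodes: 26 uppercase, 26 lowercase, 10 digits,
// then the url/non-url character pairs; 64 is the "invalid input" sentinel.
static unsigned pos(unsigned char c) {
    if (c >= 'A' && c <= 'Z') return c - 'A';
    if (c >= 'a' && c <= 'z') return c - 'a' + 26;
    if (c >= '0' && c <= '9') return c - '0' + 52;
    if (c == '+' || c == '-') return 62;  // liberal: accept url and non-url forms
    if (c == '/' || c == '_') return 63;  // ditto
    return 64;
}

int main() {
    assert(pos('A') == 0 && pos('z') == 51 && pos('9') == 61);
    assert(pos('-') == 62 && pos('_') == 63 && pos('=') == 64);
    return 0;
}
```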
@@ -123,7 +146,7 @@ std::string base64_encode(unsigned char const* bytes_to_encode, size_t in_len, b
// the correct character set is chosen by subscripting
// base64_chars with url.
//
const char* base64_chars_ = base64_chars[url];
const char* base64_chars_ = to_base64_chars[url];

std::string ret;
ret.reserve(len_encoded);
@@ -160,7 +183,7 @@ std::string base64_encode(unsigned char const* bytes_to_encode, size_t in_len, b
}

template <typename String>
static std::string decode(String encoded_string, bool remove_linebreaks) {
static std::string decode(String const& encoded_string, bool remove_linebreaks) {
//
// decode(…) is templated so that it can be used with String = const std::string&
// or std::string_view (requires at least C++17)
@@ -204,33 +227,33 @@ static std::string decode(String encoded_string, bool remove_linebreaks) {
// The last chunk produces at least one and up to three bytes.
//

size_t pos_of_char_1 = pos_of_char(encoded_string[pos+1] );
size_t pos_of_char_1 = pos_of_char(encoded_string.at(pos+1) );

//
// Emit the first output byte that is produced in each chunk:
//
ret.push_back(static_cast<std::string::value_type>( ( (pos_of_char(encoded_string[pos+0]) ) << 2 ) + ( (pos_of_char_1 & 0x30 ) >> 4)));
ret.push_back(static_cast<std::string::value_type>( ( (pos_of_char(encoded_string.at(pos+0)) ) << 2 ) + ( (pos_of_char_1 & 0x30 ) >> 4)));

if ( ( pos + 2 < length_of_string ) && // Check for data that is not padded with equal signs (which is allowed by RFC 2045)
encoded_string[pos+2] != '=' &&
encoded_string[pos+2] != '.' // accept URL-safe base 64 strings, too, so check for '.' also.
encoded_string.at(pos+2) != '=' &&
encoded_string.at(pos+2) != '.' // accept URL-safe base 64 strings, too, so check for '.' also.
)
{
//
// Emit a chunk's second byte (which might not be produced in the last chunk).
//
unsigned int pos_of_char_2 = pos_of_char(encoded_string[pos+2] );
unsigned int pos_of_char_2 = pos_of_char(encoded_string.at(pos+2) );
ret.push_back(static_cast<std::string::value_type>( (( pos_of_char_1 & 0x0f) << 4) + (( pos_of_char_2 & 0x3c) >> 2)));

if ( ( pos + 3 < length_of_string ) &&
encoded_string[pos+3] != '=' &&
encoded_string[pos+3] != '.'
encoded_string.at(pos+3) != '=' &&
encoded_string.at(pos+3) != '.'
)
{
//
// Emit a chunk's third byte (which might not be produced in the last chunk).
//
ret.push_back(static_cast<std::string::value_type>( ( (pos_of_char_2 & 0x03 ) << 6 ) + pos_of_char(encoded_string[pos+3]) ));
ret.push_back(static_cast<std::string::value_type>( ( (pos_of_char_2 & 0x03 ) << 6 ) + pos_of_char(encoded_string.at(pos+3)) ));
}
}

ext/base64/base64.h

@@ -1,6 +1,6 @@
//
// base64 encoding and decoding with C++.
// Version: 2.rc.08 (release candidate)
// Version: 2.rc.09 (release candidate)
//

#ifndef BASE64_H_C0CE2A47_D10E_42C9_A27C_C883944E704A
ext/miniz/CMakeLists.txt

@@ -1,5 +1,5 @@
add_library(miniz
miniz.c miniz.h)
add_library(External::miniz ALIAS miniz)
add_library(miniz::miniz ALIAS miniz)
target_include_directories(miniz
PUBLIC ${CMAKE_CURRENT_SOURCE_DIR})
22 ext/miniz/LICENSE Normal file
@@ -0,0 +1,22 @@
Copyright 2013-2014 RAD Game Tools and Valve Software
Copyright 2010-2014 Rich Geldreich and Tenacious Software LLC

All Rights Reserved.

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
334 ext/miniz/miniz.c vendored
@@ -187,6 +187,8 @@ const char *mz_version(void)

#ifndef MINIZ_NO_ZLIB_APIS

#ifndef MINIZ_NO_DEFLATE_APIS

int mz_deflateInit(mz_streamp pStream, int level)
{
return mz_deflateInit2(pStream, level, MZ_DEFLATED, MZ_DEFAULT_WINDOW_BITS, 9, MZ_DEFAULT_STRATEGY);
@@ -321,7 +323,7 @@ int mz_compress2(unsigned char *pDest, mz_ulong *pDest_len, const unsigned char
memset(&stream, 0, sizeof(stream));

/* In case mz_ulong is 64-bits (argh I hate longs). */
if ((source_len | *pDest_len) > 0xFFFFFFFFU)
if ((mz_uint64)(source_len | *pDest_len) > 0xFFFFFFFFU)
return MZ_PARAM_ERROR;

stream.next_in = pSource;
@@ -354,6 +356,10 @@ mz_ulong mz_compressBound(mz_ulong source_len)
return mz_deflateBound(NULL, source_len);
}

#endif /*#ifndef MINIZ_NO_DEFLATE_APIS*/

#ifndef MINIZ_NO_INFLATE_APIS

typedef struct
{
tinfl_decompressor m_decomp;
@@ -560,7 +566,7 @@ int mz_uncompress2(unsigned char *pDest, mz_ulong *pDest_len, const unsigned cha
memset(&stream, 0, sizeof(stream));

/* In case mz_ulong is 64-bits (argh I hate longs). */
if ((*pSource_len | *pDest_len) > 0xFFFFFFFFU)
if ((mz_uint64)(*pSource_len | *pDest_len) > 0xFFFFFFFFU)
return MZ_PARAM_ERROR;

stream.next_in = pSource;
@@ -589,6 +595,8 @@ int mz_uncompress(unsigned char *pDest, mz_ulong *pDest_len, const unsigned char
return mz_uncompress2(pDest, pDest_len, pSource, &source_len);
}

#endif /*#ifndef MINIZ_NO_INFLATE_APIS*/

const char *mz_error(int err)
{
static struct
@@ -666,6 +674,8 @@ const char *mz_error(int err)

#ifndef MINIZ_NO_DEFLATE_APIS

#ifdef __cplusplus
extern "C" {
#endif
@@ -744,7 +754,7 @@ static tdefl_sym_freq *tdefl_radix_sort_syms(mz_uint num_syms, tdefl_sym_freq *p
{
mz_uint32 total_passes = 2, pass_shift, pass, i, hist[256 * 2];
tdefl_sym_freq *pCur_syms = pSyms0, *pNew_syms = pSyms1;
MZ_CLEAR_OBJ(hist);
MZ_CLEAR_ARR(hist);
for (i = 0; i < num_syms; i++)
{
mz_uint freq = pSyms0[i].m_key;
@@ -862,7 +872,7 @@ static void tdefl_optimize_huffman_table(tdefl_compressor *d, int table_num, int
{
int i, j, l, num_codes[1 + TDEFL_MAX_SUPPORTED_HUFF_CODESIZE];
mz_uint next_code[TDEFL_MAX_SUPPORTED_HUFF_CODESIZE + 1];
MZ_CLEAR_OBJ(num_codes);
MZ_CLEAR_ARR(num_codes);
if (static_table)
{
for (i = 0; i < table_len; i++)
@@ -888,8 +898,8 @@ static void tdefl_optimize_huffman_table(tdefl_compressor *d, int table_num, int

tdefl_huffman_enforce_max_code_size(num_codes, num_used_syms, code_size_limit);

MZ_CLEAR_OBJ(d->m_huff_code_sizes[table_num]);
MZ_CLEAR_OBJ(d->m_huff_codes[table_num]);
MZ_CLEAR_ARR(d->m_huff_code_sizes[table_num]);
MZ_CLEAR_ARR(d->m_huff_codes[table_num]);
for (i = 1, j = num_used_syms; i <= code_size_limit; i++)
for (l = num_codes[i]; l > 0; l--)
d->m_huff_code_sizes[table_num][pSyms[--j].m_sym_index] = (mz_uint8)(i);
@@ -975,7 +985,7 @@ static void tdefl_optimize_huffman_table(tdefl_compressor *d, int table_num, int
} \
}

static mz_uint8 s_tdefl_packed_code_size_syms_swizzle[] = { 16, 17, 18, 0, 8, 7, 9, 6, 10, 5, 11, 4, 12, 3, 13, 2, 14, 1, 15 };
static const mz_uint8 s_tdefl_packed_code_size_syms_swizzle[] = { 16, 17, 18, 0, 8, 7, 9, 6, 10, 5, 11, 4, 12, 3, 13, 2, 14, 1, 15 };

static void tdefl_start_dynamic_block(tdefl_compressor *d)
{
@@ -1113,7 +1123,8 @@ static mz_bool tdefl_compress_lz_codes(tdefl_compressor *d)
if (flags & 1)
{
mz_uint s0, s1, n0, n1, sym, num_extra_bits;
mz_uint match_len = pLZ_codes[0], match_dist = *(const mz_uint16 *)(pLZ_codes + 1);
mz_uint match_len = pLZ_codes[0];
mz_uint match_dist = (pLZ_codes[1] | (pLZ_codes[2] << 8));
pLZ_codes += 3;

MZ_ASSERT(d->m_huff_code_sizes[0][s_tdefl_len_sym[match_len]]);
@@ -1158,7 +1169,7 @@ static mz_bool tdefl_compress_lz_codes(tdefl_compressor *d)
if (pOutput_buf >= d->m_pOutput_buf_end)
return MZ_FALSE;

*(mz_uint64 *)pOutput_buf = bit_buffer;
memcpy(pOutput_buf, &bit_buffer, sizeof(mz_uint64));
pOutput_buf += (bits_in >> 3);
bit_buffer >>= (bits_in & ~7);
bits_in &= 7;
@@ -1240,6 +1251,8 @@ static mz_bool tdefl_compress_block(tdefl_compressor *d, mz_bool static_block)
return tdefl_compress_lz_codes(d);
}

static const mz_uint s_tdefl_num_probes[11];

static int tdefl_flush_block(tdefl_compressor *d, int flush)
{
mz_uint saved_bit_buf, saved_bits_in;
@@ -1260,8 +1273,27 @@ static int tdefl_flush_block(tdefl_compressor *d, int flush)

if ((d->m_flags & TDEFL_WRITE_ZLIB_HEADER) && (!d->m_block_index))
{
TDEFL_PUT_BITS(0x78, 8);
TDEFL_PUT_BITS(0x01, 8);
const mz_uint8 cmf = 0x78;
mz_uint8 flg, flevel = 3;
mz_uint header, i, mz_un = sizeof(s_tdefl_num_probes) / sizeof(mz_uint);

/* Determine compression level by reversing the process in tdefl_create_comp_flags_from_zip_params() */
for (i = 0; i < mz_un; i++)
if (s_tdefl_num_probes[i] == (d->m_flags & 0xFFF)) break;

if (i < 2)
flevel = 0;
else if (i < 6)
flevel = 1;
else if (i == 6)
flevel = 2;

header = cmf << 8 | (flevel << 6);
header += 31 - (header % 31);
flg = header & 0xFF;

TDEFL_PUT_BITS(cmf, 8);
TDEFL_PUT_BITS(flg, 8);
}

TDEFL_PUT_BITS(flush == TDEFL_FINISH, 1);
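The arithmetic in that new header block follows RFC 1950: the FCHECK bits of FLG must make the 16-bit value CMF*256 + FLG divisible by 31. A standalone check (illustrative, not from the commit; flevel = 2 yields the familiar default-compression header 78 9C):

```c
#include <assert.h>

int main(void)
{
    unsigned cmf = 0x78, flevel = 2;          /* deflate, 32K window; "default" level */
    unsigned header = (cmf << 8) | (flevel << 6);
    header += 31 - (header % 31);             /* set FCHECK, as in the diff */
    assert(header % 31 == 0);                 /* RFC 1950 validity condition */
    assert((header & 0xFF) == 0x9C);          /* the classic 78 9C byte pair */
    return 0;
}
```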
@@ -1732,7 +1764,7 @@ static mz_bool tdefl_compress_normal(tdefl_compressor *d)
mz_uint dst_pos = (d->m_lookahead_pos + d->m_lookahead_size) & TDEFL_LZ_DICT_SIZE_MASK, ins_pos = d->m_lookahead_pos + d->m_lookahead_size - 2;
mz_uint hash = (d->m_dict[ins_pos & TDEFL_LZ_DICT_SIZE_MASK] << TDEFL_LZ_HASH_SHIFT) ^ d->m_dict[(ins_pos + 1) & TDEFL_LZ_DICT_SIZE_MASK];
mz_uint num_bytes_to_process = (mz_uint)MZ_MIN(src_buf_left, TDEFL_MAX_MATCH_LEN - d->m_lookahead_size);
const mz_uint8 *pSrc_end = pSrc + num_bytes_to_process;
const mz_uint8 *pSrc_end = pSrc ? pSrc + num_bytes_to_process : NULL;
src_buf_left -= num_bytes_to_process;
d->m_lookahead_size += num_bytes_to_process;
while (pSrc != pSrc_end)
@@ -1942,8 +1974,8 @@ tdefl_status tdefl_compress(tdefl_compressor *d, const void *pIn_buf, size_t *pI
d->m_finished = (flush == TDEFL_FINISH);
if (flush == TDEFL_FULL_FLUSH)
{
MZ_CLEAR_OBJ(d->m_hash);
MZ_CLEAR_OBJ(d->m_next);
MZ_CLEAR_ARR(d->m_hash);
MZ_CLEAR_ARR(d->m_next);
d->m_dict_size = 0;
}
}
@@ -1966,7 +1998,7 @@ tdefl_status tdefl_init(tdefl_compressor *d, tdefl_put_buf_func_ptr pPut_buf_fun
d->m_greedy_parsing = (flags & TDEFL_GREEDY_PARSING_FLAG) != 0;
d->m_max_probes[1] = 1 + (((flags & 0xFFF) >> 2) + 2) / 3;
if (!(flags & TDEFL_NONDETERMINISTIC_PARSING_FLAG))
MZ_CLEAR_OBJ(d->m_hash);
MZ_CLEAR_ARR(d->m_hash);
d->m_lookahead_pos = d->m_lookahead_size = d->m_dict_size = d->m_total_lz_bytes = d->m_lz_code_buf_dict_pos = d->m_bits_in = 0;
d->m_output_flush_ofs = d->m_output_flush_remaining = d->m_finished = d->m_block_index = d->m_bit_buffer = d->m_wants_to_finish = 0;
d->m_pLZ_code_buf = d->m_lz_code_buf + 1;
@@ -1987,7 +2019,7 @@ tdefl_status tdefl_init(tdefl_compressor *d, tdefl_put_buf_func_ptr pPut_buf_fun
d->m_src_buf_left = 0;
d->m_out_buf_ofs = 0;
if (!(flags & TDEFL_NONDETERMINISTIC_PARSING_FLAG))
MZ_CLEAR_OBJ(d->m_dict);
MZ_CLEAR_ARR(d->m_dict);
memset(&d->m_huff_count[0][0], 0, sizeof(d->m_huff_count[0][0]) * TDEFL_MAX_HUFF_SYMBOLS_0);
memset(&d->m_huff_count[1][0], 0, sizeof(d->m_huff_count[1][0]) * TDEFL_MAX_HUFF_SYMBOLS_1);
return TDEFL_STATUS_OKAY;
@@ -2197,7 +2229,7 @@ void *tdefl_write_image_to_png_file_in_memory(const void *pImage, int w, int h,
/* Allocate the tdefl_compressor and tinfl_decompressor structures in C so that */
/* non-C language bindings to tdefL_ and tinfl_ API don't need to worry about */
/* structure size and allocation mechanism. */
tdefl_compressor *tdefl_compressor_alloc()
tdefl_compressor *tdefl_compressor_alloc(void)
{
return (tdefl_compressor *)MZ_MALLOC(sizeof(tdefl_compressor));
}
@@ -2215,6 +2247,8 @@ void tdefl_compressor_free(tdefl_compressor *pComp)
#ifdef __cplusplus
}
#endif

#endif /*#ifndef MINIZ_NO_DEFLATE_APIS*/
/**************************************************************************
*
* Copyright 2013-2014 RAD Game Tools and Valve Software
@@ -2243,6 +2277,8 @@ void tdefl_compressor_free(tdefl_compressor *pComp)

#ifndef MINIZ_NO_INFLATE_APIS

#ifdef __cplusplus
extern "C" {
#endif
@@ -2323,10 +2359,10 @@ extern "C" {
/* It reads just enough bytes from the input stream that are needed to decode the next Huffman code (and absolutely no more). It works by trying to fully decode a */
/* Huffman code by using whatever bits are currently present in the bit buffer. If this fails, it reads another byte, and tries again until it succeeds or until the */
/* bit buffer contains >=15 bits (deflate's max. Huffman code size). */
#define TINFL_HUFF_BITBUF_FILL(state_index, pHuff) \
#define TINFL_HUFF_BITBUF_FILL(state_index, pLookUp, pTree) \
do \
{ \
temp = (pHuff)->m_look_up[bit_buf & (TINFL_FAST_LOOKUP_SIZE - 1)]; \
temp = pLookUp[bit_buf & (TINFL_FAST_LOOKUP_SIZE - 1)]; \
if (temp >= 0) \
{ \
code_len = temp >> 9; \
@@ -2338,7 +2374,7 @@ extern "C" {
code_len = TINFL_FAST_LOOKUP_BITS; \
do \
{ \
temp = (pHuff)->m_tree[~temp + ((bit_buf >> code_len++) & 1)]; \
temp = pTree[~temp + ((bit_buf >> code_len++) & 1)]; \
} while ((temp < 0) && (num_bits >= (code_len + 1))); \
if (temp >= 0) \
break; \
@@ -2354,7 +2390,7 @@ extern "C" {
/* The slow path is only executed at the very end of the input buffer. */
/* v1.16: The original macro handled the case at the very end of the passed-in input buffer, but we also need to handle the case where the user passes in 1+zillion bytes */
/* following the deflate data and our non-conservative read-ahead path won't kick in here on this code. This is much trickier. */
#define TINFL_HUFF_DECODE(state_index, sym, pHuff) \
#define TINFL_HUFF_DECODE(state_index, sym, pLookUp, pTree) \
do \
{ \
int temp; \
@@ -2363,7 +2399,7 @@ extern "C" {
{ \
if ((pIn_buf_end - pIn_buf_cur) < 2) \
{ \
TINFL_HUFF_BITBUF_FILL(state_index, pHuff); \
TINFL_HUFF_BITBUF_FILL(state_index, pLookUp, pTree); \
} \
else \
{ \
@@ -2372,14 +2408,14 @@ extern "C" {
num_bits += 16; \
} \
} \
if ((temp = (pHuff)->m_look_up[bit_buf & (TINFL_FAST_LOOKUP_SIZE - 1)]) >= 0) \
if ((temp = pLookUp[bit_buf & (TINFL_FAST_LOOKUP_SIZE - 1)]) >= 0) \
code_len = temp >> 9, temp &= 511; \
else \
{ \
code_len = TINFL_FAST_LOOKUP_BITS; \
do \
{ \
temp = (pHuff)->m_tree[~temp + ((bit_buf >> code_len++) & 1)]; \
temp = pTree[~temp + ((bit_buf >> code_len++) & 1)]; \
} while (temp < 0); \
} \
sym = temp; \
@@ -2388,20 +2424,33 @@ extern "C" {
} \
MZ_MACRO_END

static void tinfl_clear_tree(tinfl_decompressor *r)
{
if (r->m_type == 0)
MZ_CLEAR_ARR(r->m_tree_0);
else if (r->m_type == 1)
MZ_CLEAR_ARR(r->m_tree_1);
else
MZ_CLEAR_ARR(r->m_tree_2);
}

tinfl_status tinfl_decompress(tinfl_decompressor *r, const mz_uint8 *pIn_buf_next, size_t *pIn_buf_size, mz_uint8 *pOut_buf_start, mz_uint8 *pOut_buf_next, size_t *pOut_buf_size, const mz_uint32 decomp_flags)
{
static const int s_length_base[31] = { 3, 4, 5, 6, 7, 8, 9, 10, 11, 13, 15, 17, 19, 23, 27, 31, 35, 43, 51, 59, 67, 83, 99, 115, 131, 163, 195, 227, 258, 0, 0 };
static const int s_length_extra[31] = { 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 2, 2, 2, 2, 3, 3, 3, 3, 4, 4, 4, 4, 5, 5, 5, 5, 0, 0, 0 };
static const int s_dist_base[32] = { 1, 2, 3, 4, 5, 7, 9, 13, 17, 25, 33, 49, 65, 97, 129, 193, 257, 385, 513, 769, 1025, 1537, 2049, 3073, 4097, 6145, 8193, 12289, 16385, 24577, 0, 0 };
static const int s_dist_extra[32] = { 0, 0, 0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7, 8, 8, 9, 9, 10, 10, 11, 11, 12, 12, 13, 13 };
static const mz_uint16 s_length_base[31] = { 3, 4, 5, 6, 7, 8, 9, 10, 11, 13, 15, 17, 19, 23, 27, 31, 35, 43, 51, 59, 67, 83, 99, 115, 131, 163, 195, 227, 258, 0, 0 };
static const mz_uint8 s_length_extra[31] = { 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 2, 2, 2, 2, 3, 3, 3, 3, 4, 4, 4, 4, 5, 5, 5, 5, 0, 0, 0 };
static const mz_uint16 s_dist_base[32] = { 1, 2, 3, 4, 5, 7, 9, 13, 17, 25, 33, 49, 65, 97, 129, 193, 257, 385, 513, 769, 1025, 1537, 2049, 3073, 4097, 6145, 8193, 12289, 16385, 24577, 0, 0 };
static const mz_uint8 s_dist_extra[32] = { 0, 0, 0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7, 8, 8, 9, 9, 10, 10, 11, 11, 12, 12, 13, 13 };
static const mz_uint8 s_length_dezigzag[19] = { 16, 17, 18, 0, 8, 7, 9, 6, 10, 5, 11, 4, 12, 3, 13, 2, 14, 1, 15 };
static const int s_min_table_sizes[3] = { 257, 1, 4 };
static const mz_uint16 s_min_table_sizes[3] = { 257, 1, 4 };

mz_int16 *pTrees[3];
mz_uint8 *pCode_sizes[3];

tinfl_status status = TINFL_STATUS_FAILED;
mz_uint32 num_bits, dist, counter, num_extra;
tinfl_bit_buf_t bit_buf;
const mz_uint8 *pIn_buf_cur = pIn_buf_next, *const pIn_buf_end = pIn_buf_next + *pIn_buf_size;
mz_uint8 *pOut_buf_cur = pOut_buf_next, *const pOut_buf_end = pOut_buf_next + *pOut_buf_size;
mz_uint8 *pOut_buf_cur = pOut_buf_next, *const pOut_buf_end = pOut_buf_next ? pOut_buf_next + *pOut_buf_size : NULL;
size_t out_buf_size_mask = (decomp_flags & TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF) ? (size_t)-1 : ((pOut_buf_next - pOut_buf_start) + *pOut_buf_size) - 1, dist_from_out_buf_start;

/* Ensure the output buffer's size is a power of 2, unless the output buffer is large enough to hold the entire output file (in which case it doesn't matter). */
@@ -2411,6 +2460,13 @@ tinfl_status tinfl_decompress(tinfl_decompressor *r, const mz_uint8 *pIn_buf_nex
return TINFL_STATUS_BAD_PARAM;
}

pTrees[0] = r->m_tree_0;
pTrees[1] = r->m_tree_1;
pTrees[2] = r->m_tree_2;
pCode_sizes[0] = r->m_code_size_0;
pCode_sizes[1] = r->m_code_size_1;
pCode_sizes[2] = r->m_code_size_2;

num_bits = r->m_num_bits;
bit_buf = r->m_bit_buf;
dist = r->m_dist;
@@ -2427,7 +2483,7 @@ tinfl_status tinfl_decompress(tinfl_decompressor *r, const mz_uint8 *pIn_buf_nex
TINFL_GET_BYTE(2, r->m_zhdr1);
counter = (((r->m_zhdr0 * 256 + r->m_zhdr1) % 31 != 0) || (r->m_zhdr1 & 32) || ((r->m_zhdr0 & 15) != 8));
if (!(decomp_flags & TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF))
counter |= (((1U << (8U + (r->m_zhdr0 >> 4))) > 32768U) || ((out_buf_size_mask + 1) < (size_t)(1U << (8U + (r->m_zhdr0 >> 4)))));
counter |= (((1U << (8U + (r->m_zhdr0 >> 4))) > 32768U) || ((out_buf_size_mask + 1) < (size_t)((size_t)1 << (8U + (r->m_zhdr0 >> 4)))));
if (counter)
{
TINFL_CR_RETURN_FOREVER(36, TINFL_STATUS_FAILED);
@@ -2488,11 +2544,11 @@ tinfl_status tinfl_decompress(tinfl_decompressor *r, const mz_uint8 *pIn_buf_nex
{
if (r->m_type == 1)
{
mz_uint8 *p = r->m_tables[0].m_code_size;
mz_uint8 *p = r->m_code_size_0;
mz_uint i;
r->m_table_sizes[0] = 288;
r->m_table_sizes[1] = 32;
TINFL_MEMSET(r->m_tables[1].m_code_size, 5, 32);
TINFL_MEMSET(r->m_code_size_1, 5, 32);
for (i = 0; i <= 143; ++i)
*p++ = 8;
for (; i <= 255; ++i)
@@ -2509,26 +2565,30 @@ tinfl_status tinfl_decompress(tinfl_decompressor *r, const mz_uint8 *pIn_buf_nex
TINFL_GET_BITS(11, r->m_table_sizes[counter], "\05\05\04"[counter]);
r->m_table_sizes[counter] += s_min_table_sizes[counter];
}
MZ_CLEAR_OBJ(r->m_tables[2].m_code_size);
MZ_CLEAR_ARR(r->m_code_size_2);
for (counter = 0; counter < r->m_table_sizes[2]; counter++)
{
mz_uint s;
TINFL_GET_BITS(14, s, 3);
r->m_tables[2].m_code_size[s_length_dezigzag[counter]] = (mz_uint8)s;
r->m_code_size_2[s_length_dezigzag[counter]] = (mz_uint8)s;
}
r->m_table_sizes[2] = 19;
}
for (; (int)r->m_type >= 0; r->m_type--)
{
int tree_next, tree_cur;
tinfl_huff_table *pTable;
mz_int16 *pLookUp;
mz_int16 *pTree;
mz_uint8 *pCode_size;
mz_uint i, j, used_syms, total, sym_index, next_code[17], total_syms[16];
pTable = &r->m_tables[r->m_type];
MZ_CLEAR_OBJ(total_syms);
MZ_CLEAR_OBJ(pTable->m_look_up);
MZ_CLEAR_OBJ(pTable->m_tree);
pLookUp = r->m_look_up[r->m_type];
pTree = pTrees[r->m_type];
pCode_size = pCode_sizes[r->m_type];
MZ_CLEAR_ARR(total_syms);
TINFL_MEMSET(pLookUp, 0, sizeof(r->m_look_up[0]));
tinfl_clear_tree(r);
for (i = 0; i < r->m_table_sizes[r->m_type]; ++i)
total_syms[pTable->m_code_size[i]]++;
total_syms[pCode_size[i]]++;
used_syms = 0, total = 0;
next_code[0] = next_code[1] = 0;
for (i = 1; i <= 15; ++i)
@@ -2542,7 +2602,7 @@ tinfl_status tinfl_decompress(tinfl_decompressor *r, const mz_uint8 *pIn_buf_nex
}
for (tree_next = -1, sym_index = 0; sym_index < r->m_table_sizes[r->m_type]; ++sym_index)
{
mz_uint rev_code = 0, l, cur_code, code_size = pTable->m_code_size[sym_index];
mz_uint rev_code = 0, l, cur_code, code_size = pCode_size[sym_index];
if (!code_size)
continue;
cur_code = next_code[code_size]++;
@@ -2553,14 +2613,14 @@ tinfl_status tinfl_decompress(tinfl_decompressor *r, const mz_uint8 *pIn_buf_nex
mz_int16 k = (mz_int16)((code_size << 9) | sym_index);
while (rev_code < TINFL_FAST_LOOKUP_SIZE)
{
pTable->m_look_up[rev_code] = k;
pLookUp[rev_code] = k;
rev_code += (1 << code_size);
}
continue;
}
if (0 == (tree_cur = pTable->m_look_up[rev_code & (TINFL_FAST_LOOKUP_SIZE - 1)]))
if (0 == (tree_cur = pLookUp[rev_code & (TINFL_FAST_LOOKUP_SIZE - 1)]))
{
pTable->m_look_up[rev_code & (TINFL_FAST_LOOKUP_SIZE - 1)] = (mz_int16)tree_next;
pLookUp[rev_code & (TINFL_FAST_LOOKUP_SIZE - 1)] = (mz_int16)tree_next;
tree_cur = tree_next;
tree_next -= 2;
}
@@ -2568,24 +2628,24 @@ tinfl_status tinfl_decompress(tinfl_decompressor *r, const mz_uint8 *pIn_buf_nex
for (j = code_size; j > (TINFL_FAST_LOOKUP_BITS + 1); j--)
{
tree_cur -= ((rev_code >>= 1) & 1);
if (!pTable->m_tree[-tree_cur - 1])
if (!pTree[-tree_cur - 1])
{
pTable->m_tree[-tree_cur - 1] = (mz_int16)tree_next;
pTree[-tree_cur - 1] = (mz_int16)tree_next;
tree_cur = tree_next;
tree_next -= 2;
}
else
tree_cur = pTable->m_tree[-tree_cur - 1];
tree_cur = pTree[-tree_cur - 1];
}
tree_cur -= ((rev_code >>= 1) & 1);
pTable->m_tree[-tree_cur - 1] = (mz_int16)sym_index;
pTree[-tree_cur - 1] = (mz_int16)sym_index;
}
if (r->m_type == 2)
{
for (counter = 0; counter < (r->m_table_sizes[0] + r->m_table_sizes[1]);)
{
mz_uint s;
TINFL_HUFF_DECODE(16, dist, &r->m_tables[2]);
TINFL_HUFF_DECODE(16, dist, r->m_look_up[2], r->m_tree_2);
if (dist < 16)
{
r->m_len_codes[counter++] = (mz_uint8)dist;
@@ -2605,8 +2665,8 @@ tinfl_status tinfl_decompress(tinfl_decompressor *r, const mz_uint8 *pIn_buf_nex
{
TINFL_CR_RETURN_FOREVER(21, TINFL_STATUS_FAILED);
}
TINFL_MEMCPY(r->m_tables[0].m_code_size, r->m_len_codes, r->m_table_sizes[0]);
TINFL_MEMCPY(r->m_tables[1].m_code_size, r->m_len_codes + r->m_table_sizes[0], r->m_table_sizes[1]);
TINFL_MEMCPY(r->m_code_size_0, r->m_len_codes, r->m_table_sizes[0]);
TINFL_MEMCPY(r->m_code_size_1, r->m_len_codes + r->m_table_sizes[0], r->m_table_sizes[1]);
}
}
for (;;)
@@ -2616,7 +2676,7 @@ tinfl_status tinfl_decompress(tinfl_decompressor *r, const mz_uint8 *pIn_buf_nex
{
if (((pIn_buf_end - pIn_buf_cur) < 4) || ((pOut_buf_end - pOut_buf_cur) < 2))
{
TINFL_HUFF_DECODE(23, counter, &r->m_tables[0]);
TINFL_HUFF_DECODE(23, counter, r->m_look_up[0], r->m_tree_0);
if (counter >= 256)
break;
while (pOut_buf_cur >= pOut_buf_end)
@@ -2644,14 +2704,14 @@ tinfl_status tinfl_decompress(tinfl_decompressor *r, const mz_uint8 *pIn_buf_nex
num_bits += 16;
}
#endif
if ((sym2 = r->m_tables[0].m_look_up[bit_buf & (TINFL_FAST_LOOKUP_SIZE - 1)]) >= 0)
if ((sym2 = r->m_look_up[0][bit_buf & (TINFL_FAST_LOOKUP_SIZE - 1)]) >= 0)
code_len = sym2 >> 9;
else
{
code_len = TINFL_FAST_LOOKUP_BITS;
do
{
sym2 = r->m_tables[0].m_tree[~sym2 + ((bit_buf >> code_len++) & 1)];
sym2 = r->m_tree_0[~sym2 + ((bit_buf >> code_len++) & 1)];
} while (sym2 < 0);
}
counter = sym2;
@@ -2668,14 +2728,14 @@ tinfl_status tinfl_decompress(tinfl_decompressor *r, const mz_uint8 *pIn_buf_nex
num_bits += 16;
}
#endif
if ((sym2 = r->m_tables[0].m_look_up[bit_buf & (TINFL_FAST_LOOKUP_SIZE - 1)]) >= 0)
if ((sym2 = r->m_look_up[0][bit_buf & (TINFL_FAST_LOOKUP_SIZE - 1)]) >= 0)
code_len = sym2 >> 9;
else
{
code_len = TINFL_FAST_LOOKUP_BITS;
do
{
sym2 = r->m_tables[0].m_tree[~sym2 + ((bit_buf >> code_len++) & 1)];
sym2 = r->m_tree_0[~sym2 + ((bit_buf >> code_len++) & 1)];
} while (sym2 < 0);
}
bit_buf >>= code_len;
@@ -2704,7 +2764,7 @@ tinfl_status tinfl_decompress(tinfl_decompressor *r, const mz_uint8 *pIn_buf_nex
counter += extra_bits;
}

TINFL_HUFF_DECODE(26, dist, &r->m_tables[1]);
TINFL_HUFF_DECODE(26, dist, r->m_look_up[1], r->m_tree_1);
num_extra = s_dist_extra[dist];
dist = s_dist_base[dist];
if (num_extra)
@@ -2789,7 +2849,7 @@ tinfl_status tinfl_decompress(tinfl_decompressor *r, const mz_uint8 *pIn_buf_nex
--pIn_buf_cur;
num_bits -= 8;
}
bit_buf &= (tinfl_bit_buf_t)((((mz_uint64)1) << num_bits) - (mz_uint64)1);
bit_buf &= ~(~(tinfl_bit_buf_t)0 << num_bits);
MZ_ASSERT(!num_bits); /* if this assert fires then we've read beyond the end of non-deflate/zlib streams with following data (such as gzip streams). */

if (decomp_flags & TINFL_FLAG_PARSE_ZLIB_HEADER)
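Both masking expressions build the same low-num_bits mask; the new form stays within the width of tinfl_bit_buf_t instead of detouring through mz_uint64, which matters when the bit buffer is 32 bits wide. A small standalone demonstration of the identity (illustrative, not from the commit):

```c
#include <assert.h>
#include <stdint.h>

int main(void)
{
    for (unsigned n = 0; n < 32; ++n) {
        uint32_t a = (uint32_t)((UINT64_C(1) << n) - 1); /* old: via 64-bit arithmetic */
        uint32_t b = ~(~(uint32_t)0 << n);               /* new: native-width shift */
        assert(a == b);
    }
    return 0;
}
```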
@@ -2821,7 +2881,7 @@ common_exit:
|
||||
}
|
||||
}
|
||||
r->m_num_bits = num_bits;
|
||||
r->m_bit_buf = bit_buf & (tinfl_bit_buf_t)((((mz_uint64)1) << num_bits) - (mz_uint64)1);
|
||||
r->m_bit_buf = bit_buf & ~(~(tinfl_bit_buf_t)0 << num_bits);
|
||||
r->m_dist = dist;
|
||||
r->m_counter = counter;
|
||||
r->m_num_extra = num_extra;
|
||||
@@ -2916,6 +2976,7 @@ int tinfl_decompress_mem_to_callback(const void *pIn_buf, size_t *pIn_buf_size,
|
||||
size_t in_buf_ofs = 0, dict_ofs = 0;
|
||||
if (!pDict)
|
||||
return TINFL_STATUS_FAILED;
|
||||
memset(pDict,0,TINFL_LZ_DICT_SIZE);
|
||||
tinfl_init(&decomp);
|
||||
for (;;)
|
||||
{
|
||||
@@ -2938,7 +2999,7 @@ int tinfl_decompress_mem_to_callback(const void *pIn_buf, size_t *pIn_buf_size,
|
||||
}
|
||||
|
||||
#ifndef MINIZ_NO_MALLOC
|
||||
tinfl_decompressor *tinfl_decompressor_alloc()
|
||||
tinfl_decompressor *tinfl_decompressor_alloc(void)
|
||||
{
|
||||
tinfl_decompressor *pDecomp = (tinfl_decompressor *)MZ_MALLOC(sizeof(tinfl_decompressor));
|
||||
if (pDecomp)
|
||||
@@ -2955,6 +3016,8 @@ void tinfl_decompressor_free(tinfl_decompressor *pDecomp)
|
||||
#ifdef __cplusplus
|
||||
}
|
||||
#endif
|
||||
|
||||
#endif /*#ifndef MINIZ_NO_INFLATE_APIS*/
|
||||
/**************************************************************************
|
||||
*
|
||||
* Copyright 2013-2014 RAD Game Tools and Valve Software
|
||||
@@ -2997,19 +3060,48 @@ extern "C" {
|
||||
#include <sys/stat.h>
|
||||
|
||||
#if defined(_MSC_VER) || defined(__MINGW64__)
|
||||
|
||||
#define WIN32_LEAN_AND_MEAN
|
||||
#include <windows.h>
|
||||
|
||||
static WCHAR* mz_utf8z_to_widechar(const char* str)
|
||||
{
|
||||
int reqChars = MultiByteToWideChar(CP_UTF8, 0, str, -1, NULL, 0);
|
||||
WCHAR* wStr = (WCHAR*)malloc(reqChars * sizeof(WCHAR));
|
||||
MultiByteToWideChar(CP_UTF8, 0, str, -1, wStr, reqChars);
|
||||
return wStr;
|
||||
}
|
||||
|
||||
static FILE *mz_fopen(const char *pFilename, const char *pMode)
|
||||
{
|
||||
FILE *pFile = NULL;
|
||||
fopen_s(&pFile, pFilename, pMode);
|
||||
return pFile;
|
||||
WCHAR* wFilename = mz_utf8z_to_widechar(pFilename);
|
||||
WCHAR* wMode = mz_utf8z_to_widechar(pMode);
|
||||
FILE* pFile = NULL;
|
||||
errno_t err = _wfopen_s(&pFile, wFilename, wMode);
|
||||
free(wFilename);
|
||||
free(wMode);
|
||||
return err ? NULL : pFile;
|
||||
}
|
||||
|
||||
static FILE *mz_freopen(const char *pPath, const char *pMode, FILE *pStream)
|
||||
{
|
||||
FILE *pFile = NULL;
|
||||
if (freopen_s(&pFile, pPath, pMode, pStream))
|
||||
return NULL;
|
||||
return pFile;
|
||||
WCHAR* wPath = mz_utf8z_to_widechar(pPath);
|
||||
WCHAR* wMode = mz_utf8z_to_widechar(pMode);
|
||||
FILE* pFile = NULL;
|
||||
errno_t err = _wfreopen_s(&pFile, wPath, wMode, pStream);
|
||||
free(wPath);
|
||||
free(wMode);
|
||||
return err ? NULL : pFile;
|
||||
}
|
||||
|
||||
static int mz_stat64(const char *path, struct __stat64 *buffer)
|
||||
{
|
||||
WCHAR* wPath = mz_utf8z_to_widechar(path);
|
||||
int res = _wstat64(wPath, buffer);
|
||||
free(wPath);
|
||||
return res;
|
||||
}
|
||||
|
||||
#ifndef MINIZ_NO_TIME
|
||||
#include <sys/utime.h>
|
||||
#endif
|
||||
@@ -3020,11 +3112,12 @@ static FILE *mz_freopen(const char *pPath, const char *pMode, FILE *pStream)
|
||||
#define MZ_FTELL64 _ftelli64
|
||||
#define MZ_FSEEK64 _fseeki64
|
||||
#define MZ_FILE_STAT_STRUCT _stat64
|
||||
#define MZ_FILE_STAT _stat64
|
||||
#define MZ_FILE_STAT mz_stat64
|
||||
#define MZ_FFLUSH fflush
|
||||
#define MZ_FREOPEN mz_freopen
|
||||
#define MZ_DELETE_FILE remove
|
||||
#elif defined(__MINGW32__)
|
||||
|
||||
#elif defined(__MINGW32__) || defined(__WATCOMC__)
|
||||
#ifndef MINIZ_NO_TIME
|
||||
#include <sys/utime.h>
|
||||
#endif
|
||||
@@ -3032,13 +3125,14 @@ static FILE *mz_freopen(const char *pPath, const char *pMode, FILE *pStream)
|
||||
#define MZ_FCLOSE fclose
|
||||
#define MZ_FREAD fread
|
||||
#define MZ_FWRITE fwrite
|
||||
#define MZ_FTELL64 ftello64
|
||||
#define MZ_FSEEK64 fseeko64
|
||||
#define MZ_FILE_STAT_STRUCT _stat
|
||||
#define MZ_FILE_STAT _stat
|
||||
#define MZ_FTELL64 _ftelli64
|
||||
#define MZ_FSEEK64 _fseeki64
|
||||
#define MZ_FILE_STAT_STRUCT stat
|
||||
#define MZ_FILE_STAT stat
|
||||
#define MZ_FFLUSH fflush
|
||||
#define MZ_FREOPEN(f, m, s) freopen(f, m, s)
|
||||
#define MZ_DELETE_FILE remove
|
||||
|
||||
#elif defined(__TINYC__)
|
||||
#ifndef MINIZ_NO_TIME
|
||||
#include <sys/utime.h>
|
||||
@@ -3054,6 +3148,7 @@ static FILE *mz_freopen(const char *pPath, const char *pMode, FILE *pStream)
|
||||
#define MZ_FFLUSH fflush
|
||||
#define MZ_FREOPEN(f, m, s) freopen(f, m, s)
|
||||
#define MZ_DELETE_FILE remove
|
||||
|
||||
#elif defined(__USE_LARGEFILE64) /* gcc, clang */
|
||||
#ifndef MINIZ_NO_TIME
|
||||
#include <utime.h>
|
||||
@@ -3069,7 +3164,8 @@ static FILE *mz_freopen(const char *pPath, const char *pMode, FILE *pStream)
|
||||
#define MZ_FFLUSH fflush
|
||||
#define MZ_FREOPEN(p, m, s) freopen64(p, m, s)
|
||||
#define MZ_DELETE_FILE remove
|
||||
#elif defined(__APPLE__)
|
||||
|
||||
#elif defined(__APPLE__) || defined(__FreeBSD__)
|
||||
#ifndef MINIZ_NO_TIME
|
||||
#include <utime.h>
|
||||
#endif
|
||||
@@ -3215,7 +3311,7 @@ struct mz_zip_internal_state_tag
|
||||
mz_zip_array m_sorted_central_dir_offsets;
|
||||
|
||||
/* The flags passed in when the archive is initially opened. */
|
||||
uint32_t m_init_flags;
|
||||
mz_uint32 m_init_flags;
|
||||
|
||||
/* MZ_TRUE if the archive has a zip64 end of central directory headers, etc. */
|
||||
mz_bool m_zip64;
|
||||
@@ -3651,7 +3747,7 @@ static mz_bool mz_zip_reader_read_central_dir(mz_zip_archive *pZip, mz_uint flag
|
||||
if (((num_this_disk | cdir_disk_index) != 0) && ((num_this_disk != 1) || (cdir_disk_index != 1)))
|
||||
return mz_zip_set_error(pZip, MZ_ZIP_UNSUPPORTED_MULTIDISK);
|
||||
|
||||
if (cdir_size < pZip->m_total_files * MZ_ZIP_CENTRAL_DIR_HEADER_SIZE)
|
||||
if (cdir_size < (mz_uint64)pZip->m_total_files * MZ_ZIP_CENTRAL_DIR_HEADER_SIZE)
|
||||
return mz_zip_set_error(pZip, MZ_ZIP_INVALID_HEADER_OR_CORRUPTED);
|
||||
|
||||
if ((cdir_ofs + (mz_uint64)cdir_size) > pZip->m_archive_size)
|
||||
@@ -3802,7 +3898,7 @@ static mz_bool mz_zip_reader_read_central_dir(mz_zip_archive *pZip, mz_uint flag
|
||||
void mz_zip_zero_struct(mz_zip_archive *pZip)
|
||||
{
|
||||
if (pZip)
|
||||
MZ_CLEAR_OBJ(*pZip);
|
||||
MZ_CLEAR_PTR(pZip);
|
||||
}
|
||||
|
||||
static mz_bool mz_zip_reader_end_internal(mz_zip_archive *pZip, mz_bool set_last_error)
@@ -4276,7 +4372,7 @@ static mz_bool mz_zip_locate_file_binary_search(mz_zip_archive *pZip, const char
const mz_zip_array *pCentral_dir_offsets = &pState->m_central_dir_offsets;
const mz_zip_array *pCentral_dir = &pState->m_central_dir;
mz_uint32 *pIndices = &MZ_ZIP_ARRAY_ELEMENT(&pState->m_sorted_central_dir_offsets, mz_uint32, 0);
const uint32_t size = pZip->m_total_files;
const mz_uint32 size = pZip->m_total_files;
const mz_uint filename_len = (mz_uint)strlen(pFilename);

if (pIndex)
@@ -4291,7 +4387,7 @@ static mz_bool mz_zip_locate_file_binary_search(mz_zip_archive *pZip, const char
while (l <= h)
{
mz_int64 m = l + ((h - l) >> 1);
uint32_t file_index = pIndices[(uint32_t)m];
mz_uint32 file_index = pIndices[(mz_uint32)m];

int comp = mz_zip_filename_compare(pCentral_dir, pCentral_dir_offsets, file_index, pFilename, filename_len);
if (!comp)
@@ -4384,7 +4480,8 @@ mz_bool mz_zip_reader_locate_file_v2(mz_zip_archive *pZip, const char *pName, co
return mz_zip_set_error(pZip, MZ_ZIP_FILE_NOT_FOUND);
}

mz_bool mz_zip_reader_extract_to_mem_no_alloc(mz_zip_archive *pZip, mz_uint file_index, void *pBuf, size_t buf_size, mz_uint flags, void *pUser_read_buf, size_t user_read_buf_size)
static
mz_bool mz_zip_reader_extract_to_mem_no_alloc1(mz_zip_archive *pZip, mz_uint file_index, void *pBuf, size_t buf_size, mz_uint flags, void *pUser_read_buf, size_t user_read_buf_size, const mz_zip_archive_file_stat *st)
{
int status = TINFL_STATUS_DONE;
mz_uint64 needed_size, cur_file_ofs, comp_remaining, out_buf_ofs = 0, read_buf_size, read_buf_ofs = 0, read_buf_avail;
@@ -4397,6 +4494,9 @@ mz_bool mz_zip_reader_extract_to_mem_no_alloc(mz_zip_archive *pZip, mz_uint file
if ((!pZip) || (!pZip->m_pState) || ((buf_size) && (!pBuf)) || ((user_read_buf_size) && (!pUser_read_buf)) || (!pZip->m_pRead))
return mz_zip_set_error(pZip, MZ_ZIP_INVALID_PARAMETER);

if (st) {
file_stat = *st;
} else
if (!mz_zip_reader_file_stat(pZip, file_index, &file_stat))
return MZ_FALSE;

@@ -4527,17 +4627,22 @@ mz_bool mz_zip_reader_extract_to_mem_no_alloc(mz_zip_archive *pZip, mz_uint file
return status == TINFL_STATUS_DONE;
}

mz_bool mz_zip_reader_extract_to_mem_no_alloc(mz_zip_archive *pZip, mz_uint file_index, void *pBuf, size_t buf_size, mz_uint flags, void *pUser_read_buf, size_t user_read_buf_size)
{
return mz_zip_reader_extract_to_mem_no_alloc1(pZip, file_index, pBuf, buf_size, flags, pUser_read_buf, user_read_buf_size, NULL);
}

mz_bool mz_zip_reader_extract_file_to_mem_no_alloc(mz_zip_archive *pZip, const char *pFilename, void *pBuf, size_t buf_size, mz_uint flags, void *pUser_read_buf, size_t user_read_buf_size)
{
mz_uint32 file_index;
if (!mz_zip_reader_locate_file_v2(pZip, pFilename, NULL, flags, &file_index))
return MZ_FALSE;
return mz_zip_reader_extract_to_mem_no_alloc(pZip, file_index, pBuf, buf_size, flags, pUser_read_buf, user_read_buf_size);
return mz_zip_reader_extract_to_mem_no_alloc1(pZip, file_index, pBuf, buf_size, flags, pUser_read_buf, user_read_buf_size, NULL);
}

mz_bool mz_zip_reader_extract_to_mem(mz_zip_archive *pZip, mz_uint file_index, void *pBuf, size_t buf_size, mz_uint flags)
{
return mz_zip_reader_extract_to_mem_no_alloc(pZip, file_index, pBuf, buf_size, flags, NULL, 0);
return mz_zip_reader_extract_to_mem_no_alloc1(pZip, file_index, pBuf, buf_size, flags, NULL, 0, NULL);
}

mz_bool mz_zip_reader_extract_file_to_mem(mz_zip_archive *pZip, const char *pFilename, void *pBuf, size_t buf_size, mz_uint flags)
@@ -4547,23 +4652,17 @@ mz_bool mz_zip_reader_extract_file_to_mem(mz_zip_archive *pZip, const char *pFil

void *mz_zip_reader_extract_to_heap(mz_zip_archive *pZip, mz_uint file_index, size_t *pSize, mz_uint flags)
{
mz_uint64 comp_size, uncomp_size, alloc_size;
const mz_uint8 *p = mz_zip_get_cdh(pZip, file_index);
mz_zip_archive_file_stat file_stat;
mz_uint64 alloc_size;
void *pBuf;

if (pSize)
*pSize = 0;

if (!p)
{
mz_zip_set_error(pZip, MZ_ZIP_INVALID_PARAMETER);
if (!mz_zip_reader_file_stat(pZip, file_index, &file_stat))
return NULL;
}

comp_size = MZ_READ_LE32(p + MZ_ZIP_CDH_COMPRESSED_SIZE_OFS);
uncomp_size = MZ_READ_LE32(p + MZ_ZIP_CDH_DECOMPRESSED_SIZE_OFS);

alloc_size = (flags & MZ_ZIP_FLAG_COMPRESSED_DATA) ? comp_size : uncomp_size;
alloc_size = (flags & MZ_ZIP_FLAG_COMPRESSED_DATA) ? file_stat.m_comp_size : file_stat.m_uncomp_size;
if (((sizeof(size_t) == sizeof(mz_uint32))) && (alloc_size > 0x7FFFFFFF))
{
mz_zip_set_error(pZip, MZ_ZIP_INTERNAL_ERROR);
@@ -4576,7 +4675,7 @@ void *mz_zip_reader_extract_to_heap(mz_zip_archive *pZip, mz_uint file_index, si
return NULL;
}

if (!mz_zip_reader_extract_to_mem(pZip, file_index, pBuf, (size_t)alloc_size, flags))
if (!mz_zip_reader_extract_to_mem_no_alloc1(pZip, file_index, pBuf, (size_t)alloc_size, flags, NULL, 0, &file_stat))
{
pZip->m_pFree(pZip->m_pAlloc_opaque, pBuf);
return NULL;
@@ -5037,7 +5136,7 @@ size_t mz_zip_reader_extract_iter_read(mz_zip_reader_extract_iter_state* pState,
size_t to_copy = MZ_MIN( (buf_size - copied_to_caller), pState->out_blk_remain );

/* Copy data to caller's buffer */
memcpy( (uint8_t*)pvBuf + copied_to_caller, pWrite_buf_cur, to_copy );
memcpy( (mz_uint8*)pvBuf + copied_to_caller, pWrite_buf_cur, to_copy );

#ifndef MINIZ_DISABLE_ZIP_READER_CRC32_CHECKS
/* Perform CRC */
@@ -5406,7 +5505,7 @@ handle_failure:
mz_bool mz_zip_validate_archive(mz_zip_archive *pZip, mz_uint flags)
{
mz_zip_internal_state *pState;
uint32_t i;
mz_uint32 i;

if ((!pZip) || (!pZip->m_pState) || (!pZip->m_pAlloc) || (!pZip->m_pFree) || (!pZip->m_pRead))
return mz_zip_set_error(pZip, MZ_ZIP_INVALID_PARAMETER);
@@ -5424,9 +5523,6 @@ mz_bool mz_zip_validate_archive(mz_zip_archive *pZip, mz_uint flags)
}
else
{
if (pZip->m_total_files >= MZ_UINT32_MAX)
return mz_zip_set_error(pZip, MZ_ZIP_ARCHIVE_TOO_LARGE);

if (pState->m_central_dir.m_size >= MZ_UINT32_MAX)
return mz_zip_set_error(pZip, MZ_ZIP_ARCHIVE_TOO_LARGE);
}
@@ -5788,7 +5884,7 @@ mz_bool mz_zip_writer_init_file_v2(mz_zip_archive *pZip, const char *pFilename,
mz_uint64 cur_ofs = 0;
char buf[4096];

MZ_CLEAR_OBJ(buf);
MZ_CLEAR_ARR(buf);

do
{
@@ -6151,7 +6247,7 @@ mz_bool mz_zip_writer_add_mem_ex_v2(mz_zip_archive *pZip, const char *pArchive_n
pState->m_zip64 = MZ_TRUE;
/*return mz_zip_set_error(pZip, MZ_ZIP_TOO_MANY_FILES); */
}
if ((buf_size > 0xFFFFFFFF) || (uncomp_size > 0xFFFFFFFF))
if (((mz_uint64)buf_size > 0xFFFFFFFF) || (uncomp_size > 0xFFFFFFFF))
{
pState->m_zip64 = MZ_TRUE;
/*return mz_zip_set_error(pZip, MZ_ZIP_ARCHIVE_TOO_LARGE); */
@@ -6244,7 +6340,7 @@ mz_bool mz_zip_writer_add_mem_ex_v2(mz_zip_archive *pZip, const char *pArchive_n
}
cur_archive_file_ofs += num_alignment_padding_bytes;

MZ_CLEAR_OBJ(local_dir_header);
MZ_CLEAR_ARR(local_dir_header);

if (!store_data_uncompressed || (level_and_flags & MZ_ZIP_FLAG_COMPRESSED_DATA))
{
@@ -6394,7 +6490,7 @@ mz_bool mz_zip_writer_add_mem_ex_v2(mz_zip_archive *pZip, const char *pArchive_n
mz_bool mz_zip_writer_add_read_buf_callback(mz_zip_archive *pZip, const char *pArchive_name, mz_file_read_func read_callback, void* callback_opaque, mz_uint64 max_size, const MZ_TIME_T *pFile_time, const void *pComment, mz_uint16 comment_size, mz_uint level_and_flags,
const char *user_extra_data, mz_uint user_extra_data_len, const char *user_extra_data_central, mz_uint user_extra_data_central_len)
{
mz_uint16 gen_flags = (level_and_flags & MZ_ZIP_FLAG_WRITE_HEADER_SET_SIZE) ? 0 : MZ_ZIP_LDH_BIT_FLAG_HAS_LOCATOR;
mz_uint16 gen_flags;
mz_uint uncomp_crc32 = MZ_CRC32_INIT, level, num_alignment_padding_bytes;
mz_uint16 method = 0, dos_time = 0, dos_date = 0, ext_attributes = 0;
mz_uint64 local_dir_header_ofs, cur_archive_file_ofs = pZip->m_archive_size, uncomp_size = 0, comp_size = 0;
@@ -6406,13 +6502,15 @@ mz_bool mz_zip_writer_add_read_buf_callback(mz_zip_archive *pZip, const char *pA
mz_zip_internal_state *pState;
mz_uint64 file_ofs = 0, cur_archive_header_file_ofs;

if (!(level_and_flags & MZ_ZIP_FLAG_ASCII_FILENAME))
gen_flags |= MZ_ZIP_GENERAL_PURPOSE_BIT_FLAG_UTF8;

if ((int)level_and_flags < 0)
level_and_flags = MZ_DEFAULT_LEVEL;
level = level_and_flags & 0xF;

gen_flags = (level_and_flags & MZ_ZIP_FLAG_WRITE_HEADER_SET_SIZE) ? 0 : MZ_ZIP_LDH_BIT_FLAG_HAS_LOCATOR;

if (!(level_and_flags & MZ_ZIP_FLAG_ASCII_FILENAME))
gen_flags |= MZ_ZIP_GENERAL_PURPOSE_BIT_FLAG_UTF8;

/* Sanity checks */
if ((!pZip) || (!pZip->m_pState) || (pZip->m_zip_mode != MZ_ZIP_MODE_WRITING) || (!pArchive_name) || ((comment_size) && (!pComment)) || (level > MZ_UBER_COMPRESSION))
return mz_zip_set_error(pZip, MZ_ZIP_INVALID_PARAMETER);
@@ -6497,7 +6595,7 @@ mz_bool mz_zip_writer_add_read_buf_callback(mz_zip_archive *pZip, const char *pA
method = MZ_DEFLATED;
}

MZ_CLEAR_OBJ(local_dir_header);
MZ_CLEAR_ARR(local_dir_header);
if (pState->m_zip64)
{
if (max_size >= MZ_UINT32_MAX || local_dir_header_ofs >= MZ_UINT32_MAX)
@@ -6801,7 +6899,7 @@ mz_bool mz_zip_writer_add_file(mz_zip_archive *pZip, const char *pArchive_name,
}
#endif /* #ifndef MINIZ_NO_STDIO */

static mz_bool mz_zip_writer_update_zip64_extension_block(mz_zip_array *pNew_ext, mz_zip_archive *pZip, const mz_uint8 *pExt, uint32_t ext_len, mz_uint64 *pComp_size, mz_uint64 *pUncomp_size, mz_uint64 *pLocal_header_ofs, mz_uint32 *pDisk_start)
static mz_bool mz_zip_writer_update_zip64_extension_block(mz_zip_array *pNew_ext, mz_zip_archive *pZip, const mz_uint8 *pExt, mz_uint32 ext_len, mz_uint64 *pComp_size, mz_uint64 *pUncomp_size, mz_uint64 *pLocal_header_ofs, mz_uint32 *pDisk_start)
{
/* + 64 should be enough for any new zip64 data */
if (!mz_zip_array_reserve(pZip, pNew_ext, ext_len + 64, MZ_FALSE))
@@ -7117,10 +7215,10 @@ mz_bool mz_zip_writer_add_from_zip_reader(mz_zip_archive *pZip, mz_zip_archive *
if (pZip->m_pState->m_zip64)
{
/* dest is zip64, so upgrade the data descriptor */
const mz_uint32 *pSrc_descriptor = (const mz_uint32 *)((const mz_uint8 *)pBuf + (has_id ? sizeof(mz_uint32) : 0));
const mz_uint32 src_crc32 = pSrc_descriptor[0];
const mz_uint64 src_comp_size = pSrc_descriptor[1];
const mz_uint64 src_uncomp_size = pSrc_descriptor[2];
const mz_uint8 *pSrc_descriptor = (const mz_uint8 *)pBuf + (has_id ? sizeof(mz_uint32) : 0);
const mz_uint32 src_crc32 = MZ_READ_LE32(pSrc_descriptor);
const mz_uint64 src_comp_size = MZ_READ_LE32(pSrc_descriptor + sizeof(mz_uint32));
const mz_uint64 src_uncomp_size = MZ_READ_LE32(pSrc_descriptor + 2*sizeof(mz_uint32));

mz_write_le32((mz_uint8 *)pBuf, MZ_ZIP_DATA_DESCRIPTOR_ID);
mz_write_le32((mz_uint8 *)pBuf + sizeof(mz_uint32) * 1, src_crc32);
@@ -7256,7 +7354,7 @@ mz_bool mz_zip_writer_finalize_archive(mz_zip_archive *pZip)

if (pState->m_zip64)
{
if ((pZip->m_total_files > MZ_UINT32_MAX) || (pState->m_central_dir.m_size >= MZ_UINT32_MAX))
if ((mz_uint64)pState->m_central_dir.m_size >= MZ_UINT32_MAX)
return mz_zip_set_error(pZip, MZ_ZIP_TOO_MANY_FILES);
}
else
@@ -7284,7 +7382,7 @@ mz_bool mz_zip_writer_finalize_archive(mz_zip_archive *pZip)
/* Write zip64 end of central directory header */
mz_uint64 rel_ofs_to_zip64_ecdr = pZip->m_archive_size;

MZ_CLEAR_OBJ(hdr);
MZ_CLEAR_ARR(hdr);
MZ_WRITE_LE32(hdr + MZ_ZIP64_ECDH_SIG_OFS, MZ_ZIP64_END_OF_CENTRAL_DIR_HEADER_SIG);
MZ_WRITE_LE64(hdr + MZ_ZIP64_ECDH_SIZE_OF_RECORD_OFS, MZ_ZIP64_END_OF_CENTRAL_DIR_HEADER_SIZE - sizeof(mz_uint32) - sizeof(mz_uint64));
MZ_WRITE_LE16(hdr + MZ_ZIP64_ECDH_VERSION_MADE_BY_OFS, 0x031E); /* TODO: always Unix */
@@ -7299,7 +7397,7 @@ mz_bool mz_zip_writer_finalize_archive(mz_zip_archive *pZip)
pZip->m_archive_size += MZ_ZIP64_END_OF_CENTRAL_DIR_HEADER_SIZE;

/* Write zip64 end of central directory locator */
MZ_CLEAR_OBJ(hdr);
MZ_CLEAR_ARR(hdr);
MZ_WRITE_LE32(hdr + MZ_ZIP64_ECDL_SIG_OFS, MZ_ZIP64_END_OF_CENTRAL_DIR_LOCATOR_SIG);
MZ_WRITE_LE64(hdr + MZ_ZIP64_ECDL_REL_OFS_TO_ZIP64_ECDR_OFS, rel_ofs_to_zip64_ecdr);
MZ_WRITE_LE32(hdr + MZ_ZIP64_ECDL_TOTAL_NUMBER_OF_DISKS_OFS, 1);
@@ -7310,7 +7408,7 @@ mz_bool mz_zip_writer_finalize_archive(mz_zip_archive *pZip)
}

/* Write end of central directory record */
MZ_CLEAR_OBJ(hdr);
MZ_CLEAR_ARR(hdr);
MZ_WRITE_LE32(hdr + MZ_ZIP_ECDH_SIG_OFS, MZ_ZIP_END_OF_CENTRAL_DIR_HEADER_SIG);
MZ_WRITE_LE16(hdr + MZ_ZIP_ECDH_CDIR_NUM_ENTRIES_ON_DISK_OFS, MZ_MIN(MZ_UINT16_MAX, pZip->m_total_files));
MZ_WRITE_LE16(hdr + MZ_ZIP_ECDH_CDIR_TOTAL_ENTRIES_OFS, MZ_MIN(MZ_UINT16_MAX, pZip->m_total_files));
@@ -7626,7 +7724,9 @@ const char *mz_zip_get_error_string(mz_zip_error mz_err)
case MZ_ZIP_VALIDATION_FAILED:
return "validation failed";
case MZ_ZIP_WRITE_CALLBACK_FAILED:
return "write calledback failed";
return "write callback failed";
case MZ_ZIP_TOTAL_ERRORS:
return "total errors";
default:
break;
}

@@ -1,5 +1,7 @@
#ifndef MINIZ_EXPORT
#define MINIZ_EXPORT
/* miniz.c 2.2.0 - public domain deflate/inflate, zlib-subset, ZIP reading/writing/appending, PNG writing
#endif
/* miniz.c 3.0.2 - public domain deflate/inflate, zlib-subset, ZIP reading/writing/appending, PNG writing
See "unlicense" statement at the end of this file.
Rich Geldreich <richgel99@gmail.com>, last updated Oct. 13, 2013
Implements RFC 1950: http://www.ietf.org/rfc/rfc1950.txt and RFC 1951: http://www.ietf.org/rfc/rfc1951.txt
@@ -115,8 +117,8 @@

/* Defines to completely disable specific portions of miniz.c:
If all macros here are defined the only functionality remaining will be CRC-32, adler-32, tinfl, and tdefl. */
/* Defines to completely disable specific portions of miniz.c:
If all macros here are defined the only functionality remaining will be CRC-32 and adler-32. */

/* Define MINIZ_NO_STDIO to disable all usage and any functions which rely on stdio for file I/O. */
/*#define MINIZ_NO_STDIO */
@@ -126,6 +128,12 @@
/* The current downside is the times written to your archives will be from 1979. */
/*#define MINIZ_NO_TIME */

/* Define MINIZ_NO_DEFLATE_APIS to disable all compression API's. */
/*#define MINIZ_NO_DEFLATE_APIS */

/* Define MINIZ_NO_INFLATE_APIS to disable all decompression API's. */
/*#define MINIZ_NO_INFLATE_APIS */

/* Define MINIZ_NO_ARCHIVE_APIS to disable all ZIP archive API's. */
/*#define MINIZ_NO_ARCHIVE_APIS */

@@ -138,12 +146,20 @@
/* Define MINIZ_NO_ZLIB_COMPATIBLE_NAME to disable zlib names, to prevent conflicts against stock zlib. */
/*#define MINIZ_NO_ZLIB_COMPATIBLE_NAMES */

/* Define MINIZ_NO_MALLOC to disable all calls to malloc, free, and realloc.
/* Define MINIZ_NO_MALLOC to disable all calls to malloc, free, and realloc.
Note if MINIZ_NO_MALLOC is defined then the user must always provide custom user alloc/free/realloc
callbacks to the zlib and archive API's, and a few stand-alone helper API's which don't provide custom user
functions (such as tdefl_compress_mem_to_heap() and tinfl_decompress_mem_to_heap()) won't work. */
/*#define MINIZ_NO_MALLOC */

#ifdef MINIZ_NO_INFLATE_APIS
#define MINIZ_NO_ARCHIVE_APIS
#endif

#ifdef MINIZ_NO_DEFLATE_APIS
#define MINIZ_NO_ARCHIVE_WRITING_APIS
#endif

#if defined(__TINYC__) && (defined(__linux) || defined(__linux__))
/* TODO: Work around "error: include file 'sys\utime.h' when compiling with tcc on Linux */
#define MINIZ_NO_TIME
@@ -162,18 +178,40 @@
#define MINIZ_X86_OR_X64_CPU 0
#endif

#if (__BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__) || MINIZ_X86_OR_X64_CPU
/* Set MINIZ_LITTLE_ENDIAN only if not set */
#if !defined(MINIZ_LITTLE_ENDIAN)
#if defined(__BYTE_ORDER__) && defined(__ORDER_LITTLE_ENDIAN__)

#if (__BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__)
/* Set MINIZ_LITTLE_ENDIAN to 1 if the processor is little endian. */
#define MINIZ_LITTLE_ENDIAN 1
#else
#define MINIZ_LITTLE_ENDIAN 0
#endif

#else

#if MINIZ_X86_OR_X64_CPU
#define MINIZ_LITTLE_ENDIAN 1
#else
#define MINIZ_LITTLE_ENDIAN 0
#endif

#endif
#endif

/* Using unaligned loads and stores causes errors when using UBSan */
#if defined(__has_feature)
#if __has_feature(undefined_behavior_sanitizer)
#define MINIZ_USE_UNALIGNED_LOADS_AND_STORES 0
#endif
#endif

/* Set MINIZ_USE_UNALIGNED_LOADS_AND_STORES only if not set */
#if !defined(MINIZ_USE_UNALIGNED_LOADS_AND_STORES)
#if MINIZ_X86_OR_X64_CPU
/* Set MINIZ_USE_UNALIGNED_LOADS_AND_STORES to 1 on CPU's that permit efficient integer loads and stores from unaligned addresses. */
#define MINIZ_USE_UNALIGNED_LOADS_AND_STORES 1
#define MINIZ_USE_UNALIGNED_LOADS_AND_STORES 0
#define MINIZ_UNALIGNED_USE_MEMCPY
#else
#define MINIZ_USE_UNALIGNED_LOADS_AND_STORES 0
@@ -237,9 +275,9 @@ enum
MZ_DEFAULT_COMPRESSION = -1
};

#define MZ_VERSION "10.2.0"
#define MZ_VERNUM 0xA100
#define MZ_VER_MAJOR 10
#define MZ_VERSION "11.0.2"
#define MZ_VERNUM 0xB002
#define MZ_VER_MAJOR 11
#define MZ_VER_MINOR 2
#define MZ_VER_REVISION 0
#define MZ_VER_SUBREVISION 0
@@ -305,6 +343,8 @@ typedef mz_stream *mz_streamp;
/* Returns the version string of miniz.c. */
MINIZ_EXPORT const char *mz_version(void);

#ifndef MINIZ_NO_DEFLATE_APIS

/* mz_deflateInit() initializes a compressor with default options: */
/* Parameters: */
/* pStream must point to an initialized mz_stream struct. */
@@ -357,6 +397,10 @@ MINIZ_EXPORT int mz_compress2(unsigned char *pDest, mz_ulong *pDest_len, const u
/* mz_compressBound() returns a (very) conservative upper bound on the amount of data that could be generated by calling mz_compress(). */
MINIZ_EXPORT mz_ulong mz_compressBound(mz_ulong source_len);

#endif /*#ifndef MINIZ_NO_DEFLATE_APIS*/

#ifndef MINIZ_NO_INFLATE_APIS

/* Initializes a decompressor. */
MINIZ_EXPORT int mz_inflateInit(mz_streamp pStream);

@@ -390,6 +434,7 @@ MINIZ_EXPORT int mz_inflateEnd(mz_streamp pStream);
/* Returns MZ_OK on success, or one of the error codes from mz_inflate() on failure. */
MINIZ_EXPORT int mz_uncompress(unsigned char *pDest, mz_ulong *pDest_len, const unsigned char *pSource, mz_ulong source_len);
MINIZ_EXPORT int mz_uncompress2(unsigned char *pDest, mz_ulong *pDest_len, const unsigned char *pSource, mz_ulong *pSource_len);
#endif /*#ifndef MINIZ_NO_INFLATE_APIS*/

/* Returns a string description of the specified error code, or NULL if the error code is invalid. */
MINIZ_EXPORT const char *mz_error(int err);
@@ -440,6 +485,8 @@ typedef void *const voidpc;
#define free_func mz_free_func
#define internal_state mz_internal_state
#define z_stream mz_stream

#ifndef MINIZ_NO_DEFLATE_APIS
#define deflateInit mz_deflateInit
#define deflateInit2 mz_deflateInit2
#define deflateReset mz_deflateReset
@@ -449,6 +496,9 @@ typedef void *const voidpc;
#define compress mz_compress
#define compress2 mz_compress2
#define compressBound mz_compressBound
#endif /*#ifndef MINIZ_NO_DEFLATE_APIS*/

#ifndef MINIZ_NO_INFLATE_APIS
#define inflateInit mz_inflateInit
#define inflateInit2 mz_inflateInit2
#define inflateReset mz_inflateReset
@@ -456,6 +506,8 @@ typedef void *const voidpc;
#define inflateEnd mz_inflateEnd
#define uncompress mz_uncompress
#define uncompress2 mz_uncompress2
#endif /*#ifndef MINIZ_NO_INFLATE_APIS*/

#define crc32 mz_crc32
#define adler32 mz_adler32
#define MAX_WBITS 15
@@ -519,7 +571,8 @@ typedef int mz_bool;
#ifdef MINIZ_NO_TIME
typedef struct mz_dummy_time_t_tag
{
int m_dummy;
mz_uint32 m_dummy1;
mz_uint32 m_dummy2;
} mz_dummy_time_t;
#define MZ_TIME_T mz_dummy_time_t
#else
@@ -541,6 +594,8 @@ typedef struct mz_dummy_time_t_tag
#define MZ_MAX(a, b) (((a) > (b)) ? (a) : (b))
#define MZ_MIN(a, b) (((a) < (b)) ? (a) : (b))
#define MZ_CLEAR_OBJ(obj) memset(&(obj), 0, sizeof(obj))
#define MZ_CLEAR_ARR(obj) memset((obj), 0, sizeof(obj))
#define MZ_CLEAR_PTR(obj) memset((obj), 0, sizeof(*obj))

#if MINIZ_USE_UNALIGNED_LOADS_AND_STORES && MINIZ_LITTLE_ENDIAN
#define MZ_READ_LE16(p) *((const mz_uint16 *)(p))
@@ -577,6 +632,8 @@ extern MINIZ_EXPORT void *miniz_def_realloc_func(void *opaque, void *address, si
#pragma once

#ifndef MINIZ_NO_DEFLATE_APIS

#ifdef __cplusplus
extern "C" {
#endif
@@ -764,10 +821,14 @@ MINIZ_EXPORT void tdefl_compressor_free(tdefl_compressor *pComp);
#ifdef __cplusplus
}
#endif

#endif /*#ifndef MINIZ_NO_DEFLATE_APIS*/
#pragma once

/* ------------------- Low-level Decompression API Definitions */

#ifndef MINIZ_NO_INFLATE_APIS

#ifdef __cplusplus
extern "C" {
#endif
@@ -876,12 +937,6 @@ enum
TINFL_FAST_LOOKUP_SIZE = 1 << TINFL_FAST_LOOKUP_BITS
};

typedef struct
{
mz_uint8 m_code_size[TINFL_MAX_HUFF_SYMBOLS_0];
mz_int16 m_look_up[TINFL_FAST_LOOKUP_SIZE], m_tree[TINFL_MAX_HUFF_SYMBOLS_0 * 2];
} tinfl_huff_table;

#if MINIZ_HAS_64BIT_REGISTERS
#define TINFL_USE_64BIT_BITBUF 1
#else
@@ -901,14 +956,22 @@ struct tinfl_decompressor_tag
mz_uint32 m_state, m_num_bits, m_zhdr0, m_zhdr1, m_z_adler32, m_final, m_type, m_check_adler32, m_dist, m_counter, m_num_extra, m_table_sizes[TINFL_MAX_HUFF_TABLES];
tinfl_bit_buf_t m_bit_buf;
size_t m_dist_from_out_buf_start;
tinfl_huff_table m_tables[TINFL_MAX_HUFF_TABLES];
mz_int16 m_look_up[TINFL_MAX_HUFF_TABLES][TINFL_FAST_LOOKUP_SIZE];
mz_int16 m_tree_0[TINFL_MAX_HUFF_SYMBOLS_0 * 2];
mz_int16 m_tree_1[TINFL_MAX_HUFF_SYMBOLS_1 * 2];
mz_int16 m_tree_2[TINFL_MAX_HUFF_SYMBOLS_2 * 2];
mz_uint8 m_code_size_0[TINFL_MAX_HUFF_SYMBOLS_0];
mz_uint8 m_code_size_1[TINFL_MAX_HUFF_SYMBOLS_1];
mz_uint8 m_code_size_2[TINFL_MAX_HUFF_SYMBOLS_2];
mz_uint8 m_raw_header[4], m_len_codes[TINFL_MAX_HUFF_SYMBOLS_0 + TINFL_MAX_HUFF_SYMBOLS_1 + 137];
};

#ifdef __cplusplus
}
#endif

#endif /*#ifndef MINIZ_NO_INFLATE_APIS*/

#pragma once

@@ -942,10 +1005,6 @@ typedef struct
mz_uint16 m_bit_flag;
mz_uint16 m_method;

#ifndef MINIZ_NO_TIME
MZ_TIME_T m_time;
#endif

/* CRC-32 of uncompressed data. */
mz_uint32 m_crc32;

@@ -982,6 +1041,11 @@ typedef struct
/* Guaranteed to be zero terminated, may be truncated to fit. */
char m_comment[MZ_ZIP_MAX_ARCHIVE_FILE_COMMENT_SIZE];

#ifdef MINIZ_NO_TIME
MZ_TIME_T m_padding;
#else
MZ_TIME_T m_time;
#endif
} mz_zip_archive_file_stat;

typedef size_t (*mz_file_read_func)(void *pOpaque, mz_uint64 file_ofs, void *pBuf, size_t n);
@@ -1093,9 +1157,7 @@ typedef struct
mz_uint flags;

int status;
#ifndef MINIZ_DISABLE_ZIP_READER_CRC32_CHECKS
mz_uint file_crc32;
#endif

mz_uint64 read_buf_size, read_buf_ofs, read_buf_avail, comp_remaining, out_buf_ofs, cur_file_ofs;
mz_zip_archive_file_stat file_stat;
void *pRead_buf;
@@ -1105,6 +1167,12 @@ typedef struct

tinfl_decompressor inflator;

#ifdef MINIZ_DISABLE_ZIP_READER_CRC32_CHECKS
mz_uint padding;
#else
mz_uint file_crc32;
#endif

} mz_zip_reader_extract_iter_state;

/* -------- ZIP reading */
@@ -1228,9 +1296,9 @@ MINIZ_EXPORT mz_bool mz_zip_reader_extract_file_to_cfile(mz_zip_archive *pZip, c
/* TODO */
typedef void *mz_zip_streaming_extract_state_ptr;
mz_zip_streaming_extract_state_ptr mz_zip_streaming_extract_begin(mz_zip_archive *pZip, mz_uint file_index, mz_uint flags);
uint64_t mz_zip_streaming_extract_get_size(mz_zip_archive *pZip, mz_zip_streaming_extract_state_ptr pState);
uint64_t mz_zip_streaming_extract_get_cur_ofs(mz_zip_archive *pZip, mz_zip_streaming_extract_state_ptr pState);
mz_bool mz_zip_streaming_extract_seek(mz_zip_archive *pZip, mz_zip_streaming_extract_state_ptr pState, uint64_t new_ofs);
mz_uint64 mz_zip_streaming_extract_get_size(mz_zip_archive *pZip, mz_zip_streaming_extract_state_ptr pState);
mz_uint64 mz_zip_streaming_extract_get_cur_ofs(mz_zip_archive *pZip, mz_zip_streaming_extract_state_ptr pState);
mz_bool mz_zip_streaming_extract_seek(mz_zip_archive *pZip, mz_zip_streaming_extract_state_ptr pState, mz_uint64 new_ofs);
size_t mz_zip_streaming_extract_read(mz_zip_archive *pZip, mz_zip_streaming_extract_state_ptr pState, void *pBuf, size_t buf_size);
mz_bool mz_zip_streaming_extract_end(mz_zip_archive *pZip, mz_zip_streaming_extract_state_ptr pState);
#endif
@@ -1244,7 +1312,9 @@ MINIZ_EXPORT mz_bool mz_zip_validate_archive(mz_zip_archive *pZip, mz_uint flags

/* Misc utils/helpers, valid for ZIP reading or writing */
MINIZ_EXPORT mz_bool mz_zip_validate_mem_archive(const void *pMem, size_t size, mz_uint flags, mz_zip_error *pErr);
#ifndef MINIZ_NO_STDIO
MINIZ_EXPORT mz_bool mz_zip_validate_file_archive(const char *pFilename, mz_uint flags, mz_zip_error *pErr);
#endif

/* Universal end function - calls either mz_zip_reader_end() or mz_zip_writer_end(). */
MINIZ_EXPORT mz_bool mz_zip_end(mz_zip_archive *pZip);
@@ -1318,7 +1388,7 @@ MINIZ_EXPORT mz_bool mz_zip_writer_add_from_zip_reader(mz_zip_archive *pZip, mz_
/* An archive must be manually finalized by calling this function for it to be valid. */
MINIZ_EXPORT mz_bool mz_zip_writer_finalize_archive(mz_zip_archive *pZip);

/* Finalizes a heap archive, returning a poiner to the heap block and its size. */
/* Finalizes a heap archive, returning a pointer to the heap block and its size. */
/* The heap block will be allocated using the mz_zip_archive's alloc/realloc callbacks. */
MINIZ_EXPORT mz_bool mz_zip_writer_finalize_heap_archive(mz_zip_archive *pZip, void **ppBuf, size_t *pSize);

@@ -1335,11 +1405,13 @@ MINIZ_EXPORT mz_bool mz_zip_writer_end(mz_zip_archive *pZip);
MINIZ_EXPORT mz_bool mz_zip_add_mem_to_archive_file_in_place(const char *pZip_filename, const char *pArchive_name, const void *pBuf, size_t buf_size, const void *pComment, mz_uint16 comment_size, mz_uint level_and_flags);
MINIZ_EXPORT mz_bool mz_zip_add_mem_to_archive_file_in_place_v2(const char *pZip_filename, const char *pArchive_name, const void *pBuf, size_t buf_size, const void *pComment, mz_uint16 comment_size, mz_uint level_and_flags, mz_zip_error *pErr);

#ifndef MINIZ_NO_STDIO
/* Reads a single file from an archive into a heap block. */
/* If pComment is not NULL, only the file with the specified comment will be extracted. */
/* Returns NULL on failure. */
MINIZ_EXPORT void *mz_zip_extract_archive_file_to_heap(const char *pZip_filename, const char *pArchive_name, size_t *pSize, mz_uint flags);
MINIZ_EXPORT void *mz_zip_extract_archive_file_to_heap_v2(const char *pZip_filename, const char *pArchive_name, const char *pComment, size_t *pSize, mz_uint flags, mz_zip_error *pErr);
#endif

#endif /* #ifndef MINIZ_NO_ARCHIVE_WRITING_APIS */
47
ext/pugixml/CMakeLists.txt
Normal file
@@ -0,0 +1,47 @@
project(pugixml VERSION 1.14)

# Expose options from the pugiconfig.hpp
option(PUGIXML_WCHAR_MODE "Enable wchar_t mode" OFF)
option(PUGIXML_COMPACT "Enable compact mode" OFF)

# Advanced options from pugiconfig.hpp
option(PUGIXML_NO_XPATH "Disable XPath" OFF)
option(PUGIXML_NO_STL "Disable STL" OFF)
option(PUGIXML_NO_EXCEPTIONS "Disable Exceptions" OFF)
mark_as_advanced(PUGIXML_NO_XPATH PUGIXML_NO_STL PUGIXML_NO_EXCEPTIONS)

set(PUGIXML_PUBLIC_DEFINITIONS
$<$<BOOL:${PUGIXML_WCHAR_MODE}>:PUGIXML_WCHAR_MODE>
$<$<BOOL:${PUGIXML_COMPACT}>:PUGIXML_COMPACT>
$<$<BOOL:${PUGIXML_NO_XPATH}>:PUGIXML_NO_XPATH>
$<$<BOOL:${PUGIXML_NO_STL}>:PUGIXML_NO_STL>
$<$<BOOL:${PUGIXML_NO_EXCEPTIONS}>:PUGIXML_NO_EXCEPTIONS>)

add_library(pugixml STATIC
src/pugiconfig.hpp
src/pugixml.hpp
src/pugixml.cpp)
add_library(pugixml::static ALIAS pugixml)

set_target_properties(pugixml PROPERTIES
CXX_STANDARD_REQUIRED ON
CXX_STANDARD 11)

set_property(TARGET pugixml PROPERTY EXPORT_NAME static)
target_include_directories(pugixml PUBLIC
$<BUILD_INTERFACE:${PROJECT_SOURCE_DIR}/src>)
target_compile_definitions(pugixml PUBLIC
${PUGIXML_BUILD_DEFINES}
${PUGIXML_PUBLIC_DEFINITIONS})

add_library(pugixml::pugixml ALIAS pugixml)

set_target_properties(pugixml PROPERTIES
EXCLUDE_FROM_ALL ON
POSITION_INDEPENDENT_CODE ON
SOVERSION ${PROJECT_VERSION_MAJOR}
VERSION ${PROJECT_VERSION}
OUTPUT_NAME pugixml)

set_target_properties(pugixml PROPERTIES
EXCLUDE_FROM_ALL OFF)
24
ext/pugixml/LICENSE.md
Normal file
@@ -0,0 +1,24 @@
MIT License

Copyright (c) 2006-2023 Arseny Kapoulkine

Permission is hereby granted, free of charge, to any person
obtaining a copy of this software and associated documentation
files (the "Software"), to deal in the Software without
restriction, including without limitation the rights to use,
copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following
conditions:

The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.
77
ext/pugixml/src/pugiconfig.hpp
Normal file
@@ -0,0 +1,77 @@
/**
 * pugixml parser - version 1.14
 * --------------------------------------------------------
 * Copyright (C) 2006-2023, by Arseny Kapoulkine (arseny.kapoulkine@gmail.com)
 * Report bugs and download new versions at https://pugixml.org/
 *
 * This library is distributed under the MIT License. See notice at the end
 * of this file.
 *
 * This work is based on the pugxml parser, which is:
 * Copyright (C) 2003, by Kristen Wegner (kristen@tima.net)
 */

#ifndef HEADER_PUGICONFIG_HPP
#define HEADER_PUGICONFIG_HPP

// Uncomment this to enable wchar_t mode
// #define PUGIXML_WCHAR_MODE

// Uncomment this to enable compact mode
// #define PUGIXML_COMPACT

// Uncomment this to disable XPath
// #define PUGIXML_NO_XPATH

// Uncomment this to disable STL
// #define PUGIXML_NO_STL

// Uncomment this to disable exceptions
// #define PUGIXML_NO_EXCEPTIONS

// Set this to control attributes for public classes/functions, i.e.:
// #define PUGIXML_API __declspec(dllexport) // to export all public symbols from DLL
// #define PUGIXML_CLASS __declspec(dllimport) // to import all classes from DLL
// #define PUGIXML_FUNCTION __fastcall // to set calling conventions to all public functions to fastcall
// In absence of PUGIXML_CLASS/PUGIXML_FUNCTION definitions PUGIXML_API is used instead

// Tune these constants to adjust memory-related behavior
// #define PUGIXML_MEMORY_PAGE_SIZE 32768
// #define PUGIXML_MEMORY_OUTPUT_STACK 10240
// #define PUGIXML_MEMORY_XPATH_PAGE_SIZE 4096

// Tune this constant to adjust max nesting for XPath queries
// #define PUGIXML_XPATH_DEPTH_LIMIT 1024

// Uncomment this to switch to header-only version
// #define PUGIXML_HEADER_ONLY

// Uncomment this to enable long long support
// #define PUGIXML_HAS_LONG_LONG

#endif

/**
 * Copyright (c) 2006-2023 Arseny Kapoulkine
 *
 * Permission is hereby granted, free of charge, to any person
 * obtaining a copy of this software and associated documentation
 * files (the "Software"), to deal in the Software without
 * restriction, including without limitation the rights to use,
 * copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following
 * conditions:
 *
 * The above copyright notice and this permission notice shall be
 * included in all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
 * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
 * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
 * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
13226
ext/pugixml/src/pugixml.cpp
Normal file
File diff suppressed because it is too large
1516
ext/pugixml/src/pugixml.hpp
Normal file
File diff suppressed because it is too large
@@ -1,4 +0,0 @@
add_library(rapidxml INTERFACE)
add_library(External::rapidxml ALIAS rapidxml)
target_include_directories(rapidxml
INTERFACE ${CMAKE_CURRENT_SOURCE_DIR})
@@ -1,52 +0,0 @@
Use of this software is granted under one of the following two licenses,
to be chosen freely by the user.

1. Boost Software License - Version 1.0 - August 17th, 2003
===============================================================================

Copyright (c) 2006, 2007 Marcin Kalicinski

Permission is hereby granted, free of charge, to any person or organization
obtaining a copy of the software and accompanying documentation covered by
this license (the "Software") to use, reproduce, display, distribute,
execute, and transmit the Software, and to prepare derivative works of the
Software, and to permit third-parties to whom the Software is furnished to
do so, all subject to the following:

The copyright notices in the Software and this entire statement, including
the above license grant, this restriction and the following disclaimer,
must be included in all copies of the Software, in whole or in part, and
all derivative works of the Software, unless such copies or derivative
works are solely in the form of machine-executable object code generated by
a source language processor.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE, TITLE AND NON-INFRINGEMENT. IN NO EVENT
SHALL THE COPYRIGHT HOLDERS OR ANYONE DISTRIBUTING THE SOFTWARE BE LIABLE
FOR ANY DAMAGES OR OTHER LIABILITY, WHETHER IN CONTRACT, TORT OR OTHERWISE,
ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.

2. The MIT License
===============================================================================

Copyright (c) 2006, 2007 Marcin Kalicinski

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
of the Software, and to permit persons to whom the Software is furnished to do so,
subject to the following conditions:

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
IN THE SOFTWARE.
File diff suppressed because it is too large
@@ -1,174 +0,0 @@
#ifndef RAPIDXML_ITERATORS_HPP_INCLUDED
#define RAPIDXML_ITERATORS_HPP_INCLUDED

// Copyright (C) 2006, 2009 Marcin Kalicinski
// Version 1.13
// Revision $DateTime: 2009/05/13 01:46:17 $
//! \file rapidxml_iterators.hpp This file contains rapidxml iterators

#include "rapidxml.hpp"

namespace rapidxml
{

//! Iterator of child nodes of xml_node
template<class Ch>
class node_iterator
{

public:

typedef typename xml_node<Ch> value_type;
typedef typename xml_node<Ch> &reference;
typedef typename xml_node<Ch> *pointer;
typedef std::ptrdiff_t difference_type;
typedef std::bidirectional_iterator_tag iterator_category;

node_iterator()
: m_node(0)
{
}

node_iterator(xml_node<Ch> *node)
: m_node(node->first_node())
{
}

reference operator *() const
{
assert(m_node);
return *m_node;
}

pointer operator->() const
{
assert(m_node);
return m_node;
}

node_iterator& operator++()
{
assert(m_node);
m_node = m_node->next_sibling();
return *this;
}

node_iterator operator++(int)
{
node_iterator tmp = *this;
++this;
return tmp;
}

node_iterator& operator--()
{
assert(m_node && m_node->previous_sibling());
m_node = m_node->previous_sibling();
return *this;
}

node_iterator operator--(int)
{
node_iterator tmp = *this;
++this;
return tmp;
}

bool operator ==(const node_iterator<Ch> &rhs)
{
return m_node == rhs.m_node;
}

bool operator !=(const node_iterator<Ch> &rhs)
{
return m_node != rhs.m_node;
}

private:

xml_node<Ch> *m_node;

};

//! Iterator of child attributes of xml_node
template<class Ch>
class attribute_iterator
{

public:

typedef typename xml_attribute<Ch> value_type;
typedef typename xml_attribute<Ch> &reference;
typedef typename xml_attribute<Ch> *pointer;
typedef std::ptrdiff_t difference_type;
typedef std::bidirectional_iterator_tag iterator_category;

attribute_iterator()
: m_attribute(0)
{
}

attribute_iterator(xml_node<Ch> *node)
: m_attribute(node->first_attribute())
{
}

reference operator *() const
{
assert(m_attribute);
return *m_attribute;
}

pointer operator->() const
{
assert(m_attribute);
return m_attribute;
}

attribute_iterator& operator++()
{
assert(m_attribute);
m_attribute = m_attribute->next_attribute();
return *this;
}

attribute_iterator operator++(int)
{
attribute_iterator tmp = *this;
++this;
return tmp;
}

attribute_iterator& operator--()
{
assert(m_attribute && m_attribute->previous_attribute());
m_attribute = m_attribute->previous_attribute();
return *this;
}

attribute_iterator operator--(int)
{
attribute_iterator tmp = *this;
++this;
return tmp;
}

bool operator ==(const attribute_iterator<Ch> &rhs)
{
return m_attribute == rhs.m_attribute;
}

bool operator !=(const attribute_iterator<Ch> &rhs)
{
return m_attribute != rhs.m_attribute;
}

private:

xml_attribute<Ch> *m_attribute;

};

}

#endif
@@ -1,421 +0,0 @@
|
||||
#ifndef RAPIDXML_PRINT_HPP_INCLUDED
|
||||
#define RAPIDXML_PRINT_HPP_INCLUDED
|
||||
|
||||
// Copyright (C) 2006, 2009 Marcin Kalicinski
|
||||
// Version 1.13
|
||||
// Revision $DateTime: 2009/05/13 01:46:17 $
|
||||
//! \file rapidxml_print.hpp This file contains rapidxml printer implementation
|
||||
|
||||
#include "rapidxml.hpp"
|
||||
|
||||
// Only include streams if not disabled
|
||||
#ifndef RAPIDXML_NO_STREAMS
|
||||
#include <ostream>
|
||||
#include <iterator>
|
||||
#endif
|
||||
|
||||
namespace rapidxml
|
||||
{
|
||||
|
||||
///////////////////////////////////////////////////////////////////////
|
||||
// Printing flags
|
||||
|
||||
const int print_no_indenting = 0x1; //!< Printer flag instructing the printer to suppress indenting of XML. See print() function.
|
||||
|
||||
///////////////////////////////////////////////////////////////////////
|
||||
// Internal
|
||||
|
||||
//! \cond internal
|
||||
namespace internal
|
||||
{
|
||||
|
||||
///////////////////////////////////////////////////////////////////////////
|
||||
// Internal character operations
|
||||
|
||||
// Copy characters from given range to given output iterator
|
||||
template<class OutIt, class Ch>
|
||||
inline OutIt copy_chars(const Ch *begin, const Ch *end, OutIt out)
|
||||
{
|
||||
while (begin != end)
|
||||
*out++ = *begin++;
|
||||
return out;
|
||||
}
|
||||
|
||||
// Copy characters from given range to given output iterator and expand
|
||||
// characters into references (< > ' " &)
|
||||
template<class OutIt, class Ch>
|
||||
inline OutIt copy_and_expand_chars(const Ch *begin, const Ch *end, Ch noexpand, OutIt out)
|
||||
{
|
||||
while (begin != end)
|
||||
{
|
||||
if (*begin == noexpand)
|
||||
{
|
||||
*out++ = *begin; // No expansion, copy character
|
||||
}
|
||||
else
|
||||
{
|
||||
switch (*begin)
|
||||
{
|
||||
case Ch('<'):
|
||||
*out++ = Ch('&'); *out++ = Ch('l'); *out++ = Ch('t'); *out++ = Ch(';');
|
||||
break;
|
||||
case Ch('>'):
|
||||
*out++ = Ch('&'); *out++ = Ch('g'); *out++ = Ch('t'); *out++ = Ch(';');
|
||||
break;
|
||||
case Ch('\''):
|
||||
*out++ = Ch('&'); *out++ = Ch('a'); *out++ = Ch('p'); *out++ = Ch('o'); *out++ = Ch('s'); *out++ = Ch(';');
|
||||
break;
|
||||
case Ch('"'):
|
||||
*out++ = Ch('&'); *out++ = Ch('q'); *out++ = Ch('u'); *out++ = Ch('o'); *out++ = Ch('t'); *out++ = Ch(';');
|
||||
break;
|
||||
case Ch('&'):
|
||||
*out++ = Ch('&'); *out++ = Ch('a'); *out++ = Ch('m'); *out++ = Ch('p'); *out++ = Ch(';');
|
||||
break;
|
||||
default:
|
||||
*out++ = *begin; // No expansion, copy character
|
||||
}
|
||||
}
|
||||
++begin; // Step to next character
|
||||
}
|
||||
return out;
|
||||
}
|
||||
|
||||
// Fill given output iterator with repetitions of the same character
|
||||
template<class OutIt, class Ch>
|
||||
inline OutIt fill_chars(OutIt out, int n, Ch ch)
|
||||
{
|
||||
for (int i = 0; i < n; ++i)
|
||||
*out++ = ch;
|
||||
return out;
|
||||
}
|
||||
|
||||
// Find character
|
||||
template<class Ch, Ch ch>
|
||||
inline bool find_char(const Ch *begin, const Ch *end)
|
||||
{
|
||||
while (begin != end)
|
||||
if (*begin++ == ch)
|
||||
return true;
|
||||
return false;
|
||||
}
|
||||
|
||||
///////////////////////////////////////////////////////////////////////////
|
||||
// Internal printing operations
|
||||
|
||||
// Print node
|
||||
template<class OutIt, class Ch>
|
||||
inline OutIt print_node(OutIt out, const xml_node<Ch> *node, int flags, int indent)
|
||||
{
|
||||
// Print proper node type
|
||||
switch (node->type())
|
||||
{
|
||||
|
||||
// Document
|
||||
case node_document:
|
||||
out = print_children(out, node, flags, indent);
|
||||
break;
|
||||
|
||||
// Element
|
||||
case node_element:
|
||||
out = print_element_node(out, node, flags, indent);
|
||||
break;
|
||||
|
||||
// Data
|
||||
case node_data:
|
||||
out = print_data_node(out, node, flags, indent);
|
||||
break;
|
||||
|
||||
// CDATA
|
||||
case node_cdata:
|
||||
out = print_cdata_node(out, node, flags, indent);
|
||||
break;
|
||||
|
||||
// Declaration
|
||||
case node_declaration:
|
||||
out = print_declaration_node(out, node, flags, indent);
|
||||
break;
|
||||
|
||||
// Comment
|
||||
case node_comment:
|
||||
out = print_comment_node(out, node, flags, indent);
|
||||
break;
|
||||
|
||||
// Doctype
|
||||
case node_doctype:
|
||||
out = print_doctype_node(out, node, flags, indent);
|
||||
break;
|
||||
|
||||
// Pi
|
||||
case node_pi:
|
||||
out = print_pi_node(out, node, flags, indent);
|
||||
break;
|
||||
|
||||
// Unknown
|
||||
default:
|
||||
assert(0);
|
||||
break;
|
||||
}
|
||||
|
||||
// If indenting not disabled, add line break after node
|
||||
if (!(flags & print_no_indenting))
|
||||
*out = Ch('\n'), ++out;
|
||||
|
||||
// Return modified iterator
|
||||
return out;
|
||||
}
|
||||
|
||||
// Print children of the node
|
||||
template<class OutIt, class Ch>
|
||||
inline OutIt print_children(OutIt out, const xml_node<Ch> *node, int flags, int indent)
|
||||
{
|
||||
for (xml_node<Ch> *child = node->first_node(); child; child = child->next_sibling())
|
||||
out = print_node(out, child, flags, indent);
|
||||
return out;
|
||||
}
|
||||
|
||||
// Print attributes of the node
|
||||
template<class OutIt, class Ch>
|
||||
inline OutIt print_attributes(OutIt out, const xml_node<Ch> *node, int flags)
|
||||
{
|
||||
for (xml_attribute<Ch> *attribute = node->first_attribute(); attribute; attribute = attribute->next_attribute())
|
||||
{
|
||||
if (attribute->name() && attribute->value())
|
||||
{
|
||||
// Print attribute name
|
||||
*out = Ch(' '), ++out;
|
||||
out = copy_chars(attribute->name(), attribute->name() + attribute->name_size(), out);
|
||||
*out = Ch('='), ++out;
|
||||
// Print attribute value using appropriate quote type
|
||||
if (find_char<Ch, Ch('"')>(attribute->value(), attribute->value() + attribute->value_size()))
|
||||
{
|
||||
*out = Ch('\''), ++out;
|
||||
out = copy_and_expand_chars(attribute->value(), attribute->value() + attribute->value_size(), Ch('"'), out);
|
||||
*out = Ch('\''), ++out;
|
||||
}
|
||||
else
|
||||
{
|
||||
*out = Ch('"'), ++out;
|
||||
out = copy_and_expand_chars(attribute->value(), attribute->value() + attribute->value_size(), Ch('\''), out);
|
||||
*out = Ch('"'), ++out;
|
||||
}
|
||||
}
|
||||
}
|
||||
return out;
|
||||
}
|
||||
|
||||
// Print data node
|
||||
template<class OutIt, class Ch>
|
||||
inline OutIt print_data_node(OutIt out, const xml_node<Ch> *node, int flags, int indent)
|
||||
{
|
||||
assert(node->type() == node_data);
|
||||
if (!(flags & print_no_indenting))
|
||||
out = fill_chars(out, indent, Ch('\t'));
|
||||
out = copy_and_expand_chars(node->value(), node->value() + node->value_size(), Ch(0), out);
|
||||
return out;
|
||||
}
|
||||
|
||||
// Print data node
|
||||
template<class OutIt, class Ch>
|
||||
inline OutIt print_cdata_node(OutIt out, const xml_node<Ch> *node, int flags, int indent)
|
||||
{
|
||||
assert(node->type() == node_cdata);
|
||||
if (!(flags & print_no_indenting))
|
||||
out = fill_chars(out, indent, Ch('\t'));
|
||||
*out = Ch('<'); ++out;
|
||||
*out = Ch('!'); ++out;
|
||||
*out = Ch('['); ++out;
|
||||
*out = Ch('C'); ++out;
|
||||
*out = Ch('D'); ++out;
|
||||
*out = Ch('A'); ++out;
|
||||
*out = Ch('T'); ++out;
|
||||
*out = Ch('A'); ++out;
|
||||
*out = Ch('['); ++out;
|
||||
out = copy_chars(node->value(), node->value() + node->value_size(), out);
|
||||
*out = Ch(']'); ++out;
|
||||
*out = Ch(']'); ++out;
|
||||
*out = Ch('>'); ++out;
|
||||
return out;
|
||||
}
|
||||
|
||||
// Print element node
|
||||
template<class OutIt, class Ch>
|
||||
inline OutIt print_element_node(OutIt out, const xml_node<Ch> *node, int flags, int indent)
|
||||
{
|
||||
assert(node->type() == node_element);
|
||||
|
||||
// Print element name and attributes, if any
|
||||
if (!(flags & print_no_indenting))
|
||||
out = fill_chars(out, indent, Ch('\t'));
|
||||
*out = Ch('<'), ++out;
|
||||
out = copy_chars(node->name(), node->name() + node->name_size(), out);
|
||||
out = print_attributes(out, node, flags);
|
||||
|
||||
// If node is childless
|
||||
if (node->value_size() == 0 && !node->first_node())
|
||||
{
|
||||
// Print childless node tag ending
|
||||
*out = Ch('/'), ++out;
|
||||
*out = Ch('>'), ++out;
|
||||
}
|
||||
else
|
||||
{
|
||||
// Print normal node tag ending
|
||||
*out = Ch('>'), ++out;
|
||||
|
||||
// Test if node contains a single data node only (and no other nodes)
|
||||
xml_node<Ch> *child = node->first_node();
|
||||
if (!child)
|
||||
{
|
||||
// If node has no children, only print its value without indenting
|
||||
out = copy_and_expand_chars(node->value(), node->value() + node->value_size(), Ch(0), out);
|
||||
}
|
||||
else if (child->next_sibling() == 0 && child->type() == node_data)
|
||||
{
|
||||
// If node has a sole data child, only print its value without indenting
|
||||
out = copy_and_expand_chars(child->value(), child->value() + child->value_size(), Ch(0), out);
|
||||
}
|
||||
else
|
||||
{
|
||||
// Print all children with full indenting
|
||||
if (!(flags & print_no_indenting))
|
||||
*out = Ch('\n'), ++out;
|
||||
out = print_children(out, node, flags, indent + 1);
|
||||
if (!(flags & print_no_indenting))
|
||||
out = fill_chars(out, indent, Ch('\t'));
|
||||
}
|
||||
|
||||
// Print node end
|
||||
*out = Ch('<'), ++out;
|
||||
*out = Ch('/'), ++out;
|
||||
out = copy_chars(node->name(), node->name() + node->name_size(), out);
|
||||
*out = Ch('>'), ++out;
|
||||
}
|
||||
return out;
|
||||
}
|
||||
|
||||
// Print declaration node
|
||||
template<class OutIt, class Ch>
|
||||
inline OutIt print_declaration_node(OutIt out, const xml_node<Ch> *node, int flags, int indent)
|
||||
{
|
||||
// Print declaration start
|
||||
if (!(flags & print_no_indenting))
|
||||
out = fill_chars(out, indent, Ch('\t'));
|
||||
*out = Ch('<'), ++out;
|
||||
*out = Ch('?'), ++out;
|
||||
*out = Ch('x'), ++out;
|
||||
*out = Ch('m'), ++out;
|
||||
*out = Ch('l'), ++out;
|
||||
|
||||
// Print attributes
|
||||
out = print_attributes(out, node, flags);
|
||||
|
||||
// Print declaration end
|
||||
*out = Ch('?'), ++out;
|
||||
*out = Ch('>'), ++out;
|
||||
|
        return out;
    }

    // Print comment node
    template<class OutIt, class Ch>
    inline OutIt print_comment_node(OutIt out, const xml_node<Ch> *node, int flags, int indent)
    {
        assert(node->type() == node_comment);
        if (!(flags & print_no_indenting))
            out = fill_chars(out, indent, Ch('\t'));
        *out = Ch('<'), ++out;
        *out = Ch('!'), ++out;
        *out = Ch('-'), ++out;
        *out = Ch('-'), ++out;
        out = copy_chars(node->value(), node->value() + node->value_size(), out);
        *out = Ch('-'), ++out;
        *out = Ch('-'), ++out;
        *out = Ch('>'), ++out;
        return out;
    }

    // Print doctype node
    template<class OutIt, class Ch>
    inline OutIt print_doctype_node(OutIt out, const xml_node<Ch> *node, int flags, int indent)
    {
        assert(node->type() == node_doctype);
        if (!(flags & print_no_indenting))
            out = fill_chars(out, indent, Ch('\t'));
        *out = Ch('<'), ++out;
        *out = Ch('!'), ++out;
        *out = Ch('D'), ++out;
        *out = Ch('O'), ++out;
        *out = Ch('C'), ++out;
        *out = Ch('T'), ++out;
        *out = Ch('Y'), ++out;
        *out = Ch('P'), ++out;
        *out = Ch('E'), ++out;
        *out = Ch(' '), ++out;
        out = copy_chars(node->value(), node->value() + node->value_size(), out);
        *out = Ch('>'), ++out;
        return out;
    }

    // Print pi node
    template<class OutIt, class Ch>
    inline OutIt print_pi_node(OutIt out, const xml_node<Ch> *node, int flags, int indent)
    {
        assert(node->type() == node_pi);
        if (!(flags & print_no_indenting))
            out = fill_chars(out, indent, Ch('\t'));
        *out = Ch('<'), ++out;
        *out = Ch('?'), ++out;
        out = copy_chars(node->name(), node->name() + node->name_size(), out);
        *out = Ch(' '), ++out;
        out = copy_chars(node->value(), node->value() + node->value_size(), out);
        *out = Ch('?'), ++out;
        *out = Ch('>'), ++out;
        return out;
    }

}
//! \endcond

///////////////////////////////////////////////////////////////////////////
// Printing

//! Prints XML to given output iterator.
//! \param out Output iterator to print to.
//! \param node Node to be printed. Pass xml_document to print entire document.
//! \param flags Flags controlling how XML is printed.
//! \return Output iterator pointing to position immediately after last character of printed text.
template<class OutIt, class Ch>
inline OutIt print(OutIt out, const xml_node<Ch> &node, int flags = 0)
{
    return internal::print_node(out, &node, flags, 0);
}

#ifndef RAPIDXML_NO_STREAMS

//! Prints XML to given output stream.
//! \param out Output stream to print to.
//! \param node Node to be printed. Pass xml_document to print entire document.
//! \param flags Flags controlling how XML is printed.
//! \return Output stream.
template<class Ch>
inline std::basic_ostream<Ch> &print(std::basic_ostream<Ch> &out, const xml_node<Ch> &node, int flags = 0)
{
    print(std::ostream_iterator<Ch>(out), node, flags);
    return out;
}

//! Prints formatted XML to given output stream. Uses default printing flags. Use print() function to customize printing process.
//! \param out Output stream to print to.
//! \param node Node to be printed.
//! \return Output stream.
template<class Ch>
inline std::basic_ostream<Ch> &operator <<(std::basic_ostream<Ch> &out, const xml_node<Ch> &node)
{
    return print(out, node);
}

#endif

}

#endif
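For context, the printing API removed above was normally driven either through the stream overload or through the raw iterator form with explicit flags; a minimal sketch of the old usage (assuming rapidxml.hpp and rapidxml_print.hpp are on the include path):

    #include <iostream>
    #include <iterator>
    #include "rapidxml.hpp"
    #include "rapidxml_print.hpp"

    int main()
    {
        // rapidxml parses in situ, so the buffer must be mutable and null-terminated
        char xml[] = "<map version=\"1.0\"><layer name=\"bg\"/></map>";
        rapidxml::xml_document<> doc;
        doc.parse<0>(xml);

        std::cout << doc;  // operator<< prints with default (indented) flags

        // The iterator form gives explicit control over the print flags
        rapidxml::print(std::ostream_iterator<char>(std::cout), doc,
                        rapidxml::print_no_indenting);
        return 0;
    }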
@@ -1,122 +0,0 @@
#ifndef RAPIDXML_UTILS_HPP_INCLUDED
#define RAPIDXML_UTILS_HPP_INCLUDED

// Copyright (C) 2006, 2009 Marcin Kalicinski
// Version 1.13
// Revision $DateTime: 2009/05/13 01:46:17 $
//! \file rapidxml_utils.hpp This file contains high-level rapidxml utilities that can be useful
//! in certain simple scenarios. They should probably not be used if maximizing performance is the main objective.

#include "rapidxml.hpp"
#include <vector>
#include <string>
#include <fstream>
#include <stdexcept>

namespace rapidxml
{

    //! Represents data loaded from a file
    template<class Ch = char>
    class file
    {
    public:

        //! Loads file into the memory. Data will be automatically destroyed by the destructor.
        //! \param filename Filename to load.
        file(const char *filename)
        {
            using namespace std;

            // Open stream
            basic_ifstream<Ch> stream(filename, ios::binary);
            if (!stream)
                throw runtime_error(string("cannot open file ") + filename);
            stream.unsetf(ios::skipws);

            // Determine stream size
            stream.seekg(0, ios::end);
            size_t size = stream.tellg();
            stream.seekg(0);

            // Load data and add terminating 0
            m_data.resize(size + 1);
            stream.read(&m_data.front(), static_cast<streamsize>(size));
            m_data[size] = 0;
        }

        //! Loads file into the memory. Data will be automatically destroyed by the destructor
        //! \param stream Stream to load from
        file(std::basic_istream<Ch> &stream)
        {
            using namespace std;

            // Load data and add terminating 0
            stream.unsetf(ios::skipws);
            m_data.assign(istreambuf_iterator<Ch>(stream), istreambuf_iterator<Ch>());
            if (stream.fail() || stream.bad())
                throw runtime_error("error reading stream");
            m_data.push_back(0);
        }

        //! Gets file data.
        //! \return Pointer to data of file.
        Ch *data()
        {
            return &m_data.front();
        }

        //! Gets file data.
        //! \return Pointer to data of file.
        const Ch *data() const
        {
            return &m_data.front();
        }

        //! Gets file data size.
        //! \return Size of file data, in characters.
        std::size_t size() const
        {
            return m_data.size();
        }

    private:
        std::vector<Ch> m_data;   // File data
    };

    //! Counts children of node. Time complexity is O(n).
    //! \return Number of children of node
    template<class Ch>
    inline std::size_t count_children(xml_node<Ch> *node)
    {
        xml_node<Ch> *child = node->first_node();
        std::size_t count = 0;
        while (child)
        {
            ++count;
            child = child->next_sibling();
        }
        return count;
    }

    //! Counts attributes of node. Time complexity is O(n).
    //! \return Number of attributes of node
    template<class Ch>
    inline std::size_t count_attributes(xml_node<Ch> *node)
    {
        xml_attribute<Ch> *attr = node->first_attribute();
        std::size_t count = 0;
        while (attr)
        {
            ++count;
            attr = attr->next_attribute();
        }
        return count;
    }

}

#endif
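The deleted file helper above was the usual way to feed a mutable, null-terminated buffer into rapidxml's in-situ parser; a minimal sketch of the old pattern (the file name here is hypothetical):

    #include <iostream>
    #include "rapidxml.hpp"
    #include "rapidxml_utils.hpp"

    int main()
    {
        // file<> owns a mutable, null-terminated copy of the file contents,
        // which is exactly what the in-situ parser requires
        rapidxml::file<> source("map.tmx");

        rapidxml::xml_document<> doc;
        doc.parse<0>(source.data());

        if (rapidxml::xml_node<>* root = doc.first_node("map"))
            std::cout << "map has " << rapidxml::count_children(root) << " child nodes\n";
        return 0;
    }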
52
ext/tmxlite/CMakeLists.txt
Normal file
@@ -0,0 +1,52 @@
project(tmxlite VERSION 1.3.1)

add_library(${PROJECT_NAME} STATIC
    include/tmxlite/Config.hpp
    include/tmxlite/FreeFuncs.hpp
    include/tmxlite/ImageLayer.hpp
    include/tmxlite/Layer.hpp
    include/tmxlite/LayerGroup.hpp
    include/tmxlite/Map.hpp
    include/tmxlite/Object.hpp
    include/tmxlite/ObjectGroup.hpp
    include/tmxlite/ObjectTypes.hpp
    include/tmxlite/Property.hpp
    include/tmxlite/TileLayer.hpp
    include/tmxlite/Tileset.hpp
    include/tmxlite/Types.hpp
    include/tmxlite/Types.inl
    include/tmxlite/detail/Log.hpp

    src/FreeFuncs.cpp
    src/ImageLayer.cpp
    src/Map.cpp
    src/Object.cpp
    src/ObjectGroup.cpp
    src/Property.cpp
    src/TileLayer.cpp
    src/LayerGroup.cpp
    src/Tileset.cpp
    src/ObjectTypes.cpp)

if (NOT USE_ZLIB)
    target_sources(${PROJECT_NAME} PRIVATE
        include/tmxlite/detail/gzip.hpp
        src/detail/gzip.cpp)
endif()

set_target_properties(${PROJECT_NAME} PROPERTIES
    CXX_STANDARD 20
    CXX_STANDARD_REQUIRED ON)

target_include_directories(${PROJECT_NAME} PUBLIC
    ${CMAKE_CURRENT_SOURCE_DIR}/include)

target_compile_options(${PROJECT_NAME} PRIVATE -Wall)

target_compile_definitions(${PROJECT_NAME} PRIVATE
    $<$<BOOL:${MSVC}>:_CRT_SECURE_NO_WARNINGS> # disable MSVC warning
    $<$<TARGET_EXISTS:ZLIB::ZLIB>:USE_ZLIB>)

target_link_libraries(${PROJECT_NAME} base64::base64 pugixml Zstd::Zstd
    $<$<TARGET_EXISTS:ZLIB::ZLIB>:ZLIB::ZLIB>
    $<$<TARGET_EXISTS:miniz::miniz>:miniz::miniz>)
13
ext/tmxlite/LICENSE
Normal file
@@ -0,0 +1,13 @@
(c)Matt Marchant & contributors 2016 - 2021 http://trederia.blogspot.com

tmxlite - Zlib license.

This software is provided 'as-is', without any express or implied warranty.
In no event will the authors be held liable for any damages arising from the use of this software.
Permission is granted to anyone to use this software for any purpose, including commercial applications,
and to alter it and redistribute it freely, subject to the following restrictions:
The origin of this software must not be misrepresented; you must not claim that you wrote the original software.
If you use this software in a product, an acknowledgment in the product documentation would be appreciated but is not required.
Altered source versions must be plainly marked as such, and must not be misrepresented as being the original software.

This notice may not be removed or altered from any source distribution.
31
ext/tmxlite/include/tmxlite/Config.hpp
Normal file
@@ -0,0 +1,31 @@
/*********************************************************************
(c) Matt Marchant 2016 - 2021
http://trederia.blogspot.com

tmxlite - Zlib license.

This software is provided 'as-is', without any express or
implied warranty. In no event will the authors be held
liable for any damages arising from the use of this software.

Permission is granted to anyone to use this software for any purpose,
including commercial applications, and to alter it and redistribute
it freely, subject to the following restrictions:

1. The origin of this software must not be misrepresented;
you must not claim that you wrote the original software.
If you use this software in a product, an acknowledgment
in the product documentation would be appreciated but
is not required.

2. Altered source versions must be plainly marked as such,
and must not be misrepresented as being the original software.

3. This notice may not be removed or altered from any
source distribution.
*********************************************************************/

#pragma once

// static build doesn't need import/export macros
#define TMXLITE_EXPORT_API
126
ext/tmxlite/include/tmxlite/FreeFuncs.hpp
Normal file
@@ -0,0 +1,126 @@
/*********************************************************************
Matt Marchant 2016 - 2021
http://trederia.blogspot.com

tmxlite - Zlib license.

This software is provided 'as-is', without any express or
implied warranty. In no event will the authors be held
liable for any damages arising from the use of this software.

Permission is granted to anyone to use this software for any purpose,
including commercial applications, and to alter it and redistribute
it freely, subject to the following restrictions:

1. The origin of this software must not be misrepresented;
you must not claim that you wrote the original software.
If you use this software in a product, an acknowledgment
in the product documentation would be appreciated but
is not required.

2. Altered source versions must be plainly marked as such,
and must not be misrepresented as being the original software.

3. This notice may not be removed or altered from any
source distribution.
*********************************************************************/

#pragma once

#include "tmxlite/detail/Log.hpp"
#include "tmxlite/Types.hpp"

#include <string>
#include <sstream>
#include <vector>
#include <functional>
#include <algorithm>

namespace tmx
{
    // using inline here just to suppress unused warnings on gcc
    bool decompress(const char* source, std::vector<unsigned char>& dest, std::size_t inSize, std::size_t expectedSize);

    static inline Colour colourFromString(std::string str)
    {
        // removes preceding #
        auto result = str.find_last_of('#');
        if (result != std::string::npos)
        {
            str = str.substr(result + 1);
        }

        if (str.size() == 6 || str.size() == 8)
        {
            unsigned int value, r, g, b;
            unsigned int a = 255;
            std::stringstream input(str);
            input >> std::hex >> value;

            r = (value >> 16) & 0xff;
            g = (value >> 8) & 0xff;
            b = value & 0xff;

            if (str.size() == 8)
            {
                a = (value >> 24) & 0xff;
            }

            return { std::uint8_t(r), std::uint8_t(g), std::uint8_t(b), std::uint8_t(a) };
        }
        Logger::log(str + ": not a valid colour string", Logger::Type::Error);
        return {};
    }

    static inline std::string resolveFilePath(std::string path, const std::string& workingDir)
    {
        static const std::string match("../");
        std::size_t result = path.find(match);
        std::size_t count = 0;
        while (result != std::string::npos)
        {
            count++;
            path = path.substr(result + match.size());
            result = path.find(match);
        }

        if (workingDir.empty()) return path;

        std::string outPath = workingDir;
        for (auto i = 0u; i < count; ++i)
        {
            result = outPath.find_last_of('/');
            if (result != std::string::npos)
            {
                outPath = outPath.substr(0, result);
            }
        }

        return outPath + '/' + path;
    }

    static inline std::string getFilePath(const std::string& path)
    {
        // TODO this doesn't actually check that there is a file at the
        // end of the path, or that it's even a valid path...

        static auto searchFunc = [](const char separator, const std::string& path)->std::string
        {
            std::size_t i = path.rfind(separator, path.length());
            if (i != std::string::npos)
            {
                return (path.substr(0, i + 1));
            }

            return "";
        };

#ifdef _WIN32 // try Windows formatted paths first
        std::string retVal = searchFunc('\\', path);
        if (!retVal.empty()) return retVal;
#endif

        return searchFunc('/', path);
    }
} // namespace tmx
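Tiled serialises colours as #RRGGBB or #AARRGGBB, which is what colourFromString above decodes; a quick illustrative sketch (assuming tmx::Colour exposes the r/g/b/a members declared in Types.hpp; FreeFuncs.hpp is an internal header, included directly here only for illustration):

    #include <tmxlite/FreeFuncs.hpp>
    #include <iostream>

    int main()
    {
        // Six hex digits: RRGGBB, alpha defaults to 255
        tmx::Colour opaque = tmx::colourFromString("#4080c0");
        // Eight hex digits: AARRGGBB, alpha is the leading byte
        tmx::Colour faded = tmx::colourFromString("#804080c0");

        std::cout << int(opaque.r) << ' ' << int(opaque.g) << ' '
                  << int(opaque.b) << ' ' << int(opaque.a) << '\n'; // 64 128 192 255
        std::cout << int(faded.a) << '\n';                          // 128
        return 0;
    }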
107
ext/tmxlite/include/tmxlite/ImageLayer.hpp
Normal file
@@ -0,0 +1,107 @@
/*********************************************************************
Matt Marchant 2016 - 2022
http://trederia.blogspot.com

tmxlite - Zlib license.

This software is provided 'as-is', without any express or
implied warranty. In no event will the authors be held
liable for any damages arising from the use of this software.

Permission is granted to anyone to use this software for any purpose,
including commercial applications, and to alter it and redistribute
it freely, subject to the following restrictions:

1. The origin of this software must not be misrepresented;
you must not claim that you wrote the original software.
If you use this software in a product, an acknowledgment
in the product documentation would be appreciated but
is not required.

2. Altered source versions must be plainly marked as such,
and must not be misrepresented as being the original software.

3. This notice may not be removed or altered from any
source distribution.
*********************************************************************/

#pragma once

#include "tmxlite/Config.hpp"
#include "tmxlite/Layer.hpp"
#include "tmxlite/Types.hpp"

namespace tmx
{
    /*!
    \brief Image layers contain a single image which makes up that
    layer. The parser contains the fully resolved path to the image
    relative to the working directory.
    */
    class TMXLITE_EXPORT_API ImageLayer final : public Layer
    {
    public:
        explicit ImageLayer(const std::string&);

        Type getType() const override { return Layer::Type::Image; }
        void parse(const pugi::xml_node&, Map*) override;

        /*!
        \brief Returns the path, relative to the working directory,
        of the image used by the image layer.
        */
        const std::string& getImagePath() const { return m_filePath; }

        /*!
        \brief Returns the colour used by the image to represent transparent
        pixels. By default this is (0, 0, 0, 0)
        */
        const Colour& getTransparencyColour() const { return m_transparencyColour; }

        /*!
        \brief Returns true if the image used by this layer specifically states a
        colour to use as transparency
        */
        bool hasTransparency() const { return m_hasTransparency; }

        /*!
        \brief Returns the size of the image of the image layer in pixels.
        */
        const Vector2u& getImageSize() const { return m_imageSize; }

        /*!
        \brief Returns true if the image drawn by this layer is repeated along
        the X axis.
        */
        bool hasRepeatX() const { return m_hasRepeatX; }

        /*!
        \brief Returns true if the image drawn by this layer is repeated along
        the Y axis.
        */
        bool hasRepeatY() const { return m_hasRepeatY; }

    private:
        std::string m_workingDir;
        std::string m_filePath;
        Colour m_transparencyColour;
        bool m_hasTransparency;
        Vector2u m_imageSize;
        bool m_hasRepeatX;
        bool m_hasRepeatY;
    };

    template <>
    inline ImageLayer& Layer::getLayerAs<ImageLayer>()
    {
        assert(getType() == Type::Image);
        return *static_cast<ImageLayer*>(this);
    }

    template <>
    inline const ImageLayer& Layer::getLayerAs<ImageLayer>() const
    {
        assert(getType() == Type::Image);
        return *static_cast<const ImageLayer*>(this);
    }
}
175
ext/tmxlite/include/tmxlite/Layer.hpp
Normal file
@@ -0,0 +1,175 @@
/*********************************************************************
Matt Marchant 2016 - 2023
http://trederia.blogspot.com

tmxlite - Zlib license.

This software is provided 'as-is', without any express or
implied warranty. In no event will the authors be held
liable for any damages arising from the use of this software.

Permission is granted to anyone to use this software for any purpose,
including commercial applications, and to alter it and redistribute
it freely, subject to the following restrictions:

1. The origin of this software must not be misrepresented;
you must not claim that you wrote the original software.
If you use this software in a product, an acknowledgment
in the product documentation would be appreciated but
is not required.

2. Altered source versions must be plainly marked as such,
and must not be misrepresented as being the original software.

3. This notice may not be removed or altered from any
source distribution.
*********************************************************************/

#pragma once

#include "tmxlite/Config.hpp"
#include "tmxlite/Property.hpp"
#include "tmxlite/Types.hpp"

#include <string>
#include <memory>
#include <vector>

namespace pugi
{
    class xml_node;
}

namespace tmx
{
    class Map;
    class TileLayer;
    class ObjectGroup;
    class ImageLayer;
    class LayerGroup;

    /*!
    \brief Represents a layer of a tmx format tile map.
    This is an abstract base class from which all layer
    types are derived.
    */
    class TMXLITE_EXPORT_API Layer
    {
    public:
        using Ptr = std::unique_ptr<Layer>;

        Layer() : m_opacity(1.f), m_visible(true) {}
        virtual ~Layer() = default;

        /*!
        \brief Layer type as returned by getType()
        Tile: this layer is a TileLayer type
        Object: this layer is an ObjectGroup type
        Image: this layer is an ImageLayer type
        Group: this layer is a LayerGroup type
        */
        enum class Type
        {
            Tile,
            Object,
            Image,
            Group
        };

        /*!
        \brief Returns a Type value representing the concrete type.
        Use this when deciding which concrete layer type to use when
        calling the templated function getLayerAs<T>()
        */
        virtual Type getType() const = 0;

        /*!
        \brief Returns the class of the Layer, as defined in the editor Tiled 1.9+
        */
        const std::string& getClass() const { return m_class; }

        /*!
        \brief Use this to get a reference to the concrete layer type
        which this layer points to.
        Use getType() to return the type value of this layer and determine
        if the concrete type is TileLayer, ObjectGroup, ImageLayer, or LayerGroup
        */
        template <typename T>
        T& getLayerAs();

        template <typename T>
        const T& getLayerAs() const;

        /*!
        \brief Attempts to parse the specific node layer type
        */
        virtual void parse(const pugi::xml_node&, Map* = nullptr) = 0;

        /*!
        \brief Returns the name of the layer
        */
        const std::string& getName() const { return m_name; }

        /*!
        \brief Returns the opacity value for the layer
        */
        float getOpacity() const { return m_opacity; }

        /*!
        \brief Returns whether this layer is visible or not
        */
        bool getVisible() const { return m_visible; }

        /*!
        \brief Returns the offset from the top left corner
        of the layer, in pixels
        */
        const Vector2i& getOffset() const { return m_offset; }

        /*!
        \brief Returns the parallax factor
        */
        const Vector2f& getParallaxFactor() const { return m_parallaxFactor; }

        /*!
        \brief Returns the tint colour of the layer.
        Defaults to 0xFFFFFFFF - pure white
        */
        Colour getTintColour() const { return m_tintColour; }

        /*!
        \brief Returns the size of the layer, in pixels.
        This will be the same as the map size for fixed size maps.
        */
        const Vector2u& getSize() const { return m_size; }

        /*!
        \brief Returns the list of properties of this layer
        */
        const std::vector<Property>& getProperties() const { return m_properties; }

    protected:
        void setName(const std::string& name) { m_name = name; }
        void setClass(const std::string& cls) { m_class = cls; }
        void setOpacity(float opacity) { m_opacity = opacity; }
        void setVisible(bool visible) { m_visible = visible; }
        void setOffset(std::int32_t x, std::int32_t y) { m_offset = Vector2i(x, y); }
        void setParallaxFactor(float x, float y) { m_parallaxFactor.x = x; m_parallaxFactor.y = y; }
        void setTintColour(Colour c) { m_tintColour = c; }
        void setSize(std::uint32_t width, std::uint32_t height) { m_size = Vector2u(width, height); }
        void addProperty(const pugi::xml_node& node) { m_properties.emplace_back(); m_properties.back().parse(node); }

    private:
        std::string m_name;
        std::string m_class;
        float m_opacity;
        bool m_visible;
        Vector2i m_offset;
        Vector2f m_parallaxFactor;
        Colour m_tintColour = { 255, 255, 255, 255 };
        Vector2u m_size;

        std::vector<Property> m_properties;
    };
}
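The getType()/getLayerAs<T>() pair above is the intended way to recover the concrete layer type; a minimal sketch (map path hypothetical):

    #include <tmxlite/Map.hpp>
    #include <tmxlite/TileLayer.hpp>
    #include <iostream>

    int main()
    {
        tmx::Map map;
        if (!map.load("assets/level1.tmx")) return 1;

        for (const auto& layer : map.getLayers())
        {
            // Check the concrete type first: getLayerAs<T>() asserts on a mismatch
            if (layer->getType() == tmx::Layer::Type::Tile)
            {
                const auto& tileLayer = layer->getLayerAs<tmx::TileLayer>();
                std::cout << layer->getName() << ": "
                          << tileLayer.getTiles().size() << " tiles\n";
            }
        }
        return 0;
    }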
86
ext/tmxlite/include/tmxlite/LayerGroup.hpp
Normal file
@@ -0,0 +1,86 @@
/*********************************************************************
Grant Gangi 2019 - 2022

tmxlite - Zlib license.

This software is provided 'as-is', without any express or
implied warranty. In no event will the authors be held
liable for any damages arising from the use of this software.

Permission is granted to anyone to use this software for any purpose,
including commercial applications, and to alter it and redistribute
it freely, subject to the following restrictions:

1. The origin of this software must not be misrepresented;
you must not claim that you wrote the original software.
If you use this software in a product, an acknowledgment
in the product documentation would be appreciated but
is not required.

2. Altered source versions must be plainly marked as such,
and must not be misrepresented as being the original software.

3. This notice may not be removed or altered from any
source distribution.
*********************************************************************/

#pragma once

#include "tmxlite/Config.hpp"
#include "tmxlite/Layer.hpp"
#include "tmxlite/Types.hpp"

#include <vector>

namespace tmx
{
    /*!
    \brief Layer groups are used to organize the layers of
    the map in a hierarchy. They can contain all other layer
    types including more layer groups to further nest layers.
    */
    class TMXLITE_EXPORT_API LayerGroup final : public Layer
    {
    public:
        LayerGroup(const std::string& workDir, const Vector2u& tileCount);
        ~LayerGroup() = default;
        LayerGroup(const LayerGroup&) = delete;
        const LayerGroup& operator = (const LayerGroup&) = delete;
        LayerGroup(LayerGroup&&) = default;
        LayerGroup& operator = (LayerGroup&&) = default;

        Type getType() const override { return Layer::Type::Group; }
        void parse(const pugi::xml_node&, Map*) override;

        /*!
        \brief Returns a reference to the vector containing the layer data.
        Layers are pointer-to-baseclass, the concrete type of which can be
        found via Layer::getType()
        \see Layer
        */
        const std::vector<Layer::Ptr>& getLayers() const { return m_layers; }

    private:
        std::vector<Layer::Ptr> m_layers;

        std::string m_workingDir;
        Vector2u m_tileCount;
    };

    template <>
    inline LayerGroup& Layer::getLayerAs<LayerGroup>()
    {
        assert(getType() == Type::Group);
        return *static_cast<LayerGroup*>(this);
    }

    template <>
    inline const LayerGroup& Layer::getLayerAs<LayerGroup>() const
    {
        assert(getType() == Type::Group);
        return *static_cast<const LayerGroup*>(this);
    }
}
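Because groups can nest other groups, consumers generally walk getLayers() recursively; a sketch under the same assumptions as the previous example:

    #include <tmxlite/Map.hpp>
    #include <tmxlite/LayerGroup.hpp>
    #include <iostream>
    #include <string>
    #include <vector>

    // Recursively print every layer name, indenting nested groups
    void walkLayers(const std::vector<tmx::Layer::Ptr>& layers, int depth = 0)
    {
        for (const auto& layer : layers)
        {
            std::cout << std::string(depth * 2, ' ') << layer->getName() << '\n';
            if (layer->getType() == tmx::Layer::Type::Group)
                walkLayers(layer->getLayerAs<tmx::LayerGroup>().getLayers(), depth + 1);
        }
    }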
282
ext/tmxlite/include/tmxlite/Map.hpp
Normal file
@@ -0,0 +1,282 @@
/*********************************************************************
Matt Marchant 2016 - 2021
http://trederia.blogspot.com

tmxlite - Zlib license.

This software is provided 'as-is', without any express or
implied warranty. In no event will the authors be held
liable for any damages arising from the use of this software.

Permission is granted to anyone to use this software for any purpose,
including commercial applications, and to alter it and redistribute
it freely, subject to the following restrictions:

1. The origin of this software must not be misrepresented;
you must not claim that you wrote the original software.
If you use this software in a product, an acknowledgment
in the product documentation would be appreciated but
is not required.

2. Altered source versions must be plainly marked as such,
and must not be misrepresented as being the original software.

3. This notice may not be removed or altered from any
source distribution.
*********************************************************************/

#pragma once

#include "tmxlite/Tileset.hpp"
#include "tmxlite/Layer.hpp"
#include "tmxlite/Property.hpp"
#include "tmxlite/Types.hpp"
#include "tmxlite/Object.hpp"

#include <string>
#include <vector>
#include <map>
#include <unordered_map>

namespace tmx
{
    /*!
    \brief Holds the xml version of the loaded map
    */
    struct TMXLITE_EXPORT_API Version
    {
        // major/minor are apparently reserved by gcc
        std::uint16_t upper;
        std::uint16_t lower;
        Version(std::uint16_t maj = 0, std::uint16_t min = 0)
            : upper(maj), lower(min) {}
    };

    enum class Orientation
    {
        Orthogonal,
        Isometric,
        Staggered,
        Hexagonal,
        None
    };

    enum class RenderOrder
    {
        RightDown,
        RightUp,
        LeftDown,
        LeftUp,
        None
    };

    enum class StaggerAxis
    {
        X, Y, None
    };

    enum class StaggerIndex
    {
        Even, Odd, None
    };

    /*!
    \brief Parser for TMX format tile maps.
    This class can be used to parse the XML format tile maps created
    with the Tiled map editor, providing an interface to create drawable and
    physics objects. Typical usage would be to create an instance of this
    class before calling load() providing a path to the *.tmx file to be
    loaded. Then layers or objects can be requested from the Map class
    to be interpreted as needed.
    \see https://doc.mapeditor.org/en/stable/reference/tmx-map-format/#map
    */
    class TMXLITE_EXPORT_API Map final
    {
    public:
        Map();
        ~Map() = default;
        Map(const Map&) = delete;
        Map& operator = (const Map&) = delete;
        Map(Map&&) = default;
        Map& operator = (Map&&) = default;

        /*!
        \brief Attempts to parse the tilemap at the given location.
        \param std::string Path to map file to try to parse
        \returns true if map was parsed successfully else returns false.
        In debug mode this will attempt to log any errors to the console.
        */
        bool load(const std::string&);

        /*!
        \brief Loads a map from a document stored in a string
        \param data A std::string containing the map data to load
        \param workingDir A std::string containing the working directory
        in which to find assets such as tile sets or images
        \returns true if successful, else false
        */
        bool loadFromString(const std::string& data, const std::string& workingDir);

        /*!
        \brief Returns the version of the tile map last parsed.
        If no tile map has yet been parsed the version will read 0, 0
        */
        const Version& getVersion() const { return m_version; }

        /*!
        \brief Returns the orientation of the map if one is loaded,
        else returns None
        */
        Orientation getOrientation() const { return m_orientation; }

        /*!
        \brief Returns the RenderOrder of the map if one is loaded,
        else returns None
        */
        RenderOrder getRenderOrder() const { return m_renderOrder; }

        /*!
        \brief Returns the tile count of the map in the X and Y directions
        */
        const Vector2u& getTileCount() const { return m_tileCount; }

        /*!
        \brief Returns the size of the tile grid in this map.
        Actual tile sizes may vary and will be extended / shrunk about
        the bottom left corner of the tile.
        */
        const Vector2u& getTileSize() const { return m_tileSize; }

        /*!
        \brief Returns the bounds of the map
        */
        FloatRect getBounds() const { return FloatRect(0.f, 0.f, static_cast<float>(m_tileCount.x * m_tileSize.x), static_cast<float>(m_tileCount.y * m_tileSize.y)); }

        /*!
        \brief Returns the length of an edge of a tile if a Hexagonal
        map is loaded.
        The length returned is in pixels of the straight edge running
        along the axis returned by getStaggerAxis(). If no map is loaded
        or the loaded map is not of Hexagonal orientation this function
        returns 0.f
        */
        float getHexSideLength() const { return m_hexSideLength; }

        /*!
        \brief Stagger axis of the map.
        If either a Staggered or Hexagonal tile map is loaded this returns
        which axis the map is staggered along, else returns None.
        */
        StaggerAxis getStaggerAxis() const { return m_staggerAxis; }

        /*!
        \brief Stagger Index of the loaded map.
        If a Staggered or Hexagonal map is loaded this returns whether
        the even or odd rows of tiles are staggered, otherwise it returns None.
        */
        StaggerIndex getStaggerIndex() const { return m_staggerIndex; }

        /*!
        \brief Returns the background colour of the map.
        */
        const Colour& getBackgroundColour() const { return m_backgroundColour; }

        /*!
        \brief Returns a reference to the vector of tile sets used by the map
        */
        const std::vector<Tileset>& getTilesets() const { return m_tilesets; }

        /*!
        \brief Returns a reference to the vector containing the layer data.
        Layers are pointer-to-baseclass, the concrete type of which can be
        found via Layer::getType()
        \see Layer
        */
        const std::vector<Layer::Ptr>& getLayers() const { return m_layers; }

        /*!
        \brief Returns the class of the Map, as defined in the editor Tiled 1.9+
        */
        const std::string& getClass() const { return m_class; }

        /*!
        \brief Returns a vector of Property objects loaded by the map
        */
        const std::vector<Property>& getProperties() const { return m_properties; }

        /*!
        \brief Returns a hashmap of all animated tiles, accessible by tile ID
        */
        const std::map<std::uint32_t, Tileset::Tile>& getAnimatedTiles() const { return m_animTiles; }

        /*!
        \brief Returns the current working directory of the map. Images and
        other resources are loaded relative to this.
        */
        const std::string& getWorkingDirectory() const { return m_workingDirectory; }

        /*!
        \brief Returns an unordered_map of template objects indexed by file name
        */
        std::unordered_map<std::string, Object>& getTemplateObjects() { return m_templateObjects; }
        const std::unordered_map<std::string, Object>& getTemplateObjects() const { return m_templateObjects; }

        /*!
        \brief Returns an unordered_map of tilesets used by templated objects.
        If Object::getTilesetName() is not empty it can be used to retrieve a tileset
        from this map. Otherwise the object's tileset can be found from the map's
        global tilesets returned by getTilesets().
        */
        std::unordered_map<std::string, Tileset>& getTemplateTilesets() { return m_templateTilesets; }
        const std::unordered_map<std::string, Tileset>& getTemplateTilesets() const { return m_templateTilesets; }

        /*!
        \brief Returns true if this is an infinite tile map.
        Infinite maps store their tile data for tile layers in chunks. If
        this is an infinite map use TileLayer::getChunks() to get tile IDs
        rather than TileLayer::getTiles().
        \see TileLayer
        */
        bool isInfinite() const { return m_infinite; }

        /*!
        \brief Returns the origin of each layer's parallax offset value
        */
        Vector2f getParallaxOrigin() const { return m_parallaxOrigin; }

    private:
        Version m_version;
        std::string m_class;
        Orientation m_orientation;
        RenderOrder m_renderOrder;
        bool m_infinite;

        Vector2u m_tileCount;
        Vector2u m_tileSize;

        float m_hexSideLength;
        StaggerAxis m_staggerAxis;
        StaggerIndex m_staggerIndex;

        Vector2f m_parallaxOrigin;

        Colour m_backgroundColour;

        std::string m_workingDirectory;

        std::vector<Tileset> m_tilesets;
        std::vector<Layer::Ptr> m_layers;
        std::vector<Property> m_properties;
        std::map<std::uint32_t, Tileset::Tile> m_animTiles;

        std::unordered_map<std::string, Object> m_templateObjects;
        std::unordered_map<std::string, Tileset> m_templateTilesets;

        bool parseMapNode(const pugi::xml_node&);

        // always returns false so we can return this
        // on load failure
        bool reset();
    };
}
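Typical consumption of Map follows the doc comment above: construct, load(), then query; a minimal sketch (file name hypothetical):

    #include <tmxlite/Map.hpp>
    #include <iostream>

    int main()
    {
        tmx::Map map;
        if (!map.load("assets/level1.tmx"))
        {
            std::cerr << "failed to load map\n";
            return 1;
        }

        const auto& version = map.getVersion();
        std::cout << "TMX version " << version.upper << '.' << version.lower << '\n';

        if (map.isInfinite())
            std::cout << "infinite map: read tiles via TileLayer::getChunks()\n";

        const auto tiles = map.getTileCount();
        const auto size = map.getTileSize();
        std::cout << tiles.x << 'x' << tiles.y << " tiles of "
                  << size.x << 'x' << size.y << " px, "
                  << map.getTilesets().size() << " tileset(s)\n";
        return 0;
    }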
221
ext/tmxlite/include/tmxlite/Object.hpp
Normal file
@@ -0,0 +1,221 @@
/*********************************************************************
(c) Matt Marchant 2016 - 2021
http://trederia.blogspot.com

tmxlite - Zlib license.

This software is provided 'as-is', without any express or
implied warranty. In no event will the authors be held
liable for any damages arising from the use of this software.

Permission is granted to anyone to use this software for any purpose,
including commercial applications, and to alter it and redistribute
it freely, subject to the following restrictions:

1. The origin of this software must not be misrepresented;
you must not claim that you wrote the original software.
If you use this software in a product, an acknowledgment
in the product documentation would be appreciated but
is not required.

2. Altered source versions must be plainly marked as such,
and must not be misrepresented as being the original software.

3. This notice may not be removed or altered from any
source distribution.
*********************************************************************/

#pragma once

#include "tmxlite/Config.hpp"
#include "tmxlite/Property.hpp"
#include "tmxlite/Types.hpp"

#include <string>
#include <vector>

namespace pugi
{
    class xml_node;
}

namespace tmx
{
    class Map;

    /*!
    \brief Contains the text information stored in a Text object.
    */
    struct TMXLITE_EXPORT_API Text final
    {
        std::string fontFamily;
        std::uint32_t pixelSize = 16; //!< pixels, not points
        bool wrap = false;
        Colour colour;
        bool bold = false;
        bool italic = false;
        bool underline = false;
        bool strikethough = false;
        bool kerning = true;

        enum class HAlign
        {
            Left, Centre, Right
        } hAlign = HAlign::Left;

        enum class VAlign
        {
            Top, Centre, Bottom
        } vAlign = VAlign::Top;

        std::string content; //!< actual string content
    };

    /*!
    \brief Objects are stored in ObjectGroup layers.
    Objects may be rectangular, elliptical, polygonal or
    a polyline. Rectangular and elliptical Objects have their
    size determined via the AABB, whereas polygon and polyline
    shapes are defined by a list of points. Objects are
    rectangular by default. Since version 1.0 Objects also
    support Text nodes.
    */
    class TMXLITE_EXPORT_API Object final
    {
    public:
        enum class Shape
        {
            Rectangle,
            Ellipse,
            Point,
            Polygon,
            Polyline,
            Text
        };

        Object();

        /*!
        \brief Attempts to parse the given xml node and
        read the Object properties if it is valid.
        */
        void parse(const pugi::xml_node&, Map*);

        /*!
        \brief Returns the unique ID of the Object
        */
        std::uint32_t getUID() const { return m_UID; }

        /*!
        \brief Returns the name of the Object
        */
        const std::string& getName() const { return m_name; }

        /*!
        \brief Returns the type (equal to class) of the Object, as defined in the editor Tiled < 1.9
        */
        const std::string& getType() const { return m_class; }

        /*!
        \brief Returns the class (equal to type) of the Object, as defined in the editor Tiled 1.9+
        */
        const std::string& getClass() const { return m_class; }

        /*!
        \brief Returns the position of the Object in pixels
        */
        const Vector2f& getPosition() const { return m_position; }

        /*!
        \brief Returns the global Axis Aligned Bounding Box.
        The AABB is positioned via the left and top properties, and
        defines the Object's width and height. This can be used to derive
        the shape of the Object if it is rectangular or elliptical.
        */
        const FloatRect& getAABB() const { return m_AABB; }

        /*!
        \brief Returns the rotation of the Object in degrees clockwise
        */
        float getRotation() const { return m_rotation; }

        /*!
        \brief Returns the global tile ID associated with the Object
        if there is one. This is used to draw the Object (and therefore
        the Object must be rectangular)
        */
        std::uint32_t getTileID() const { return m_tileID; }

        /*!
        \brief Returns the flip flags if the object uses a TileID to
        draw it.
        Returns 0 otherwise.
        */
        std::uint8_t getFlipFlags() const { return m_flipFlags; }

        /*!
        \brief Returns whether or not the Object is visible
        */
        bool visible() const { return m_visible; }

        /*!
        \brief Returns the Shape type of the Object
        */
        Shape getShape() const { return m_shape; }

        /*!
        \brief Returns a reference to the vector of points which
        make up the Object. If the Object is rectangular or elliptical
        then the vector will be empty. Point coordinates are in pixels,
        relative to the object position.
        */
        const std::vector<Vector2f>& getPoints() const { return m_points; }

        /*!
        \brief Returns a reference to the vector of properties belonging to
        the Object.
        */
        const std::vector<Property>& getProperties() const { return m_properties; }

        /*!
        \brief Returns a Text struct containing information about any text
        this object may have, such as font data and formatting.
        If an object does not contain any text information this struct will
        be populated with default values. Use getShape() to determine
        if this object is in fact a text object.
        */
        const Text& getText() const { return m_textData; }
        Text& getText() { return m_textData; }

        /*!
        \brief Returns the tileset name used by this object if it is derived
        from a template, else returns an empty string.
        If the string is not empty use it to index the unordered_map returned
        by Map::getTemplateTilesets()
        */
        const std::string& getTilesetName() const { return m_tilesetName; }

    private:
        std::uint32_t m_UID;
        std::string m_name;
        std::string m_class;
        Vector2f m_position;
        FloatRect m_AABB;
        float m_rotation;
        std::uint32_t m_tileID;
        std::uint8_t m_flipFlags;
        bool m_visible;

        Shape m_shape;
        std::vector<Vector2f> m_points;
        std::vector<Property> m_properties;

        Text m_textData;

        std::string m_tilesetName;

        void parsePoints(const pugi::xml_node&);
        void parseText(const pugi::xml_node&);
        void parseTemplate(const std::string&, Map*);
    };
}
99
ext/tmxlite/include/tmxlite/ObjectGroup.hpp
Normal file
@@ -0,0 +1,99 @@
/*********************************************************************
Matt Marchant 2016 - 2022
http://trederia.blogspot.com

tmxlite - Zlib license.

This software is provided 'as-is', without any express or
implied warranty. In no event will the authors be held
liable for any damages arising from the use of this software.

Permission is granted to anyone to use this software for any purpose,
including commercial applications, and to alter it and redistribute
it freely, subject to the following restrictions:

1. The origin of this software must not be misrepresented;
you must not claim that you wrote the original software.
If you use this software in a product, an acknowledgment
in the product documentation would be appreciated but
is not required.

2. Altered source versions must be plainly marked as such,
and must not be misrepresented as being the original software.

3. This notice may not be removed or altered from any
source distribution.
*********************************************************************/

#pragma once

#include "tmxlite/Config.hpp"
#include "tmxlite/Layer.hpp"
#include "tmxlite/Object.hpp"

#include <vector>

namespace tmx
{
    /*!
    \brief ObjectGroup layers contain a series of Objects
    which may be made up of shapes or images.
    */
    class TMXLITE_EXPORT_API ObjectGroup final : public Layer
    {
    public:
        enum class DrawOrder
        {
            Index,  //!< objects should be drawn in the order in which they appear
            TopDown //!< objects should be drawn sorted by their Y position
        };

        ObjectGroup();

        Type getType() const override { return Layer::Type::Object; }
        void parse(const pugi::xml_node&, Map*) override;

        /*!
        \brief Returns the colour associated with this layer
        */
        const Colour& getColour() const { return m_colour; }

        /*!
        \brief Returns the DrawOrder for the objects in this group.
        Defaults to TopDown, where Objects are drawn sorted by Y position
        */
        DrawOrder getDrawOrder() const { return m_drawOrder; }

        /*!
        \brief Returns a reference to the vector of properties for
        the ObjectGroup
        */
        const std::vector<Property>& getProperties() const { return m_properties; }

        /*!
        \brief Returns a reference to the vector of Objects which belong to the group
        */
        const std::vector<Object>& getObjects() const { return m_objects; }

    private:
        Colour m_colour;
        DrawOrder m_drawOrder;

        std::vector<Property> m_properties;
        std::vector<Object> m_objects;
    };

    template <>
    inline ObjectGroup& Layer::getLayerAs<ObjectGroup>()
    {
        assert(getType() == Type::Object);
        return *static_cast<ObjectGroup*>(this);
    }

    template <>
    inline const ObjectGroup& Layer::getLayerAs<ObjectGroup>() const
    {
        assert(getType() == Type::Object);
        return *static_cast<const ObjectGroup*>(this);
    }
}
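Object layers are read by down-casting to ObjectGroup and iterating getObjects(); a sketch that lists rectangular objects (assuming a loaded map, as in the earlier examples):

    #include <tmxlite/Map.hpp>
    #include <tmxlite/ObjectGroup.hpp>
    #include <iostream>

    // Print the AABB of every rectangular object in every object layer
    void listRectangles(const tmx::Map& map)
    {
        for (const auto& layer : map.getLayers())
        {
            if (layer->getType() != tmx::Layer::Type::Object) continue;

            for (const auto& obj : layer->getLayerAs<tmx::ObjectGroup>().getObjects())
            {
                if (obj.getShape() != tmx::Object::Shape::Rectangle) continue;

                const auto& aabb = obj.getAABB();
                std::cout << obj.getName() << ": " << aabb.left << ',' << aabb.top
                          << ' ' << aabb.width << 'x' << aabb.height << '\n';
            }
        }
    }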
86
ext/tmxlite/include/tmxlite/ObjectTypes.hpp
Normal file
@@ -0,0 +1,86 @@
/*********************************************************************
Raphaël Frantz 2021

tmxlite - Zlib license.

This software is provided 'as-is', without any express or
implied warranty. In no event will the authors be held
liable for any damages arising from the use of this software.

Permission is granted to anyone to use this software for any purpose,
including commercial applications, and to alter it and redistribute
it freely, subject to the following restrictions:

1. The origin of this software must not be misrepresented;
you must not claim that you wrote the original software.
If you use this software in a product, an acknowledgment
in the product documentation would be appreciated but
is not required.

2. Altered source versions must be plainly marked as such,
and must not be misrepresented as being the original software.

3. This notice may not be removed or altered from any
source distribution.
*********************************************************************/

#pragma once

#include "tmxlite/Property.hpp"

#include <string>
#include <vector>

namespace tmx
{
    /*!
    \brief Parser for the Tiled object types export format.
    Link to the specification: https://doc.mapeditor.org/fr/latest/manual/custom-properties/#predefining-properties.
    */
    class TMXLITE_EXPORT_API ObjectTypes final
    {
    public:
        /*!
        \brief Type that stores all predefined properties for objects of this type.
        To save space, they are not exported into maps by default.
        */
        struct Type
        {
            std::string name;
            Colour colour;
            std::vector<Property> properties;
        };

        /*!
        \brief Attempts to parse the object types file at the given location.
        \param std::string Path to the object types file to try to parse
        \returns true if the object types were parsed successfully, else returns false.
        In debug mode this will attempt to log any errors to the console.
        */
        bool load(const std::string&);

        /*!
        \brief Loads object types from a document stored in a string
        \param data A std::string containing the object types to load
        \param workingDir A std::string containing the working directory
        in which to find files.
        \returns true if successful, else false
        */
        bool loadFromString(const std::string& data, const std::string& workingDir);

        /*!
        \brief Returns all predefined types and their default values.
        */
        const std::vector<Type>& getTypes() const { return m_types; }

    private:
        std::string m_workingDirectory;
        std::vector<Type> m_types;

        bool parseObjectTypesNode(const pugi::xml_node&);

        // always returns false so we can return this
        // on load failure
        bool reset();
    };
}
144
ext/tmxlite/include/tmxlite/Property.hpp
Normal file
@@ -0,0 +1,144 @@
/*********************************************************************
Matt Marchant 2016 - 2021
http://trederia.blogspot.com

tmxlite - Zlib license.

This software is provided 'as-is', without any express or
implied warranty. In no event will the authors be held
liable for any damages arising from the use of this software.

Permission is granted to anyone to use this software for any purpose,
including commercial applications, and to alter it and redistribute
it freely, subject to the following restrictions:

1. The origin of this software must not be misrepresented;
you must not claim that you wrote the original software.
If you use this software in a product, an acknowledgment
in the product documentation would be appreciated but
is not required.

2. Altered source versions must be plainly marked as such,
and must not be misrepresented as being the original software.

3. This notice may not be removed or altered from any
source distribution.
*********************************************************************/

#pragma once

#include "tmxlite/Config.hpp"
#include "tmxlite/Types.hpp"

#include <string>
#include <cassert>

namespace pugi
{
    class xml_node;
}

namespace tmx
{
    /*!
    \brief Represents a custom property.
    Tiles, objects and layers of a tmx map may have custom
    properties assigned to them. This class represents a
    single property and provides access to its value, the
    type of which can be determined with getType()
    */
    class TMXLITE_EXPORT_API Property final
    {
    public:

        enum class Type
        {
            Boolean,
            Float,
            Int,
            String,
            Colour,
            File,
            Object,
            Undef
        };

        Property();

        static Property fromBoolean(bool value);
        static Property fromFloat(float value);
        static Property fromInt(int value);
        static Property fromString(const std::string& value);
        static Property fromColour(const Colour& value);
        static Property fromFile(const std::string& value);
        static Property fromObject(int value);

        /*!
        \brief Attempts to parse the given node as a property
        \param isObjectTypes Set to true if the parsing is done from an object types file.
        */
        void parse(const pugi::xml_node&, bool isObjectTypes = false);

        /*!
        \brief Returns the type of data stored in the property.
        This should generally be called first before trying to
        read the property value, as reading the incorrect type
        will lead to undefined behaviour.
        */
        Type getType() const { return m_type; }

        /*!
        \brief Returns the name of this property
        */
        const std::string& getName() const { return m_name; }

        /*!
        \brief Returns the property's value as a boolean
        */
        bool getBoolValue() const { assert(m_type == Type::Boolean); return m_boolValue; }

        /*!
        \brief Returns the property's value as a float
        */
        float getFloatValue() const { assert(m_type == Type::Float); return m_floatValue; }

        /*!
        \brief Returns the property's value as an integer
        */
        int getIntValue() const { assert(m_type == Type::Int || m_type == Type::Object); return m_intValue; }

        /*!
        \brief Returns the property's value as a string
        */
        const std::string& getStringValue() const { assert(m_type == Type::String); return m_stringValue; }

        /*!
        \brief Returns the property's value as a Colour struct
        */
        const Colour& getColourValue() const { assert(m_type == Type::Colour); return m_colourValue; }

        /*!
        \brief Returns the file path property as a string, relative to the map file
        */
        const std::string& getFileValue() const { assert(m_type == Type::File); return m_stringValue; }

        /*!
        \brief Returns the property's value as an integer object handle
        */
        int getObjectValue() const { assert(m_type == Type::Object); return m_intValue; }

    private:
        union
        {
            bool m_boolValue;
            float m_floatValue;
            int m_intValue;
        };
        std::string m_stringValue;
        std::string m_name;
        Colour m_colourValue;

        Type m_type;
    };
}
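As the doc comments above stress, getType() must be checked before reading a value; a minimal dispatch sketch:

    #include <tmxlite/Property.hpp>
    #include <iostream>

    // Print a property's value after checking its type, as required by the API
    void printProperty(const tmx::Property& prop)
    {
        std::cout << prop.getName() << " = ";
        switch (prop.getType())
        {
        case tmx::Property::Type::Boolean: std::cout << std::boolalpha << prop.getBoolValue(); break;
        case tmx::Property::Type::Float:   std::cout << prop.getFloatValue();                  break;
        case tmx::Property::Type::Int:     std::cout << prop.getIntValue();                    break;
        case tmx::Property::Type::String:  std::cout << prop.getStringValue();                 break;
        case tmx::Property::Type::File:    std::cout << prop.getFileValue();                   break;
        case tmx::Property::Type::Object:  std::cout << "object #" << prop.getObjectValue();   break;
        default:                           std::cout << "(colour or undefined)";               break;
        }
        std::cout << '\n';
    }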
116
ext/tmxlite/include/tmxlite/TileLayer.hpp
Normal file
116
ext/tmxlite/include/tmxlite/TileLayer.hpp
Normal file
@@ -0,0 +1,116 @@
/*********************************************************************
Matt Marchant 2016 - 2022
http://trederia.blogspot.com

tmxlite - Zlib license.

This software is provided 'as-is', without any express or
implied warranty. In no event will the authors be held
liable for any damages arising from the use of this software.

Permission is granted to anyone to use this software for any purpose,
including commercial applications, and to alter it and redistribute
it freely, subject to the following restrictions:

1. The origin of this software must not be misrepresented;
you must not claim that you wrote the original software.
If you use this software in a product, an acknowledgment
in the product documentation would be appreciated but
is not required.

2. Altered source versions must be plainly marked as such,
and must not be misrepresented as being the original software.

3. This notice may not be removed or altered from any
source distribution.
*********************************************************************/

#pragma once

#include "tmxlite/Layer.hpp"
#include "tmxlite/Types.hpp"

namespace tmx
{
    /*!
    \brief A layer made up from a series of tile sets
    */
    class TMXLITE_EXPORT_API TileLayer final : public Layer
    {
    public:
        /*!
        \brief Tile information for a layer
        */
        struct Tile final
        {
            std::uint32_t ID = 0;       //!< Global ID of the tile
            std::uint8_t flipFlags = 0; //!< Flags marking if the tile should be flipped when drawn
        };

        /*!
        \brief Represents a chunk of tile data, if this is an infinite map
        */
        struct Chunk final
        {
            Vector2i position; //!< coordinate in tiles, not pixels
            Vector2i size;     //!< size in tiles, not pixels
            std::vector<Tile> tiles;
        };

        /*!
        \brief Flags used to tell if a tile is flipped when drawn
        */
        enum FlipFlag
        {
            Horizontal = 0x8,
            Vertical = 0x4,
            Diagonal = 0x2
        };

        explicit TileLayer(std::size_t);

        Type getType() const override { return Layer::Type::Tile; }
        void parse(const pugi::xml_node&, Map*) override;

        /*!
        \brief Returns the list of tiles used to make up the layer.
        If this is empty then the map is most likely infinite, in
        which case the tile data is stored in chunks.
        \see getChunks()
        */
        const std::vector<Tile>& getTiles() const { return m_tiles; }

        /*!
        \brief Returns a vector of chunks which make up this layer
        if the map is set to infinite. This will be empty if the map
        is not infinite.
        \see getTiles()
        */
        const std::vector<Chunk>& getChunks() const { return m_chunks; }

    private:
        std::vector<Tile> m_tiles;
        std::vector<Chunk> m_chunks;
        std::size_t m_tileCount;

        void parseBase64(const pugi::xml_node&);
        void parseCSV(const pugi::xml_node&);
        void parseUnencoded(const pugi::xml_node&);

        void createTiles(const std::vector<std::uint32_t>&, std::vector<Tile>& destination);
    };

    template <>
    inline TileLayer& Layer::getLayerAs<TileLayer>()
    {
        assert(getType() == Type::Tile);
        return *static_cast<TileLayer*>(this);
    }

    template <>
    inline const TileLayer& Layer::getLayerAs<TileLayer>() const
    {
        assert(getType() == Type::Tile);
        return *static_cast<const TileLayer*>(this);
    }
}
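Note that a TileLayer stores its data in exactly one of two places: getTiles() for finite maps, getChunks() for infinite ones, with per-tile flip bits packed into flipFlags either way. A minimal consumer sketch (the traversal logic is illustrative, not part of the library):

#include <tmxlite/TileLayer.hpp>
#include <iostream>

// Walks a tile layer, handling both the finite and the chunked (infinite) layout.
void dumpLayer(const tmx::TileLayer& layer)
{
    auto printTile = [](const tmx::TileLayer::Tile& tile)
    {
        const bool flipH = (tile.flipFlags & tmx::TileLayer::FlipFlag::Horizontal) != 0;
        std::cout << "GID " << tile.ID << (flipH ? " (h-flipped)" : "") << "\n";
    };

    if (!layer.getTiles().empty())
    {
        //finite map: one flat row-major array
        for (const auto& tile : layer.getTiles()) printTile(tile);
    }
    else
    {
        //infinite map: data arrives per chunk, positioned in tile coordinates
        for (const auto& chunk : layer.getChunks())
        {
            std::cout << "chunk at " << chunk.position << ", size " << chunk.size << "\n";
            for (const auto& tile : chunk.tiles) printTile(tile);
        }
    }
}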
296
ext/tmxlite/include/tmxlite/Tileset.hpp
Normal file
@@ -0,0 +1,296 @@
/*********************************************************************
Matt Marchant 2016 - 2023
http://trederia.blogspot.com

tmxlite - Zlib license.

This software is provided 'as-is', without any express or
implied warranty. In no event will the authors be held
liable for any damages arising from the use of this software.

Permission is granted to anyone to use this software for any purpose,
including commercial applications, and to alter it and redistribute
it freely, subject to the following restrictions:

1. The origin of this software must not be misrepresented;
you must not claim that you wrote the original software.
If you use this software in a product, an acknowledgment
in the product documentation would be appreciated but
is not required.

2. Altered source versions must be plainly marked as such,
and must not be misrepresented as being the original software.

3. This notice may not be removed or altered from any
source distribution.
*********************************************************************/

#pragma once

#include "tmxlite/Config.hpp"
#include "tmxlite/Property.hpp"
#include "tmxlite/ObjectGroup.hpp"

#include <string>
#include <vector>
#include <array>

namespace pugi
{
    class xml_node;
}

namespace tmx
{
    class Map;

    /*!
    \brief Represents a Tileset node as loaded
    from a *.tmx format tile map via the tmx::Map
    class.
    */
    class TMXLITE_EXPORT_API Tileset final
    {
    public:
        explicit Tileset(const std::string& workingDir);

        /*!
        \brief Any tiles within a tile set which have special
        data associated with them such as animation or terrain
        information will have one of these stored in the tile set.
        */
        struct Tile final
        {
            std::uint32_t ID = 0;
            std::array<std::int32_t, 4u> terrainIndices{};
            std::uint32_t probability = 100;

            /*!
            \brief A group of frames which make up an animation
            */
            struct Animation final
            {
                /*!
                \brief A frame within an animation
                */
                struct Frame final
                {
                    std::uint32_t tileID = 0;
                    std::uint32_t duration = 0;

                    bool operator == (const Frame& other) const
                    {
                        return (this == &other) ||
                            (tileID == other.tileID && duration == other.duration);
                    }

                    bool operator != (const Frame& other) const
                    {
                        return !(*this == other);
                    }
                };
                std::vector<Frame> frames;
            } animation;
            std::vector<Property> properties;
            ObjectGroup objectGroup;
            std::string imagePath;
            Vector2u imageSize;
            /*!
            \brief The position of the tile within the image.
            */
            Vector2u imagePosition;
            std::string className;
        };

        /*!
        \brief Terrain information with which one
        or more tiles may be associated.
        */
        struct Terrain final
        {
            std::string name;
            std::uint32_t tileID = -1;
            std::vector<Property> properties;
        };

        /*!
        \brief Declares the alignment of tile Objects
        */
        enum class ObjectAlignment
        {
            Unspecified,
            TopLeft,
            Top,
            TopRight,
            Left,
            Center,
            Right,
            BottomLeft,
            Bottom,
            BottomRight
        };

        /*!
        \brief Attempts to parse the given xml node.
        If node parsing fails an error is printed in the console
        and the Tileset remains in an uninitialised state.
        */
        void parse(pugi::xml_node, Map*);

        /*!
        \brief Returns the first GID of this tile set.
        This is the ID of the first tile in the tile set, so that
        each tile set guarantees a unique set of IDs
        */
        std::uint32_t getFirstGID() const { return m_firstGID; }

        /*!
        \brief Returns the last GID of this tile set.
        This is the ID of the last tile in the tile set.
        */
        std::uint32_t getLastGID() const;

        /*!
        \brief Returns the name of this tile set.
        */
        const std::string& getName() const { return m_name; }

        /*!
        \brief Returns the class of the Tileset, as defined in the editor Tiled 1.9+
        */
        const std::string& getClass() const { return m_class; }

        /*!
        \brief Returns the width and height of a tile in the
        tile set, in pixels.
        */
        const Vector2u& getTileSize() const { return m_tileSize; }

        /*!
        \brief Returns the spacing, in pixels, between each tile in the set
        */
        std::uint32_t getSpacing() const { return m_spacing; }

        /*!
        \brief Returns the margin, in pixels, around each tile in the set
        */
        std::uint32_t getMargin() const { return m_margin; }

        /*!
        \brief Returns the number of tiles in the tile set
        */
        std::uint32_t getTileCount() const { return m_tileCount; }

        /*!
        \brief Returns the number of columns which make up the tile set.
        This is used when rendering collections of image sets
        */
        std::uint32_t getColumnCount() const { return m_columnCount; }

        /*!
        \brief Returns the alignment of tile objects.
        The default value is ObjectAlignment::Unspecified for compatibility.
        When the alignment is Unspecified tile objects use BottomLeft in
        orthogonal mode and Bottom in isometric mode.
        \see ObjectAlignment
        */
        ObjectAlignment getObjectAlignment() const { return m_objectAlignment; }

        /*!
        \brief Returns the tile offset in pixels.
        Tiled will draw tiles offset from the top left using this value.
        */
        const Vector2u& getTileOffset() const { return m_tileOffset; }

        /*!
        \brief Returns a reference to the list of Property objects for this
        tile set
        */
        const std::vector<Property>& getProperties() const { return m_properties; }

        /*!
        \brief Returns the file path to the tile set image, relative to the
        working directory. Use this to load the texture required by whichever
        method you choose to render the map.
        */
        const std::string& getImagePath() const { return m_imagePath; }

        /*!
        \brief Returns the size of the tile set image in pixels.
        */
        const Vector2u& getImageSize() const { return m_imageSize; }

        /*!
        \brief Returns the colour used by the tile map image to represent transparency.
        By default this is a transparent colour (0, 0, 0, 0)
        */
        const Colour& getTransparencyColour() const { return m_transparencyColour; }

        /*!
        \brief Returns true if the image used by this tileset specifically requests
        a colour to use as transparency.
        */
        bool hasTransparency() const { return m_hasTransparency; }

        /*!
        \brief Returns a vector of Terrain types associated with one
        or more tiles within this tile set
        */
        const std::vector<Terrain>& getTerrainTypes() const { return m_terrainTypes; }

        /*!
        \brief Returns a reference to the vector of tile data used by
        tiles which make up this tile set.
        */
        const std::vector<Tile>& getTiles() const { return m_tiles; }

        /*!
        \brief Checks if a tile ID is in the range of the first ID and the last ID
        \param id Tile ID
        \return true if the ID belongs to this tile set
        */
        bool hasTile(std::uint32_t id) const { return id >= m_firstGID && id <= getLastGID(); }

        /*!
        \brief Queries the tiles and returns the one with the given ID,
        checking first that the ID is part of this Tileset with hasTile(id).
        \param id Tile ID. The Tile ID will be corrected internally.
        \return Pointer to the matching tile on success, or nullptr on failure.
        */
        const Tile* getTile(std::uint32_t id) const;

    private:

        std::string m_workingDir;

        std::uint32_t m_firstGID;
        std::string m_source;
        std::string m_name;
        std::string m_class;
        Vector2u m_tileSize;
        std::uint32_t m_spacing;
        std::uint32_t m_margin;
        std::uint32_t m_tileCount;
        std::uint32_t m_columnCount;
        ObjectAlignment m_objectAlignment;
        Vector2u m_tileOffset;

        std::vector<Property> m_properties;
        std::string m_imagePath;
        Vector2u m_imageSize;
        Colour m_transparencyColour;
        bool m_hasTransparency;

        std::vector<Terrain> m_terrainTypes;
        std::vector<std::uint32_t> m_tileIndex;
        std::vector<Tile> m_tiles;

        void reset();

        void parseOffsetNode(const pugi::xml_node&);
        void parsePropertyNode(const pugi::xml_node&);
        void parseTerrainNode(const pugi::xml_node&);
        Tile& newTile(std::uint32_t ID);
        void parseTileNode(const pugi::xml_node&, Map*);
        void createMissingTile(std::uint32_t ID);
    };
}
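Each tile set claims a contiguous GID range beginning at getFirstGID(), which is what makes hasTile() a simple range check. Resolving a GID from a layer therefore means finding the owning set and subtracting its first GID; a minimal sketch (resolveGID is an illustrative helper, not library API):

#include <tmxlite/Map.hpp>
#include <tmxlite/Tileset.hpp>

// Finds the tile set owning a global tile ID and computes the tile's
// local index into that set's image grid. Returns nullptr if unclaimed.
const tmx::Tileset* resolveGID(const tmx::Map& map, std::uint32_t gid, std::uint32_t& localID)
{
    for (const auto& ts : map.getTilesets())
    {
        if (ts.hasTile(gid))
        {
            localID = gid - ts.getFirstGID();
            return &ts;
        }
    }
    return nullptr;
}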
150
ext/tmxlite/include/tmxlite/Types.hpp
Normal file
@@ -0,0 +1,150 @@
/*********************************************************************
Matt Marchant 2016 - 2023
http://trederia.blogspot.com

tmxlite - Zlib license.

This software is provided 'as-is', without any express or
implied warranty. In no event will the authors be held
liable for any damages arising from the use of this software.

Permission is granted to anyone to use this software for any purpose,
including commercial applications, and to alter it and redistribute
it freely, subject to the following restrictions:

1. The origin of this software must not be misrepresented;
you must not claim that you wrote the original software.
If you use this software in a product, an acknowledgment
in the product documentation would be appreciated but
is not required.

2. Altered source versions must be plainly marked as such,
and must not be misrepresented as being the original software.

3. This notice may not be removed or altered from any
source distribution.
*********************************************************************/

#pragma once

#include "tmxlite/Config.hpp"

#include <cstdint>
#include <ostream>


namespace tmx
{
    /*!
    \brief Two dimensional vector used to store points and positions
    */
    template <class T>
    struct Vector2 final
    {
        Vector2() : x(0), y(0) {}
        Vector2(T x, T y) : x(x), y(y) {}
        T x, y;
    };

    using Vector2f = Vector2<float>;
    using Vector2i = Vector2<int>;
    using Vector2u = Vector2<unsigned>;

    template <typename T>
    Vector2<T> operator + (const Vector2<T>& l, const Vector2<T>& r);

    template <typename T>
    Vector2<T>& operator += (Vector2<T>& l, const Vector2<T>& r);

    template <typename T>
    Vector2<T> operator - (const Vector2<T>& l, const Vector2<T>& r);

    template <typename T>
    Vector2<T>& operator -= (Vector2<T>& l, const Vector2<T>& r);

    template <typename T>
    Vector2<T> operator * (const Vector2<T>& l, const Vector2<T>& r);

    template <typename T>
    Vector2<T>& operator *= (Vector2<T>& l, const Vector2<T>& r);

    template <typename T>
    Vector2<T> operator * (const Vector2<T>& l, T r);

    template <typename T>
    Vector2<T>& operator *= (Vector2<T>& l, T r);

    template <typename T>
    Vector2<T> operator / (const Vector2<T>& l, const Vector2<T>& r);

    template <typename T>
    Vector2<T>& operator /= (Vector2<T>& l, const Vector2<T>& r);

    template <typename T>
    Vector2<T> operator / (const Vector2<T>& l, T r);

    template <typename T>
    Vector2<T>& operator /= (Vector2<T>& l, T r);

#include "Types.inl"

    /*!
    \brief Describes a rectangular area, such as an AABB (axis aligned bounding box)
    */
    template <class T>
    struct Rectangle final
    {
        Rectangle() : left(0), top(0), width(0), height(0) {}
        Rectangle(T l, T t, T w, T h) : left(l), top(t), width(w), height(h) {}
        Rectangle(Vector2<T> position, Vector2<T> size) : left(position.x), top(position.y), width(size.x), height(size.y) {}
        T left, top, width, height;
    };

    using FloatRect = Rectangle<float>;
    using IntRect = Rectangle<int>;

    /*!
    \brief Contains the red, green, blue and alpha values of a colour
    in the range 0 - 255.
    */
    struct TMXLITE_EXPORT_API Colour final
    {
        Colour(std::uint8_t red = 0, std::uint8_t green = 0, std::uint8_t blue = 0, std::uint8_t alpha = 255)
            : r(red), g(green), b(blue), a(alpha) {}
        std::uint8_t r, g, b, a;

        bool operator == (const Colour& other) const
        {
            return other.r == r
                && other.g == g
                && other.b == b
                && other.a == a;
        }

        bool operator != (const Colour& other) const
        {
            return !(*this == other);
        }

        explicit operator std::uint32_t() const
        {
            return (r << 24) | (g << 16) | (b << 8) | a;
        }
    };
}

template <typename T>
std::ostream& operator << (std::ostream& os, const tmx::Vector2<T>& t)
{
    os << "{" << t.x << ", " << t.y << "}";
    return os;
}

template <typename T>
std::ostream& operator << (std::ostream& os, const tmx::Rectangle<T>& t)
{
    os << "{" << t.left << ", " << t.top << ", " << t.width << ", " << t.height << "}";
    return os;
}

std::ostream& operator << (std::ostream& os, const tmx::Colour& c);
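The explicit std::uint32_t conversion above packs the channels as 0xRRGGBBAA, red in the high byte. A quick round-trip sketch (the unpacking is just the inverse shifts, written out here for illustration):

#include <tmxlite/Types.hpp>
#include <cassert>

int main()
{
    const tmx::Colour magenta(255, 0, 255, 255);

    //pack into 0xRRGGBBAA per the conversion operator
    const auto packed = static_cast<std::uint32_t>(magenta);
    assert(packed == 0xFF00FFFFu);

    //unpack with the inverse shifts
    const tmx::Colour back(
        static_cast<std::uint8_t>(packed >> 24),
        static_cast<std::uint8_t>(packed >> 16),
        static_cast<std::uint8_t>(packed >> 8),
        static_cast<std::uint8_t>(packed));
    assert(back == magenta);
    return 0;
}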
110
ext/tmxlite/include/tmxlite/Types.inl
Normal file
@@ -0,0 +1,110 @@
/*********************************************************************
Matt Marchant 2016 - 2023
http://trederia.blogspot.com

tmxlite - Zlib license.

This software is provided 'as-is', without any express or
implied warranty. In no event will the authors be held
liable for any damages arising from the use of this software.

Permission is granted to anyone to use this software for any purpose,
including commercial applications, and to alter it and redistribute
it freely, subject to the following restrictions:

1. The origin of this software must not be misrepresented;
you must not claim that you wrote the original software.
If you use this software in a product, an acknowledgment
in the product documentation would be appreciated but
is not required.

2. Altered source versions must be plainly marked as such,
and must not be misrepresented as being the original software.

3. This notice may not be removed or altered from any
source distribution.
*********************************************************************/

template <typename T>
Vector2<T> operator + (const Vector2<T>& l, const Vector2<T>& r)
{
    return { l.x + r.x, l.y + r.y };
}

template <typename T>
Vector2<T>& operator += (Vector2<T>& l, const Vector2<T>& r)
{
    l.x += r.x;
    l.y += r.y;
    return l;
}

template <typename T>
Vector2<T> operator - (const Vector2<T>& l, const Vector2<T>& r)
{
    return { l.x - r.x, l.y - r.y };
}

template <typename T>
Vector2<T>& operator -= (Vector2<T>& l, const Vector2<T>& r)
{
    l.x -= r.x;
    l.y -= r.y;
    return l;
}

template <typename T>
Vector2<T> operator * (const Vector2<T>& l, const Vector2<T>& r)
{
    return { l.x * r.x, l.y * r.y };
}

template <typename T>
Vector2<T>& operator *= (Vector2<T>& l, const Vector2<T>& r)
{
    l.x *= r.x;
    l.y *= r.y;
    return l;
}

template <typename T>
Vector2<T> operator * (const Vector2<T>& l, T r)
{
    return { l.x * r, l.y * r };
}

template <typename T>
Vector2<T>& operator *= (Vector2<T>& l, T r)
{
    l.x *= r;
    l.y *= r;
    return l;
}

template <typename T>
Vector2<T> operator / (const Vector2<T>& l, const Vector2<T>& r)
{
    return { l.x / r.x, l.y / r.y };
}

template <typename T>
Vector2<T>& operator /= (Vector2<T>& l, const Vector2<T>& r)
{
    l.x /= r.x;
    l.y /= r.y;
    return l;
}

template <typename T>
Vector2<T> operator / (const Vector2<T>& l, T r)
{
    return { l.x / r, l.y / r };
}

template <typename T>
Vector2<T>& operator /= (Vector2<T>& l, T r)
{
    l.x /= r;
    l.y /= r;
    return l;
}
163
ext/tmxlite/include/tmxlite/detail/Log.hpp
Normal file
@@ -0,0 +1,163 @@
/*********************************************************************
Matt Marchant 2016 - 2021
http://trederia.blogspot.com

tmxlite - Zlib license.

This software is provided 'as-is', without any express or
implied warranty. In no event will the authors be held
liable for any damages arising from the use of this software.

Permission is granted to anyone to use this software for any purpose,
including commercial applications, and to alter it and redistribute
it freely, subject to the following restrictions:

1. The origin of this software must not be misrepresented;
you must not claim that you wrote the original software.
If you use this software in a product, an acknowledgment
in the product documentation would be appreciated but
is not required.

2. Altered source versions must be plainly marked as such,
and must not be misrepresented as being the original software.

3. This notice may not be removed or altered from any
source distribution.
*********************************************************************/

//flexible logging class, based on code at https://github.com/fallahn/xygine

#ifndef TMXLITE_LOGGER_HPP_
#define TMXLITE_LOGGER_HPP_

#include <string>
#include <iostream>
#include <iomanip>
#include <fstream>
#include <sstream>
#include <list>
#include <ctime>

#ifdef _MSC_VER
#define NOMINMAX
#include <windows.h>
#endif //_MSC_VER

namespace tmx
{
    /*!
    \brief Class allowing messages to be logged to a combination
    of one or more destinations such as the console, log file or
    output window in Visual Studio
    */
    class Logger final
    {
    public:
        enum class Output
        {
            Console,
            File,
            All
        };

        enum class Type
        {
            Info,
            Warning,
            Error
        };

        /*!
        \brief Logs a message to a given destination.
        \param message Message to log
        \param type Whether this message gets tagged as information, a warning or an error
        \param output Destination for the message. Can be the console via cout, a log file on disk, or both
        */
        static void log(const std::string& message, Type type = Type::Info, Output output = Output::Console)
        {
            std::string outstring;
            switch (type)
            {
            case Type::Info:
            default:
                outstring = "INFO: " + message;
                break;
            case Type::Error:
                outstring = "ERROR: " + message;
                break;
            case Type::Warning:
                outstring = "WARNING: " + message;
                break;
            }

            if (output == Output::Console || output == Output::All)
            {
                if (type == Type::Error)
                {
                    std::cerr << outstring << std::endl;
                }
                else
                {
                    std::cout << outstring << std::endl;
                }
                const std::size_t maxBuffer = 30;
                buffer().push_back(outstring);
                if (buffer().size() > maxBuffer) buffer().pop_front(); //keep only the most recent maxBuffer entries
                updateOutString(maxBuffer);

#ifdef _MSC_VER
                outstring += "\n";
                OutputDebugStringA(outstring.c_str());
#endif //_MSC_VER
            }
            if (output == Output::File || output == Output::All)
            {
                //output to a log file
                std::ofstream file("output.log", std::ios::app);
                if (file.good())
                {
                    std::time_t time = std::time(nullptr);
                    auto tm = *std::localtime(&time);
                    //put_time isn't implemented by the ndk versions of the stl
                    file.imbue(std::locale());
                    file << std::put_time(&tm, "%d/%m/%y-%H:%M:%S: ");
                    file << outstring << std::endl;
                    file.close();
                }
                else
                {
                    log(message, type, Output::Console);
                    log("Above message was intended for log file. Opening file probably failed.", Type::Warning, Output::Console);
                }
            }
        }

        static const std::string& bufferString() { return stringOutput(); }

    private:
        static std::list<std::string>& buffer() { static std::list<std::string> buffer; return buffer; }
        static std::string& stringOutput() { static std::string output; return output; }
        static void updateOutString(std::size_t maxBuffer)
        {
            static size_t count = 0;
            stringOutput().append(buffer().back());
            stringOutput().append("\n");
            count++;

            if (count > maxBuffer)
            {
                stringOutput() = stringOutput().substr(stringOutput().find_first_of('\n') + 1, stringOutput().size());
                count--;
            }
        }
    };
}
#ifdef NDEBUG
#define LOG(message, type)
#else
#define LOG(message, type) {\
    std::stringstream ss; \
    ss << message << " (" << __FILE__ << ", " << __LINE__ << ")"; \
    tmx::Logger::log(ss.str(), type);}
#endif //NDEBUG

#endif //TMXLITE_LOGGER_HPP_
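Logger is entirely static, and the LOG macro appends file and line in debug builds while compiling to nothing under NDEBUG. A minimal usage sketch (including the detail header directly, as its path suggests is possible):

#include <tmxlite/detail/Log.hpp>

int main()
{
    //explicit call: severity and destination chosen per message
    tmx::Logger::log("starting map load", tmx::Logger::Type::Info, tmx::Logger::Output::All);

    //macro form: adds __FILE__/__LINE__, disappears in release builds
    LOG("tile set missing an image node", tmx::Logger::Type::Warning);
    return 0;
}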
39
ext/tmxlite/include/tmxlite/detail/gzip.hpp
Normal file
@@ -0,0 +1,39 @@
// gzip.hpp - portable memory miniz based gzip reader
// SPDX-License-Identifier: Zlib
// SPDX-FileCopyrightText: (c) 2024 a dinosaur

#ifndef GZIP_HPP
#define GZIP_HPP

#include "miniz.h"
#include <cstdint>
#include <span>


class GZipReader
{
    static constexpr uint8_t
        FTEXT = 1, FHCRC = 1<<1, FEXTRA = 1<<2, FNAME = 1<<3, FCOMMENT = 1<<4;

    static constexpr uint8_t XFL_BEST = 2, XFL_FASTEST = 4;

    tinfl_decompressor mState;
    std::span<const uint8_t>::iterator mIt;

    size_t mSourceLen, mBytesRead;
    uint32_t mModificationTime, mCrc, mInputSize, mComputedCrc;
    uint16_t crc16;
    uint8_t mFlags, mXflags, mOsId;

public:
    GZipReader() noexcept;

    constexpr size_t SourceLength() const noexcept { return mSourceLen; }
    constexpr uint32_t OutputLength() const noexcept { return mInputSize; }

    bool OpenMemory(const std::span<const uint8_t> source) noexcept;
    bool Read(std::span<uint8_t> out) noexcept;
    bool Check() const noexcept;
};

#endif //GZIP_HPP
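The interface suggests a three-step protocol: OpenMemory() parses the gzip header, OutputLength() exposes the size recorded in the stream, and Read() inflates into a caller-provided buffer with Check() verifying the result. A sketch of that sequence inferred from the declarations above, not taken from the implementation:

#include "gzip.hpp"
#include <vector>

// Decompresses an in-memory gzip blob; returns an empty vector on failure.
std::vector<uint8_t> gunzip(std::span<const uint8_t> compressed)
{
    GZipReader reader;
    if (!reader.OpenMemory(compressed)) //parse the gzip header
        return {};

    std::vector<uint8_t> out(reader.OutputLength()); //size field from the stream
    if (!reader.Read(out) || !reader.Check())        //inflate, then verify
        return {};
    return out;
}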
132
ext/tmxlite/src/FreeFuncs.cpp
Normal file
@@ -0,0 +1,132 @@
/*********************************************************************
Matt Marchant 2016 - 2023
http://trederia.blogspot.com

tmxlite - Zlib license.

This software is provided 'as-is', without any express or
implied warranty. In no event will the authors be held
liable for any damages arising from the use of this software.

Permission is granted to anyone to use this software for any purpose,
including commercial applications, and to alter it and redistribute
it freely, subject to the following restrictions:

1. The origin of this software must not be misrepresented;
you must not claim that you wrote the original software.
If you use this software in a product, an acknowledgment
in the product documentation would be appreciated but
is not required.

2. Altered source versions must be plainly marked as such,
and must not be misrepresented as being the original software.

3. This notice may not be removed or altered from any
source distribution.
*********************************************************************/

#include "tmxlite/FreeFuncs.hpp"
#include "tmxlite/Types.hpp"
#include "tmxlite/detail/Log.hpp"
#ifndef USE_ZLIB
# include "miniz.h"
#else
# include <zlib.h>
#endif
#include <cstring>

bool tmx::decompress(const char* source, std::vector<unsigned char>& dest, std::size_t inSize, std::size_t expectedSize)
{
    if (!source)
    {
        LOG("Input source is null, decompression failed.", Logger::Type::Error);
        return false;
    }

    //#ifdef USE_EXTLIBS

    //#else
    int currentSize = static_cast<int>(expectedSize);
    std::vector<unsigned char> byteArray(expectedSize / sizeof(unsigned char));
    z_stream stream;
    stream.zalloc = Z_NULL;
    stream.zfree = Z_NULL;
    stream.opaque = Z_NULL;
    stream.next_in = (Bytef*)source;
    stream.avail_in = static_cast<unsigned int>(inSize);
    stream.next_out = (Bytef*)byteArray.data();
    stream.avail_out = static_cast<unsigned int>(expectedSize);

    //we'd prefer to use inflateInit2 but it appears
    //to be incorrect in miniz. This is fine for zlib
    //compressed data, but gzip compressed streams
    //will fail to inflate.
#ifdef USE_ZLIB
    if (inflateInit2(&stream, 15 + 32) != Z_OK)
#else
    if (inflateInit(&stream) != Z_OK)
#endif
    {
        LOG("inflate init failed", Logger::Type::Error);
        return false;
    }

    int result = 0;
    do
    {
        result = inflate(&stream, Z_SYNC_FLUSH);

        switch (result)
        {
        default: break;
        case Z_NEED_DICT:
        case Z_STREAM_ERROR:
            result = Z_DATA_ERROR;
            //fall through
        case Z_DATA_ERROR:
            Logger::log("If using gzip or zstd compression try using zlib instead", Logger::Type::Info);
            //fall through
        case Z_MEM_ERROR:
            inflateEnd(&stream);
            Logger::log("inflate() returned " + std::to_string(result), Logger::Type::Error);
            return false;
        }

        if (result != Z_STREAM_END)
        {
            //expectedSize was too small: double the buffer and continue inflating
            int oldSize = currentSize;
            currentSize *= 2;
            std::vector<unsigned char> newArray(currentSize / sizeof(unsigned char));
            std::memcpy(newArray.data(), byteArray.data(), currentSize / 2);
            byteArray = std::move(newArray);

            stream.next_out = (Bytef*)(byteArray.data() + oldSize);
            stream.avail_out = oldSize;
        }
    } while (result != Z_STREAM_END);

    if (stream.avail_in != 0)
    {
        LOG("stream.avail_in is not 0", Logger::Type::Error);
        LOG("zlib decompression failed.", Logger::Type::Error);
        return false;
    }

    const int outSize = currentSize - stream.avail_out;
    inflateEnd(&stream);

    std::vector<unsigned char> newArray(outSize / sizeof(unsigned char));
    std::memcpy(newArray.data(), byteArray.data(), outSize);
    byteArray = std::move(newArray);

    //copy bytes to vector
    dest.insert(dest.begin(), byteArray.begin(), byteArray.end());
    //#endif
    return true;
}

std::ostream& operator << (std::ostream& os, const tmx::Colour& c)
{
    os << "RGBA: " << (int)c.r << ", " << (int)c.g << ", " << (int)c.b << ", " << (int)c.a;
    return os;
}
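tmx::decompress takes a starting size guess and doubles its internal buffer until inflate signals end of stream, so the caller's expectedSize only needs to be in the right ballpark. A minimal call sketch; the four-bytes-per-GID estimate reflects the tmx data format, while the wrapper itself is illustrative:

#include <tmxlite/FreeFuncs.hpp>
#include <string>
#include <vector>

// Inflates a compressed <data> block from a tmx tile layer.
bool inflateTileData(const std::string& raw, std::vector<unsigned char>& out, std::size_t tileCount)
{
    const std::size_t expectedSize = tileCount * 4; //4 bytes per GID
    return tmx::decompress(raw.data(), out, raw.size(), expectedSize);
}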
107
ext/tmxlite/src/ImageLayer.cpp
Normal file
@@ -0,0 +1,107 @@
/*********************************************************************
Matt Marchant 2016 - 2023
http://trederia.blogspot.com

tmxlite - Zlib license.

This software is provided 'as-is', without any express or
implied warranty. In no event will the authors be held
liable for any damages arising from the use of this software.

Permission is granted to anyone to use this software for any purpose,
including commercial applications, and to alter it and redistribute
it freely, subject to the following restrictions:

1. The origin of this software must not be misrepresented;
you must not claim that you wrote the original software.
If you use this software in a product, an acknowledgment
in the product documentation would be appreciated but
is not required.

2. Altered source versions must be plainly marked as such,
and must not be misrepresented as being the original software.

3. This notice may not be removed or altered from any
source distribution.
*********************************************************************/

#include "tmxlite/ImageLayer.hpp"
#include "tmxlite/FreeFuncs.hpp"
#include "tmxlite/detail/Log.hpp"

#include <pugixml.hpp>

using namespace tmx;

ImageLayer::ImageLayer(const std::string& workingDir)
    : m_workingDir      (workingDir),
    m_hasTransparency   (false),
    m_hasRepeatX        (false),
    m_hasRepeatY        (false)
{

}

//public
void ImageLayer::parse(const pugi::xml_node& node, Map*)
{
    std::string attribName = node.name();
    if (attribName != "imagelayer")
    {
        Logger::log("Node not an image layer, node skipped", Logger::Type::Error);
        return;
    }

    //TODO this gets repeated for each layer type and could all be moved to the base class...
    setName(node.attribute("name").as_string());
    setClass(node.attribute("class").as_string());
    setOpacity(node.attribute("opacity").as_float(1.f));
    setVisible(node.attribute("visible").as_bool(true));
    setOffset(node.attribute("offsetx").as_int(0), node.attribute("offsety").as_int(0));
    setSize(node.attribute("width").as_uint(0), node.attribute("height").as_uint(0));
    setParallaxFactor(node.attribute("parallaxx").as_float(1.f), node.attribute("parallaxy").as_float(1.f));

    std::string tintColour = node.attribute("tintcolor").as_string();
    if (!tintColour.empty())
    {
        setTintColour(colourFromString(tintColour));
    }

    m_hasRepeatX = node.attribute("repeatx").as_bool(false);
    m_hasRepeatY = node.attribute("repeaty").as_bool(false);

    for (const auto& child : node.children())
    {
        attribName = child.name();
        if (attribName == "image")
        {
            attribName = child.attribute("source").as_string();
            if (attribName.empty())
            {
                Logger::log("Image Layer has missing source property", Logger::Type::Warning);
                return;
            }

            if (child.attribute("width") && child.attribute("height"))
            {
                m_imageSize.x = child.attribute("width").as_uint();
                m_imageSize.y = child.attribute("height").as_uint();
            }

            m_filePath = resolveFilePath(attribName, m_workingDir);
            if (child.attribute("trans"))
            {
                attribName = child.attribute("trans").as_string();
                m_transparencyColour = colourFromString(attribName);
                m_hasTransparency = true;
            }
        }
        else if (attribName == "properties")
        {
            for (const auto& p : child.children())
            {
                addProperty(p);
            }
        }
    }
}
106
ext/tmxlite/src/LayerGroup.cpp
Normal file
@@ -0,0 +1,106 @@
/*********************************************************************
Grant Gangi 2019
Matt Marchant 2023

tmxlite - Zlib license.

This software is provided 'as-is', without any express or
implied warranty. In no event will the authors be held
liable for any damages arising from the use of this software.

Permission is granted to anyone to use this software for any purpose,
including commercial applications, and to alter it and redistribute
it freely, subject to the following restrictions:

1. The origin of this software must not be misrepresented;
you must not claim that you wrote the original software.
If you use this software in a product, an acknowledgment
in the product documentation would be appreciated but
is not required.

2. Altered source versions must be plainly marked as such,
and must not be misrepresented as being the original software.

3. This notice may not be removed or altered from any
source distribution.
*********************************************************************/

#include "tmxlite/LayerGroup.hpp"
#include "tmxlite/FreeFuncs.hpp"
#include "tmxlite/ObjectGroup.hpp"
#include "tmxlite/ImageLayer.hpp"
#include "tmxlite/TileLayer.hpp"
#include "tmxlite/detail/Log.hpp"

#include <pugixml.hpp>

using namespace tmx;

LayerGroup::LayerGroup(const std::string& workingDir, const Vector2u& tileCount)
    : m_workingDir  (workingDir),
    m_tileCount     (tileCount)
{
}

//public
void LayerGroup::parse(const pugi::xml_node& node, Map* map)
{
    assert(map);
    std::string attribString = node.name();
    if (attribString != "group")
    {
        Logger::log("Node was not a group layer, node will be skipped.", Logger::Type::Error);
        return;
    }

    setName(node.attribute("name").as_string());
    setClass(node.attribute("class").as_string());
    setOpacity(node.attribute("opacity").as_float(1.f));
    setVisible(node.attribute("visible").as_bool(true));
    setOffset(node.attribute("offsetx").as_int(0), node.attribute("offsety").as_int(0));
    setSize(node.attribute("width").as_uint(0), node.attribute("height").as_uint(0));
    setParallaxFactor(node.attribute("parallaxx").as_float(1.f), node.attribute("parallaxy").as_float(1.f));

    std::string tintColour = node.attribute("tintcolor").as_string();
    if (!tintColour.empty())
    {
        setTintColour(colourFromString(tintColour));
    }

    //parse children
    for (const auto& child : node.children())
    {
        attribString = child.name();
        if (attribString == "properties")
        {
            for (const auto& p : child.children())
            {
                addProperty(p);
            }
        }
        else if (attribString == "layer")
        {
            m_layers.emplace_back(std::make_unique<TileLayer>(m_tileCount.x * m_tileCount.y));
            m_layers.back()->parse(child, map);
        }
        else if (attribString == "objectgroup")
        {
            m_layers.emplace_back(std::make_unique<ObjectGroup>());
            m_layers.back()->parse(child, map);
        }
        else if (attribString == "imagelayer")
        {
            m_layers.emplace_back(std::make_unique<ImageLayer>(m_workingDir));
            m_layers.back()->parse(child, map);
        }
        else if (attribString == "group")
        {
            m_layers.emplace_back(std::make_unique<LayerGroup>(m_workingDir, m_tileCount));
            m_layers.back()->parse(child, map);
        }
        else
        {
            LOG("Unidentified name " + attribString + ": node skipped", Logger::Type::Warning);
        }
    }
}
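Because a group's children are themselves Layer pointers, and may in turn be groups, consumers generally walk the layer tree recursively, dispatching on getType() and downcasting with getLayerAs(). A traversal sketch (visitTileLayers is an illustrative helper, not library API):

#include <tmxlite/LayerGroup.hpp>
#include <tmxlite/TileLayer.hpp>

// Recursively visits every tile layer, descending into nested groups.
void visitTileLayers(const std::vector<tmx::Layer::Ptr>& layers)
{
    for (const auto& layer : layers)
    {
        if (layer->getType() == tmx::Layer::Type::Group)
        {
            //groups nest arbitrarily deep, so recurse
            visitTileLayers(layer->getLayerAs<tmx::LayerGroup>().getLayers());
        }
        else if (layer->getType() == tmx::Layer::Type::Tile)
        {
            const auto& tiles = layer->getLayerAs<tmx::TileLayer>().getTiles();
            (void)tiles; //render or convert the tiles here
        }
    }
}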
362
ext/tmxlite/src/Map.cpp
Normal file
@@ -0,0 +1,362 @@
/*********************************************************************
Matt Marchant 2016 - 2023
http://trederia.blogspot.com

tmxlite - Zlib license.

This software is provided 'as-is', without any express or
implied warranty. In no event will the authors be held
liable for any damages arising from the use of this software.

Permission is granted to anyone to use this software for any purpose,
including commercial applications, and to alter it and redistribute
it freely, subject to the following restrictions:

1. The origin of this software must not be misrepresented;
you must not claim that you wrote the original software.
If you use this software in a product, an acknowledgment
in the product documentation would be appreciated but
is not required.

2. Altered source versions must be plainly marked as such,
and must not be misrepresented as being the original software.

3. This notice may not be removed or altered from any
source distribution.
*********************************************************************/

#include "tmxlite/Map.hpp"
#include "tmxlite/FreeFuncs.hpp"
#include "tmxlite/ObjectGroup.hpp"
#include "tmxlite/ImageLayer.hpp"
#include "tmxlite/TileLayer.hpp"
#include "tmxlite/LayerGroup.hpp"
#include "tmxlite/detail/Log.hpp"

#include <pugixml.hpp>
#include <queue>

using namespace tmx;

Map::Map()
    : m_orientation (Orientation::None),
    m_renderOrder   (RenderOrder::None),
    m_infinite      (false),
    m_hexSideLength (0.f),
    m_staggerAxis   (StaggerAxis::None),
    m_staggerIndex  (StaggerIndex::None)
{

}

//public
bool Map::load(const std::string& path)
{
    reset();

    //open the doc
    pugi::xml_document doc;
    auto result = doc.load_file(path.c_str());
    if (!result)
    {
        Logger::log("Failed opening " + path, Logger::Type::Error);
        Logger::log("Reason: " + std::string(result.description()), Logger::Type::Error);
        return false;
    }

    //make sure we have consistent path separators
    m_workingDirectory = path;
    std::replace(m_workingDirectory.begin(), m_workingDirectory.end(), '\\', '/');
    m_workingDirectory = getFilePath(m_workingDirectory);

    if (!m_workingDirectory.empty() &&
        m_workingDirectory.back() == '/')
    {
        m_workingDirectory.pop_back();
    }


    //find the map node and bail if it doesn't exist
    auto mapNode = doc.child("map");
    if (!mapNode)
    {
        Logger::log("Failed opening map: " + path + ", no map node found", Logger::Type::Error);
        return reset();
    }

    return parseMapNode(mapNode);
}

bool Map::loadFromString(const std::string& data, const std::string& workingDir)
{
    reset();

    //open the doc
    pugi::xml_document doc;
    auto result = doc.load_string(data.c_str());
    if (!result)
    {
        Logger::log("Failed opening map", Logger::Type::Error);
        Logger::log("Reason: " + std::string(result.description()), Logger::Type::Error);
        return false;
    }

    //make sure we have consistent path separators
    m_workingDirectory = workingDir;
    std::replace(m_workingDirectory.begin(), m_workingDirectory.end(), '\\', '/');
    m_workingDirectory = getFilePath(m_workingDirectory);

    if (!m_workingDirectory.empty() &&
        m_workingDirectory.back() == '/')
    {
        m_workingDirectory.pop_back();
    }

    //find the map node and bail if it doesn't exist
    auto mapNode = doc.child("map");
    if (!mapNode)
    {
        Logger::log("Failed opening map: no map node found", Logger::Type::Error);
        return reset();
    }

    return parseMapNode(mapNode);
}

//private
bool Map::parseMapNode(const pugi::xml_node& mapNode)
{
    //parse map attributes
    std::size_t pointPos = 0;
    std::string attribString = mapNode.attribute("version").as_string();
    if (attribString.empty() || (pointPos = attribString.find('.')) == std::string::npos)
    {
        Logger::log("Invalid map version value, map not loaded.", Logger::Type::Error);
        return reset();
    }

    m_version.upper = std::stoi(attribString.substr(0, pointPos));
    m_version.lower = std::stoi(attribString.substr(pointPos + 1));

    m_class = mapNode.attribute("class").as_string();

    attribString = mapNode.attribute("orientation").as_string();
    if (attribString.empty())
    {
        Logger::log("Missing map orientation attribute, map not loaded.", Logger::Type::Error);
        return reset();
    }

    if (attribString == "orthogonal")
    {
        m_orientation = Orientation::Orthogonal;
    }
    else if (attribString == "isometric")
    {
        m_orientation = Orientation::Isometric;
    }
    else if (attribString == "staggered")
    {
        m_orientation = Orientation::Staggered;
    }
    else if (attribString == "hexagonal")
    {
        m_orientation = Orientation::Hexagonal;
    }
    else
    {
        Logger::log(attribString + " format maps aren't supported yet, sorry! Map not loaded", Logger::Type::Error);
        return reset();
    }

    attribString = mapNode.attribute("renderorder").as_string();
    //this property is optional for older versions of map files
    if (!attribString.empty())
    {
        if (attribString == "right-down")
        {
            m_renderOrder = RenderOrder::RightDown;
        }
        else if (attribString == "right-up")
        {
            m_renderOrder = RenderOrder::RightUp;
        }
        else if (attribString == "left-down")
        {
            m_renderOrder = RenderOrder::LeftDown;
        }
        else if (attribString == "left-up")
        {
            m_renderOrder = RenderOrder::LeftUp;
        }
        else
        {
            Logger::log(attribString + ": invalid render order. Map not loaded.", Logger::Type::Error);
            return reset();
        }
    }

    if (mapNode.attribute("infinite"))
    {
        m_infinite = mapNode.attribute("infinite").as_int() != 0;
    }

    unsigned width = mapNode.attribute("width").as_int();
    unsigned height = mapNode.attribute("height").as_int();
    if (width && height)
    {
        m_tileCount = { width, height };
    }
    else
    {
        Logger::log("Invalid map tile count, map not loaded", Logger::Type::Error);
        return reset();
    }

    width = mapNode.attribute("tilewidth").as_int();
    height = mapNode.attribute("tileheight").as_int();
    if (width && height)
    {
        m_tileSize = { width, height };
    }
    else
    {
        Logger::log("Invalid tile size, map not loaded", Logger::Type::Error);
        return reset();
    }

    m_hexSideLength = mapNode.attribute("hexsidelength").as_float();
    if (m_orientation == Orientation::Hexagonal && m_hexSideLength <= 0)
    {
        Logger::log("Invalid hex side length found, map not loaded", Logger::Type::Error);
        return reset();
    }

    attribString = mapNode.attribute("staggeraxis").as_string();
    if (attribString == "x")
    {
        m_staggerAxis = StaggerAxis::X;
    }
    else if (attribString == "y")
    {
        m_staggerAxis = StaggerAxis::Y;
    }
    if ((m_orientation == Orientation::Staggered || m_orientation == Orientation::Hexagonal)
        && m_staggerAxis == StaggerAxis::None)
    {
        Logger::log("Map missing stagger axis property. Map not loaded.", Logger::Type::Error);
        return reset();
    }

    attribString = mapNode.attribute("staggerindex").as_string();
    if (attribString == "odd")
    {
        m_staggerIndex = StaggerIndex::Odd;
    }
    else if (attribString == "even")
    {
        m_staggerIndex = StaggerIndex::Even;
    }
    if ((m_orientation == Orientation::Staggered || m_orientation == Orientation::Hexagonal)
        && m_staggerIndex == StaggerIndex::None)
    {
        Logger::log("Map missing stagger index property. Map not loaded.", Logger::Type::Error);
        return reset();
    }

    m_parallaxOrigin =
    {
        mapNode.attribute("parallaxoriginx").as_float(0.f),
        mapNode.attribute("parallaxoriginy").as_float(0.f)
    };

    //colour property is optional
    attribString = mapNode.attribute("backgroundcolor").as_string();
    if (!attribString.empty())
    {
        m_backgroundColour = colourFromString(attribString);
    }

    //TODO do we need next object ID

    //parse all child nodes
    for (const auto& node : mapNode.children())
    {
        std::string name = node.name();
        if (name == "tileset")
        {
            m_tilesets.emplace_back(m_workingDirectory);
            m_tilesets.back().parse(node, this);
        }
        else if (name == "layer")
        {
            m_layers.emplace_back(std::make_unique<TileLayer>(m_tileCount.x * m_tileCount.y));
            m_layers.back()->parse(node);
        }
        else if (name == "objectgroup")
        {
            m_layers.emplace_back(std::make_unique<ObjectGroup>());
            m_layers.back()->parse(node, this);
        }
        else if (name == "imagelayer")
        {
            m_layers.emplace_back(std::make_unique<ImageLayer>(m_workingDirectory));
            m_layers.back()->parse(node, this);
        }
        else if (name == "properties")
        {
            const auto& children = node.children();
            for (const auto& child : children)
            {
                m_properties.emplace_back();
                m_properties.back().parse(child);
            }
        }
        else if (name == "group")
        {
            m_layers.emplace_back(std::make_unique<LayerGroup>(m_workingDirectory, m_tileCount));
            m_layers.back()->parse(node, this);
        }
        else
        {
            LOG("Unidentified name " + name + ": node skipped", Logger::Type::Warning);
        }
    }

    //fill animated tiles for easier lookup into map
    for (const auto& ts : m_tilesets)
    {
        for (const auto& tile : ts.getTiles())
        {
            if (!tile.animation.frames.empty())
            {
                m_animTiles[tile.ID + ts.getFirstGID()] = tile;
            }
        }
    }

    return true;
}

bool Map::reset()
{
    m_orientation = Orientation::None;
    m_renderOrder = RenderOrder::None;
    m_tileCount = { 0u, 0u };
    m_tileSize = { 0u, 0u };
    m_hexSideLength = 0.f;
    m_staggerAxis = StaggerAxis::None;
    m_staggerIndex = StaggerIndex::None;
    m_backgroundColour = {};
    m_workingDirectory = "";

    m_tilesets.clear();
    m_layers.clear();
    m_properties.clear();

    m_templateObjects.clear();
    m_templateTilesets.clear();

    m_animTiles.clear();

    return false;
}
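Map::load drives everything above: it normalises the working directory, validates the map attributes, then hands each child node to the matching tileset or layer parser. An end-to-end sketch of loading a file and reporting what was parsed (the path is illustrative):

#include <tmxlite/Map.hpp>
#include <iostream>

int main()
{
    tmx::Map map;
    if (!map.load("assets/level1.tmx"))
    {
        std::cerr << "load failed\n"; //details were already logged by tmxlite
        return 1;
    }

    std::cout << "map size: " << map.getTileCount()
              << " tiles of " << map.getTileSize() << " px, "
              << map.getLayers().size() << " layers\n";
    return 0;
}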
399
ext/tmxlite/src/Object.cpp
Normal file
@@ -0,0 +1,399 @@
|
||||
/*********************************************************************
|
||||
Matt Marchant 2016 - 2021
|
||||
http://trederia.blogspot.com
|
||||
|
||||
tmxlite - Zlib license.
|
||||
|
||||
This software is provided 'as-is', without any express or
|
||||
implied warranty. In no event will the authors be held
|
||||
liable for any damages arising from the use of this software.
|
||||
|
||||
Permission is granted to anyone to use this software for any purpose,
|
||||
including commercial applications, and to alter it and redistribute
|
||||
it freely, subject to the following restrictions:
|
||||
|
||||
1. The origin of this software must not be misrepresented;
|
||||
you must not claim that you wrote the original software.
|
||||
If you use this software in a product, an acknowledgment
|
||||
in the product documentation would be appreciated but
|
||||
is not required.
|
||||
|
||||
2. Altered source versions must be plainly marked as such,
|
||||
and must not be misrepresented as being the original software.
|
||||
|
||||
3. This notice may not be removed or altered from any
|
||||
source distribution.
|
||||
*********************************************************************/
|
||||
|
||||
#include "tmxlite/Object.hpp"
|
||||
#include "tmxlite/FreeFuncs.hpp"
|
||||
#include "tmxlite/Map.hpp"
|
||||
#include "tmxlite/Tileset.hpp"
|
||||
#include "tmxlite/detail/Log.hpp"
|
||||
|
||||
#include <pugixml.hpp>
|
||||
#include <sstream>
|
||||
|
||||
using namespace tmx;
|
||||
|
||||
Object::Object()
|
||||
: m_UID (0),
|
||||
m_rotation (0.f),
|
||||
m_tileID (0),
|
||||
m_flipFlags (0),
|
||||
m_visible (true),
|
||||
m_shape (Shape::Rectangle)
|
||||
{
|
||||
|
||||
}
|
||||
|
||||
//public
|
||||
void Object::parse(const pugi::xml_node& node, Map* map)
|
||||
{
|
||||
std::string attribString = node.name();
|
||||
if (attribString != "object")
|
||||
{
|
||||
Logger::log("This not an Object node, parsing skipped.", Logger::Type::Error);
|
||||
return;
|
||||
}
|
||||
|
||||
m_UID = node.attribute("id").as_int();
|
||||
m_name = node.attribute("name").as_string();
|
||||
|
||||
m_class = node.attribute("type").as_string();
|
||||
if (m_class.empty())
|
||||
{
|
||||
m_class = node.attribute("class").as_string();
|
||||
}
|
||||
|
||||
m_position.x = node.attribute("x").as_float();
|
||||
m_AABB.left = m_position.x;
|
||||
m_position.y = node.attribute("y").as_float();
|
||||
m_AABB.top = m_position.y;
|
||||
m_AABB.width = node.attribute("width").as_float();
|
||||
m_AABB.height = node.attribute("height").as_float();
|
||||
m_rotation = node.attribute("rotation").as_float();
|
||||
m_visible = node.attribute("visible").as_bool(true);
|
||||
|
||||
m_tileID = node.attribute("gid").as_uint();
|
||||
|
||||
static const std::uint32_t mask = 0xf0000000;
|
||||
m_flipFlags = ((m_tileID & mask) >> 28);
|
||||
m_tileID = m_tileID & ~mask;
|
||||
|
||||
for (const auto& child : node.children())
|
||||
{
|
||||
attribString = child.name();
|
||||
if (attribString == "properties")
|
||||
{
|
||||
for (const auto& p : child.children())
|
||||
{
|
||||
m_properties.emplace_back();
|
||||
m_properties.back().parse(p);
|
||||
}
|
||||
}
|
||||
else if (attribString == "ellipse")
|
||||
{
|
||||
m_shape = Shape::Ellipse;
|
||||
}
|
||||
else if (attribString == "point")
|
||||
{
|
||||
m_shape = Shape::Point;
|
||||
}
|
||||
else if (attribString == "polygon")
|
||||
{
|
||||
m_shape = Shape::Polygon;
|
||||
parsePoints(child);
|
||||
}
|
||||
else if (attribString == "polyline")
|
||||
{
|
||||
m_shape = Shape::Polyline;
|
||||
parsePoints(child);
|
||||
}
|
||||
else if (attribString == "text")
|
||||
{
|
||||
m_shape = Shape::Text;
|
||||
parseText(child);
|
||||
}
|
||||
}
|
||||
|
||||
//parse templates last so we know which properties
|
||||
//ought to be overridden
|
||||
std::string templateStr = node.attribute("template").as_string();
|
||||
if (!templateStr.empty() && map)
|
||||
{
|
||||
parseTemplate(templateStr, map);
|
||||
}
|
||||
}
|
||||
|
||||
//private
|
||||
void Object::parsePoints(const pugi::xml_node& node)
|
||||
{
|
||||
if (node.attribute("points"))
|
||||
{
|
||||
std::string pointlist = node.attribute("points").as_string();
|
||||
std::stringstream stream(pointlist);
|
||||
std::vector<std::string> points;
|
||||
std::string pointstring;
|
||||
while (std::getline(stream, pointstring, ' '))
|
||||
{
|
||||
points.push_back(pointstring);
|
||||
}
|
||||
|
||||
//parse each pair into sf::vector2f
|
||||
for (unsigned int i = 0; i < points.size(); i++)
|
||||
{
|
||||
std::vector<float> coords;
|
||||
std::stringstream coordstream(points[i]);
|
||||
|
||||
float j;
|
||||
while (coordstream >> j)
|
||||
{
|
||||
coords.push_back(j);
|
||||
//TODO this should really ignore anything non-numeric
|
||||
if (coordstream.peek() == ',')
|
||||
{
|
||||
coordstream.ignore();
|
||||
}
|
||||
}
|
||||
m_points.emplace_back(coords[0], coords[1]);
|
||||
}
|
||||
}
|
||||
else
|
||||
{
|
||||
Logger::log("Points for polygon or polyline object are missing", Logger::Type::Warning);
|
||||
}
|
||||
}
|
||||
|
||||
void Object::parseText(const pugi::xml_node& node)
|
||||
{
|
||||
m_textData.bold = node.attribute("bold").as_bool(false);
|
||||
m_textData.colour = colourFromString(node.attribute("color").as_string("#FFFFFFFF"));
|
||||
m_textData.fontFamily = node.attribute("fontfamily").as_string();
|
||||
m_textData.italic = node.attribute("italic").as_bool(false);
|
||||
m_textData.kerning = node.attribute("kerning").as_bool(true);
|
||||
m_textData.pixelSize = node.attribute("pixelsize").as_uint(16);
|
||||
m_textData.strikethough = node.attribute("strikeout").as_bool(false);
|
||||
m_textData.underline = node.attribute("underline").as_bool(false);
|
||||
m_textData.wrap = node.attribute("wrap").as_bool(false);
|
||||
|
||||
std::string alignment = node.attribute("halign").as_string("left");
|
||||
if (alignment == "left")
|
||||
{
|
||||
m_textData.hAlign = Text::HAlign::Left;
|
||||
}
|
||||
else if (alignment == "center")
|
||||
{
|
||||
m_textData.hAlign = Text::HAlign::Centre;
|
||||
}
|
||||
else if (alignment == "right")
|
||||
{
|
||||
m_textData.hAlign = Text::HAlign::Right;
|
||||
}
|
||||
|
||||
alignment = node.attribute("valign").as_string("top");
|
||||
if (alignment == "top")
|
||||
{
|
||||
m_textData.vAlign = Text::VAlign::Top;
|
||||
}
|
||||
else if (alignment == "center")
|
||||
{
|
||||
m_textData.vAlign = Text::VAlign::Centre;
|
||||
}
|
||||
else if (alignment == "bottom")
|
||||
{
|
||||
m_textData.vAlign = Text::VAlign::Bottom;
|
||||
}
|
||||
|
||||
m_textData.content = node.text().as_string();
|
||||
}
|
||||
|
||||
void Object::parseTemplate(const std::string& path, Map* map)
|
||||
{
|
||||
assert(map);
|
||||
|
||||
auto& templateObjects = map->getTemplateObjects();
|
||||
auto& templateTilesets = map->getTemplateTilesets();
|
||||
|
||||
//load the template if not already loaded
|
||||
if (templateObjects.count(path) == 0)
|
||||
{
|
||||
auto templatePath = map->getWorkingDirectory() + "/" + path;
|
||||
|
||||
pugi::xml_document doc;
|
||||
if (!doc.load_file(templatePath.c_str()))
|
||||
{
|
||||
Logger::log("Failed opening template file " + path, Logger::Type::Error);
|
||||
return;
|
||||
}
|
||||
|
||||
auto templateNode = doc.child("template");
|
||||
if (!templateNode)
|
||||
{
|
||||
Logger::log("Template node missing from " + path, Logger::Type::Error);
|
||||
return;
|
||||
}
|
||||
|
||||
//if the template has a tileset load that (if not already loaded)
|
||||
std::string tilesetName;
|
||||
auto tileset = templateNode.child("tileset");
|
||||
if (tileset)
|
||||
{
|
||||
tilesetName = tileset.attribute("source").as_string();
|
||||
if (!tilesetName.empty() &&
|
||||
templateTilesets.count(tilesetName) == 0)
|
||||
{
|
||||
templateTilesets.insert(std::make_pair(tilesetName, Tileset(map->getWorkingDirectory())));
|
||||
templateTilesets.at(tilesetName).parse(tileset, map);
|
||||
}
|
||||
}
|
||||
|
||||
//parse the object - don't pass the map pointer here so there's
|
||||
//no recursion if someone tried to get clever and put a template in a template
|
||||
auto obj = templateNode.child("object");
|
||||
if (obj)
|
||||
{
|
||||
templateObjects.insert(std::make_pair(path, Object()));
|
||||
templateObjects[path].parse(obj, nullptr);
|
||||
templateObjects[path].m_tilesetName = tilesetName;
|
||||
}
|
||||
}
|
||||
|
||||
//apply any non-overridden object properties from the template
|
||||
if (templateObjects.count(path) != 0)
|
||||
{
|
||||
const auto& obj = templateObjects[path];
|
||||
if (m_AABB.width == 0)
|
||||
{
|
||||
m_AABB.width = obj.m_AABB.width;
|
||||
}
|
||||
|
||||
if (m_AABB.height == 0)
|
||||
{
|
||||
m_AABB.height = obj.m_AABB.height;
|
||||
}
|
||||
|
||||
m_tilesetName = obj.m_tilesetName;
|
||||
|
||||
if (m_name.empty())
|
||||
{
|
||||
m_name = obj.m_name;
|
||||
}
|
||||
|
||||
if (m_class.empty())
|
||||
{
|
||||
m_class = obj.m_class;
|
||||
}
|
||||
|
||||
if (m_rotation == 0)
|
||||
{
|
||||
m_rotation = obj.m_rotation;
|
||||
}
|
||||
|
||||
if (m_tileID == 0)
|
||||
{
|
||||
m_tileID = obj.m_tileID;
|
||||
}
|
||||
|
||||
if (m_flipFlags == 0)
|
||||
{
|
||||
m_flipFlags = obj.m_flipFlags;
|
||||
}
|
||||
|
||||
if (m_shape == Shape::Rectangle)
|
||||
{
|
||||
m_shape = obj.m_shape;
|
||||
}
|
||||
|
||||
if (m_points.empty())
|
||||
{
|
||||
m_points = obj.m_points;
|
||||
}
|
||||
|
||||
//compare properties and only copy ones that don't exist
|
||||
for (const auto& p : obj.m_properties)
|
||||
{
|
||||
auto result = std::find_if(m_properties.begin(), m_properties.end(),
|
||||
[&p](const Property& a)
|
||||
{
|
||||
return a.getName() == p.getName();
|
||||
});
|
||||
|
||||
if (result == m_properties.end())
|
||||
{
|
||||
m_properties.push_back(p);
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
if (m_shape == Shape::Text)
|
||||
{
|
||||
//check each text property and update as necessary
|
||||
//TODO this makes the assumption that we prefer the template
|
||||
//properties over the default ones - this might not
|
||||
//actually be the case....
|
||||
const auto& otherText = obj.m_textData;
|
||||
if (m_textData.fontFamily.empty())
|
||||
{
|
||||
m_textData.fontFamily = otherText.fontFamily;
|
||||
}
|
||||
|
||||
if (m_textData.pixelSize == 16)
|
||||
{
|
||||
m_textData.pixelSize = otherText.pixelSize;
|
||||
}
|
||||
|
||||
//TODO this isn't actually right if we *want* to be false
|
||||
//and the template is set to true...
|
||||
if (m_textData.wrap == false)
|
||||
{
|
||||
m_textData.wrap = otherText.wrap;
|
||||
}
|
||||
|
||||
if (m_textData.colour == Colour())
|
||||
{
|
||||
m_textData.colour = otherText.colour;
|
||||
}
|
||||
|
||||
if (m_textData.bold == false)
|
||||
{
|
||||
m_textData.bold = otherText.bold;
|
||||
}
|
||||
|
||||
if (m_textData.italic == false)
|
||||
{
|
||||
m_textData.italic = otherText.italic;
|
||||
}
|
||||
|
||||
if (m_textData.underline == false)
|
||||
{
|
||||
m_textData.underline = otherText.underline;
|
||||
}
|
||||
|
||||
if (m_textData.strikethough == false)
|
||||
{
|
||||
m_textData.strikethough = otherText.strikethough;
|
||||
}
|
||||
|
||||
if (m_textData.kerning == true)
|
||||
{
|
||||
m_textData.kerning = otherText.kerning;
|
||||
}
|
||||
|
||||
if (m_textData.hAlign == Text::HAlign::Left)
|
||||
{
|
||||
m_textData.hAlign = otherText.hAlign;
|
||||
}
|
||||
|
||||
if (m_textData.vAlign == Text::VAlign::Top)
|
||||
{
|
||||
m_textData.vAlign = otherText.vAlign;
|
||||
}
|
||||
|
||||
if (m_textData.content.empty())
|
||||
{
|
||||
m_textData.content = otherText.content;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
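A hedged usage sketch of the object API this file feeds, based on tmxlite's public headers (tmx::Map, tmx::ObjectGroup); the map file name is illustrative:

#include <tmxlite/Map.hpp>
#include <tmxlite/ObjectGroup.hpp>
#include <iostream>
#include <string>

void dumpObjects(const std::string& path)
{
    tmx::Map map;
    if (!map.load(path)) return; //the template resolution above runs during load()

    for (const auto& layer : map.getLayers())
    {
        if (layer->getType() != tmx::Layer::Type::Object) continue;

        for (const auto& object : layer->getLayerAs<tmx::ObjectGroup>().getObjects())
        {
            std::cout << object.getName() << " @ "
                      << object.getPosition().x << ','
                      << object.getPosition().y << '\n';
        }
    }
}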
|
||||
99
ext/tmxlite/src/ObjectGroup.cpp
Normal file
@@ -0,0 +1,99 @@
|
||||
/*********************************************************************
|
||||
Matt Marchant 2016 - 2023
|
||||
http://trederia.blogspot.com
|
||||
|
||||
tmxlite - Zlib license.
|
||||
|
||||
This software is provided 'as-is', without any express or
|
||||
implied warranty. In no event will the authors be held
|
||||
liable for any damages arising from the use of this software.
|
||||
|
||||
Permission is granted to anyone to use this software for any purpose,
|
||||
including commercial applications, and to alter it and redistribute
|
||||
it freely, subject to the following restrictions:
|
||||
|
||||
1. The origin of this software must not be misrepresented;
|
||||
you must not claim that you wrote the original software.
|
||||
If you use this software in a product, an acknowledgment
|
||||
in the product documentation would be appreciated but
|
||||
is not required.
|
||||
|
||||
2. Altered source versions must be plainly marked as such,
|
||||
and must not be misrepresented as being the original software.
|
||||
|
||||
3. This notice may not be removed or altered from any
|
||||
source distribution.
|
||||
*********************************************************************/
|
||||
|
||||
#include "tmxlite/FreeFuncs.hpp"
|
||||
#include "tmxlite/ObjectGroup.hpp"
|
||||
#include "tmxlite/detail/Log.hpp"
|
||||
|
||||
#include <pugixml.hpp>
|
||||
|
||||
using namespace tmx;
|
||||
|
||||
ObjectGroup::ObjectGroup()
|
||||
: m_colour (127, 127, 127, 255),
|
||||
m_drawOrder (DrawOrder::TopDown)
|
||||
{
|
||||
|
||||
}
|
||||
|
||||
//public
|
||||
void ObjectGroup::parse(const pugi::xml_node& node, Map* map)
|
||||
{
|
||||
assert(map);
|
||||
|
||||
std::string attribString = node.name();
|
||||
if (attribString != "objectgroup")
|
||||
{
|
||||
Logger::log("Node was not an object group, node will be skipped.", Logger::Type::Error);
|
||||
return;
|
||||
}
|
||||
|
||||
setName(node.attribute("name").as_string());
|
||||
setClass(node.attribute("class").as_string());
|
||||
|
||||
attribString = node.attribute("color").as_string();
|
||||
if (!attribString.empty())
|
||||
{
|
||||
m_colour = colourFromString(attribString);
|
||||
}
|
||||
|
||||
setOpacity(node.attribute("opacity").as_float(1.f));
|
||||
setVisible(node.attribute("visible").as_bool(true));
|
||||
setOffset(node.attribute("offsetx").as_int(0), node.attribute("offsety").as_int(0));
|
||||
setSize(node.attribute("width").as_uint(0), node.attribute("height").as_uint(0));
|
||||
setParallaxFactor(node.attribute("parallaxx").as_float(1.f), node.attribute("parallaxy").as_float(1.f));
|
||||
|
||||
std::string tintColour = node.attribute("tintcolor").as_string();
|
||||
if (!tintColour.empty())
|
||||
{
|
||||
setTintColour(colourFromString(tintColour));
|
||||
}
|
||||
|
||||
attribString = node.attribute("draworder").as_string();
|
||||
if (attribString == "index")
|
||||
{
|
||||
m_drawOrder = DrawOrder::Index;
|
||||
}
|
||||
|
||||
for (const auto& child : node.children())
|
||||
{
|
||||
attribString = child.name();
|
||||
if (attribString == "properties")
|
||||
{
|
||||
for (const auto& p : child)
|
||||
{
|
||||
m_properties.emplace_back();
|
||||
m_properties.back().parse(p);
|
||||
}
|
||||
}
|
||||
else if (attribString == "object")
|
||||
{
|
||||
m_objects.emplace_back();
|
||||
m_objects.back().parse(child, map);
|
||||
}
|
||||
}
|
||||
}
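A short hedged sketch of consuming the values ObjectGroup::parse() fills in, assuming tmxlite's public getters:

#include <tmxlite/ObjectGroup.hpp>

bool drawsInFileOrder(const tmx::ObjectGroup& group)
{
    //DrawOrder::Index means objects render in the order they appear in the
    //TMX file; DrawOrder::TopDown (the constructor default above) means a
    //renderer should sort them by y position.
    return group.getDrawOrder() == tmx::ObjectGroup::DrawOrder::Index;
}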
|
||||
151
ext/tmxlite/src/ObjectTypes.cpp
Normal file
@@ -0,0 +1,151 @@
|
||||
/*********************************************************************
|
||||
Raphaël Frantz 2021
|
||||
|
||||
tmxlite - Zlib license.
|
||||
|
||||
This software is provided 'as-is', without any express or
|
||||
implied warranty. In no event will the authors be held
|
||||
liable for any damages arising from the use of this software.
|
||||
|
||||
Permission is granted to anyone to use this software for any purpose,
|
||||
including commercial applications, and to alter it and redistribute
|
||||
it freely, subject to the following restrictions:
|
||||
|
||||
1. The origin of this software must not be misrepresented;
|
||||
you must not claim that you wrote the original software.
|
||||
If you use this software in a product, an acknowledgment
|
||||
in the product documentation would be appreciated but
|
||||
is not required.
|
||||
|
||||
2. Altered source versions must be plainly marked as such,
|
||||
and must not be misrepresented as being the original software.
|
||||
|
||||
3. This notice may not be removed or altered from any
|
||||
source distribution.
|
||||
*********************************************************************/
|
||||
|
||||
#include "tmxlite/FreeFuncs.hpp"
|
||||
#include "tmxlite/ObjectTypes.hpp"
|
||||
#include "tmxlite/detail/Log.hpp"
|
||||
|
||||
#include <pugixml.hpp>
|
||||
|
||||
using namespace tmx;
|
||||
|
||||
bool ObjectTypes::load(const std::string &path)
|
||||
{
|
||||
reset();
|
||||
|
||||
//open the doc
|
||||
pugi::xml_document doc;
|
||||
auto result = doc.load_file(path.c_str());
|
||||
if (!result)
|
||||
{
|
||||
Logger::log("Failed opening " + path, Logger::Type::Error);
|
||||
Logger::log("Reason: " + std::string(result.description()), Logger::Type::Error);
|
||||
return false;
|
||||
}
|
||||
|
||||
//make sure we have consistent path separators
|
||||
m_workingDirectory = path;
|
||||
std::replace(m_workingDirectory.begin(), m_workingDirectory.end(), '\\', '/');
|
||||
m_workingDirectory = getFilePath(m_workingDirectory);
|
||||
|
||||
if (!m_workingDirectory.empty() &&
|
||||
m_workingDirectory.back() == '/')
|
||||
{
|
||||
m_workingDirectory.pop_back();
|
||||
}
|
||||
|
||||
|
||||
//find the node and bail if it doesn't exist
|
||||
auto node = doc.child("objecttypes");
|
||||
if (!node)
|
||||
{
|
||||
Logger::log("Failed opening object types: " + path + ", no objecttype node found", Logger::Type::Error);
|
||||
return reset();
|
||||
}
|
||||
|
||||
return parseObjectTypesNode(node);
|
||||
}
|
||||
|
||||
bool ObjectTypes::loadFromString(const std::string &data, const std::string &workingDir)
|
||||
{
|
||||
reset();
|
||||
|
||||
//open the doc
|
||||
pugi::xml_document doc;
|
||||
auto result = doc.load_string(data.c_str());
|
||||
if (!result)
|
||||
{
|
||||
Logger::log("Failed opening object types", Logger::Type::Error);
|
||||
Logger::log("Reason: " + std::string(result.description()), Logger::Type::Error);
|
||||
return false;
|
||||
}
|
||||
|
||||
//make sure we have consistent path separators
|
||||
m_workingDirectory = workingDir;
|
||||
std::replace(m_workingDirectory.begin(), m_workingDirectory.end(), '\\', '/');
|
||||
m_workingDirectory = getFilePath(m_workingDirectory);
|
||||
|
||||
if (!m_workingDirectory.empty() &&
|
||||
m_workingDirectory.back() == '/')
|
||||
{
|
||||
m_workingDirectory.pop_back();
|
||||
}
|
||||
|
||||
|
||||
//find the node and bail if it doesn't exist
|
||||
auto node = doc.child("objecttypes");
|
||||
if (!node)
|
||||
{
|
||||
Logger::log("Failed object types: no objecttypes node found", Logger::Type::Error);
|
||||
return reset();
|
||||
}
|
||||
|
||||
return parseObjectTypesNode(node);
|
||||
}
|
||||
|
||||
bool ObjectTypes::parseObjectTypesNode(const pugi::xml_node &node)
|
||||
{
|
||||
//<objecttypes> <-- node
|
||||
// <objecttype name="Character" color="#1e47ff">
|
||||
// <property>...
|
||||
|
||||
//parse types
|
||||
for(const auto& child : node.children())
|
||||
{
|
||||
std::string attribString = child.name();
|
||||
if (attribString == "objecttype")
|
||||
{
|
||||
Type type;
|
||||
|
||||
//parse the metadata of the type
|
||||
type.name = child.attribute("name").as_string();
|
||||
type.colour = colourFromString(child.attribute("color").as_string("#FFFFFFFF"));
|
||||
|
||||
//parse the default properties of the type
|
||||
for (const auto& p : child.children())
|
||||
{
|
||||
Property prop;
|
||||
prop.parse(p, true);
|
||||
type.properties.push_back(prop);
|
||||
}
|
||||
|
||||
m_types.push_back(type);
|
||||
}
|
||||
else
|
||||
{
|
||||
LOG("Unidentified name " + attribString + ": node skipped", Logger::Type::Warning);
|
||||
}
|
||||
}
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
bool ObjectTypes::reset()
|
||||
{
|
||||
m_workingDirectory.clear();
|
||||
m_types.clear();
|
||||
return false;
|
||||
}
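A hedged usage sketch for the loader above, assuming a getTypes() accessor for m_types as in tmxlite's header; the file name is illustrative:

#include <tmxlite/ObjectTypes.hpp>
#include <iostream>

void listObjectTypes()
{
    tmx::ObjectTypes types;
    if (types.load("objecttypes.xml")) //as exported by Tiled's Object Types Editor
    {
        for (const auto& type : types.getTypes())
        {
            std::cout << type.name << ": "
                      << type.properties.size() << " default properties\n";
        }
    }
}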
|
||||
164
ext/tmxlite/src/Property.cpp
Normal file
@@ -0,0 +1,164 @@
|
||||
/*********************************************************************
|
||||
Matt Marchant 2016 - 2021
|
||||
http://trederia.blogspot.com
|
||||
|
||||
tmxlite - Zlib license.
|
||||
|
||||
This software is provided 'as-is', without any express or
|
||||
implied warranty. In no event will the authors be held
|
||||
liable for any damages arising from the use of this software.
|
||||
|
||||
Permission is granted to anyone to use this software for any purpose,
|
||||
including commercial applications, and to alter it and redistribute
|
||||
it freely, subject to the following restrictions:
|
||||
|
||||
1. The origin of this software must not be misrepresented;
|
||||
you must not claim that you wrote the original software.
|
||||
If you use this software in a product, an acknowledgment
|
||||
in the product documentation would be appreciated but
|
||||
is not required.
|
||||
|
||||
2. Altered source versions must be plainly marked as such,
|
||||
and must not be misrepresented as being the original software.
|
||||
|
||||
3. This notice may not be removed or altered from any
|
||||
source distribution.
|
||||
*********************************************************************/
|
||||
|
||||
#include "tmxlite/Property.hpp"
|
||||
#include "tmxlite/detail/Log.hpp"
|
||||
#include "tmxlite/FreeFuncs.hpp"
|
||||
|
||||
#include <pugixml.hpp>
|
||||
|
||||
using namespace tmx;
|
||||
|
||||
Property::Property()
|
||||
: m_type(Type::Undef)
|
||||
{
|
||||
}
|
||||
|
||||
Property Property::fromBoolean(bool value)
|
||||
{
|
||||
Property p;
|
||||
p.m_type = Type::Boolean;
|
||||
p.m_boolValue = value;
|
||||
return p;
|
||||
}
|
||||
|
||||
Property Property::fromFloat(float value)
|
||||
{
|
||||
Property p;
|
||||
p.m_type = Type::Float;
|
||||
p.m_floatValue = value;
|
||||
return p;
|
||||
}
|
||||
|
||||
Property Property::fromInt(int value)
|
||||
{
|
||||
Property p;
|
||||
p.m_type = Type::Int;
|
||||
p.m_intValue = value;
|
||||
return p;
|
||||
}
|
||||
|
||||
Property Property::fromString(const std::string& value)
|
||||
{
|
||||
Property p;
|
||||
p.m_type = Type::String;
|
||||
p.m_stringValue = value;
|
||||
return p;
|
||||
}
|
||||
|
||||
Property Property::fromColour(const Colour& value)
|
||||
{
|
||||
Property p;
|
||||
p.m_type = Type::Colour;
|
||||
p.m_colourValue = value;
|
||||
return p;
|
||||
}
|
||||
|
||||
Property Property::fromFile(const std::string& value)
|
||||
{
|
||||
Property p;
|
||||
p.m_type = Type::File;
|
||||
p.m_stringValue = value;
|
||||
return p;
|
||||
}
|
||||
|
||||
Property Property::fromObject(int value)
|
||||
{
|
||||
Property p;
|
||||
p.m_type = Type::Object;
|
||||
p.m_intValue = value;
|
||||
return p;
|
||||
}
|
||||
|
||||
//public
|
||||
void Property::parse(const pugi::xml_node& node, bool isObjectTypes)
|
||||
{
|
||||
// The value attribute name is different in object types
|
||||
const char *const valueAttribute = isObjectTypes ? "default" : "value";
|
||||
|
||||
std::string attribData = node.name();
|
||||
if (attribData != "property")
|
||||
{
|
||||
Logger::log("Node was not a valid property, node will be skipped", Logger::Type::Error);
|
||||
return;
|
||||
}
|
||||
|
||||
m_name = node.attribute("name").as_string();
|
||||
|
||||
attribData = node.attribute("type").as_string("string");
|
||||
if (attribData == "bool")
|
||||
{
|
||||
attribData = node.attribute(valueAttribute).as_string("false");
|
||||
m_boolValue = (attribData == "true");
|
||||
m_type = Type::Boolean;
|
||||
return;
|
||||
}
|
||||
else if (attribData == "int")
|
||||
{
|
||||
m_intValue = node.attribute(valueAttribute).as_int(0);
|
||||
m_type = Type::Int;
|
||||
return;
|
||||
}
|
||||
else if (attribData == "float")
|
||||
{
|
||||
m_floatValue = node.attribute(valueAttribute).as_float(0.f);
|
||||
m_type = Type::Float;
|
||||
return;
|
||||
}
|
||||
else if (attribData == "string")
|
||||
{
|
||||
m_stringValue = node.attribute(valueAttribute).as_string();
|
||||
|
||||
//if value is empty, try getting the child value instead
|
||||
//as this is how multiline string properties are stored.
|
||||
if(m_stringValue.empty())
|
||||
{
|
||||
m_stringValue = node.child_value();
|
||||
}
|
||||
|
||||
m_type = Type::String;
|
||||
return;
|
||||
}
|
||||
else if (attribData == "color")
|
||||
{
|
||||
m_colourValue = colourFromString(node.attribute(valueAttribute).as_string("#FFFFFFFF"));
|
||||
m_type = Type::Colour;
|
||||
return;
|
||||
}
|
||||
else if (attribData == "file")
|
||||
{
|
||||
m_stringValue = node.attribute(valueAttribute).as_string();
|
||||
m_type = Type::File;
|
||||
return;
|
||||
}
|
||||
else if (attribData == "object")
|
||||
{
|
||||
m_intValue = node.attribute(valueAttribute).as_int(0);
|
||||
m_type = Type::Object;
|
||||
return;
|
||||
}
|
||||
}
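A hedged sketch of reading a parsed property back out: getType() selects which typed getter is valid, mirroring the branches above.

#include <tmxlite/Property.hpp>
#include <iostream>

void printProperty(const tmx::Property& prop)
{
    switch (prop.getType())
    {
    case tmx::Property::Type::Boolean:
        std::cout << prop.getName() << " = " << prop.getBoolValue() << '\n';
        break;
    case tmx::Property::Type::Int:
        std::cout << prop.getName() << " = " << prop.getIntValue() << '\n';
        break;
    case tmx::Property::Type::String:
        std::cout << prop.getName() << " = " << prop.getStringValue() << '\n';
        break;
    default: //Float, Colour, File and Object follow the same pattern
        break;
    }
}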
|
||||
342
ext/tmxlite/src/TileLayer.cpp
Normal file
@@ -0,0 +1,342 @@
|
||||
/*********************************************************************
|
||||
Matt Marchant 2016 - 2023
|
||||
http://trederia.blogspot.com
|
||||
|
||||
tmxlite - Zlib license.
|
||||
|
||||
This software is provided 'as-is', without any express or
|
||||
implied warranty. In no event will the authors be held
|
||||
liable for any damages arising from the use of this software.
|
||||
|
||||
Permission is granted to anyone to use this software for any purpose,
|
||||
including commercial applications, and to alter it and redistribute
|
||||
it freely, subject to the following restrictions:
|
||||
|
||||
1. The origin of this software must not be misrepresented;
|
||||
you must not claim that you wrote the original software.
|
||||
If you use this software in a product, an acknowledgment
|
||||
in the product documentation would be appreciated but
|
||||
is not required.
|
||||
|
||||
2. Altered source versions must be plainly marked as such,
|
||||
and must not be misrepresented as being the original software.
|
||||
|
||||
3. This notice may not be removed or altered from any
|
||||
source distribution.
|
||||
*********************************************************************/
|
||||
|
||||
#include "base64.h"
|
||||
#include "tmxlite/FreeFuncs.hpp"
|
||||
#include "tmxlite/TileLayer.hpp"
|
||||
#include "tmxlite/detail/Log.hpp"
|
||||
#ifndef USE_ZLIB
|
||||
# include "tmxlite/detail/gzip.hpp"
|
||||
#endif
|
||||
|
||||
#include <pugixml.hpp>
|
||||
#include <zstd.h>
|
||||
#include <sstream>
|
||||
#include <span>
|
||||
|
||||
using namespace tmx;
|
||||
|
||||
namespace
|
||||
{
|
||||
struct CompressionType final
|
||||
{
|
||||
enum
|
||||
{
|
||||
Zlib, GZip, Zstd, None
|
||||
};
|
||||
};
|
||||
}
|
||||
|
||||
TileLayer::TileLayer(std::size_t tileCount)
|
||||
: m_tileCount (tileCount)
|
||||
{
|
||||
m_tiles.reserve(tileCount);
|
||||
}
|
||||
|
||||
//public
|
||||
void TileLayer::parse(const pugi::xml_node& node, Map*)
|
||||
{
|
||||
std::string attribName = node.name();
|
||||
if (attribName != "layer")
|
||||
{
|
||||
Logger::log("node not a layer node, skipped parsing", Logger::Type::Error);
|
||||
return;
|
||||
}
|
||||
|
||||
setName(node.attribute("name").as_string());
|
||||
setClass(node.attribute("class").as_string());
|
||||
setOpacity(node.attribute("opacity").as_float(1.f));
|
||||
setVisible(node.attribute("visible").as_bool(true));
|
||||
setOffset(node.attribute("offsetx").as_int(0), node.attribute("offsety").as_int(0));
|
||||
setSize(node.attribute("width").as_uint(0), node.attribute("height").as_uint(0));
|
||||
setParallaxFactor(node.attribute("parallaxx").as_float(1.f), node.attribute("parallaxy").as_float(1.f));
|
||||
|
||||
std::string tintColour = node.attribute("tintcolor").as_string();
|
||||
if (!tintColour.empty())
|
||||
{
|
||||
setTintColour(colourFromString(tintColour));
|
||||
}
|
||||
|
||||
for (const auto& child : node.children())
|
||||
{
|
||||
attribName = child.name();
|
||||
if (attribName == "data")
|
||||
{
|
||||
attribName = child.attribute("encoding").as_string();
|
||||
if (attribName == "base64")
|
||||
{
|
||||
parseBase64(child);
|
||||
}
|
||||
else if (attribName == "csv")
|
||||
{
|
||||
parseCSV(child);
|
||||
}
|
||||
else
|
||||
{
|
||||
parseUnencoded(child);
|
||||
}
|
||||
}
|
||||
else if (attribName == "properties")
|
||||
{
|
||||
for (const auto& p : child.children())
|
||||
{
|
||||
addProperty(p);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
}
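A hedged sketch of indexing the result: for non-infinite maps the tiles parsed above are stored row-major, so the tile at (x, y) is a simple offset; infinite maps store chunks instead (see getChunks()).

#include <tmxlite/TileLayer.hpp>
#include <cstddef>
#include <cstdint>

std::uint32_t gidAt(const tmx::TileLayer& layer, std::size_t x, std::size_t y)
{
    const auto& tiles = layer.getTiles(); //empty for infinite maps
    return tiles[y * layer.getSize().x + x].ID;
}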
|
||||
|
||||
//private
|
||||
void TileLayer::parseBase64(const pugi::xml_node& node)
|
||||
{
|
||||
auto processDataString = [](std::string dataString, std::size_t tileCount, std::int32_t compressionType)->std::vector<std::uint32_t>
|
||||
{
|
||||
std::stringstream ss;
|
||||
ss << dataString;
|
||||
ss >> dataString;
|
||||
dataString = base64_decode(dataString);
|
||||
|
||||
std::size_t expectedSize = tileCount * 4; //4 bytes per tile
|
||||
std::vector<unsigned char> byteData;
|
||||
byteData.reserve(expectedSize);
|
||||
|
||||
switch (compressionType)
|
||||
{
|
||||
default:
|
||||
byteData.insert(byteData.end(), dataString.begin(), dataString.end());
|
||||
break;
|
||||
case CompressionType::Zstd:
|
||||
{
|
||||
std::size_t dataSize = dataString.length() * sizeof(unsigned char);
|
||||
byteData.resize(expectedSize); //ZSTD_decompress writes into the buffer directly, so it must be sized, not merely reserved
std::size_t result = ZSTD_decompress(byteData.data(), expectedSize, &dataString[0], dataSize);
|
||||
|
||||
if (ZSTD_isError(result))
|
||||
{
|
||||
std::string err = ZSTD_getErrorName(result);
|
||||
LOG("Failed to decompress layer data, node skipped.\nError: " + err, Logger::Type::Error);
|
||||
}
|
||||
}
|
||||
break;
|
||||
case CompressionType::GZip:
|
||||
#ifndef USE_ZLIB
|
||||
{
|
||||
byteData.resize(expectedSize);
|
||||
const auto source = std::span(reinterpret_cast<const uint8_t*>(dataString.data()), dataString.size());
|
||||
|
||||
GZipReader reader;
|
||||
if (!reader.OpenMemory(source) || !reader.Read(byteData) || !reader.Check())
|
||||
{
|
||||
LOG("Failed to decompress layer data, node skipped.", Logger::Type::Error);
|
||||
return {};
|
||||
}
|
||||
}
|
||||
break;
|
||||
#endif
|
||||
//[[fallthrough]];
|
||||
case CompressionType::Zlib:
|
||||
{
|
||||
//unzip
|
||||
std::size_t dataSize = dataString.length() * sizeof(unsigned char);
|
||||
|
||||
if (!decompress(dataString.c_str(), byteData, dataSize, expectedSize))
|
||||
{
|
||||
LOG("Failed to decompress layer data, node skipped.", Logger::Type::Error);
|
||||
return {};
|
||||
}
|
||||
}
|
||||
break;
|
||||
}
|
||||
|
||||
//data stream is in bytes so we need to OR into 32 bit values
|
||||
std::vector<std::uint32_t> IDs;
|
||||
IDs.reserve(tileCount);
|
||||
for (auto i = 0u; i < expectedSize - 3u; i += 4u)
|
||||
{
|
||||
std::uint32_t id = byteData[i] | byteData[i + 1] << 8 | byteData[i + 2] << 16 | byteData[i + 3] << 24;
|
||||
IDs.push_back(id);
|
||||
}
|
||||
|
||||
return IDs;
|
||||
};
|
||||
|
||||
std::int32_t compressionType = CompressionType::None;
|
||||
std::string compression = node.attribute("compression").as_string();
|
||||
if (compression == "gzip")
|
||||
{
|
||||
compressionType = CompressionType::GZip;
|
||||
}
|
||||
else if (compression == "zlib")
|
||||
{
|
||||
compressionType = CompressionType::Zlib;
|
||||
}
|
||||
else if (compression == "zstd")
|
||||
{
|
||||
compressionType = CompressionType::Zstd;
|
||||
}
|
||||
|
||||
std::string data = node.text().as_string();
|
||||
if (data.empty())
|
||||
{
|
||||
//check for chunk nodes
|
||||
auto dataCount = 0;
|
||||
for (const auto& childNode : node.children())
|
||||
{
|
||||
std::string childName = childNode.name();
|
||||
if (childName == "chunk")
|
||||
{
|
||||
std::string dataString = childNode.text().as_string();
|
||||
if (!dataString.empty())
|
||||
{
|
||||
Chunk chunk;
|
||||
chunk.position.x = childNode.attribute("x").as_int();
|
||||
chunk.position.y = childNode.attribute("y").as_int();
|
||||
|
||||
chunk.size.x = childNode.attribute("width").as_int();
|
||||
chunk.size.y = childNode.attribute("height").as_int();
|
||||
|
||||
auto IDs = processDataString(dataString, (chunk.size.x * chunk.size.y), compressionType);
|
||||
|
||||
if (!IDs.empty())
|
||||
{
|
||||
createTiles(IDs, chunk.tiles);
|
||||
m_chunks.push_back(chunk);
|
||||
dataCount++;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if (dataCount == 0)
|
||||
{
|
||||
Logger::log("Layer " + getName() + " has no layer data. Layer skipped.", Logger::Type::Error);
|
||||
return;
|
||||
}
|
||||
}
|
||||
else
|
||||
{
|
||||
auto IDs = processDataString(data, m_tileCount, compressionType);
|
||||
createTiles(IDs, m_tiles);
|
||||
}
|
||||
}
|
||||
|
||||
void TileLayer::parseCSV(const pugi::xml_node& node)
|
||||
{
|
||||
auto processDataString = [](const std::string& dataString, std::size_t tileCount)->std::vector<std::uint32_t>
|
||||
{
|
||||
std::vector<std::uint32_t> IDs;
|
||||
IDs.reserve(tileCount);
|
||||
|
||||
const char* ptr = dataString.c_str();
|
||||
while (true)
|
||||
{
|
||||
char* end;
|
||||
auto res = std::strtoul(ptr, &end, 10);
|
||||
if (end == ptr) break;
|
||||
ptr = end;
|
||||
IDs.push_back(res);
|
||||
if (*ptr == ',') ++ptr;
|
||||
}
|
||||
|
||||
return IDs;
|
||||
};
|
||||
|
||||
std::string data = node.text().as_string();
|
||||
if (data.empty())
|
||||
{
|
||||
//check for chunk nodes
|
||||
auto dataCount = 0;
|
||||
for (const auto& childNode : node.children())
|
||||
{
|
||||
std::string childName = childNode.name();
|
||||
if (childName == "chunk")
|
||||
{
|
||||
std::string dataString = childNode.text().as_string();
|
||||
if (!dataString.empty())
|
||||
{
|
||||
Chunk chunk;
|
||||
chunk.position.x = childNode.attribute("x").as_int();
|
||||
chunk.position.y = childNode.attribute("y").as_int();
|
||||
|
||||
chunk.size.x = childNode.attribute("width").as_int();
|
||||
chunk.size.y = childNode.attribute("height").as_int();
|
||||
|
||||
auto IDs = processDataString(dataString, chunk.size.x * chunk.size.y);
|
||||
|
||||
if (!IDs.empty())
|
||||
{
|
||||
createTiles(IDs, chunk.tiles);
|
||||
m_chunks.push_back(chunk);
|
||||
dataCount++;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if (dataCount == 0)
|
||||
{
|
||||
Logger::log("Layer " + getName() + " has no layer data. Layer skipped.", Logger::Type::Error);
|
||||
return;
|
||||
}
|
||||
}
|
||||
else
|
||||
{
|
||||
createTiles(processDataString(data, m_tileCount), m_tiles);
|
||||
}
|
||||
}
|
||||
|
||||
void TileLayer::parseUnencoded(const pugi::xml_node& node)
|
||||
{
|
||||
std::string attribName;
|
||||
std::vector<std::uint32_t> IDs;
|
||||
IDs.reserve(m_tileCount);
|
||||
|
||||
for (const auto& child : node.children())
|
||||
{
|
||||
attribName = child.name();
|
||||
if (attribName == "tile")
|
||||
{
|
||||
IDs.push_back(child.attribute("gid").as_uint());
|
||||
}
|
||||
}
|
||||
|
||||
createTiles(IDs, m_tiles);
|
||||
}
|
||||
|
||||
void TileLayer::createTiles(const std::vector<std::uint32_t>& IDs, std::vector<Tile>& destination)
|
||||
{
|
||||
//LOG(IDs.size() != m_tileCount, "Layer tile count does not match expected size. Found: "
|
||||
// + std::to_string(IDs.size()) + ", expected: " + std::to_string(m_tileCount));
|
||||
|
||||
static const std::uint32_t mask = 0xf0000000;
|
||||
for (const auto& id : IDs)
|
||||
{
|
||||
destination.emplace_back();
|
||||
destination.back().flipFlags = ((id & mask) >> 28);
|
||||
destination.back().ID = id & ~mask;
|
||||
}
|
||||
}
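The top four bits of each raw GID become Tile::flipFlags, matching the TMX spec: bit 31 = horizontal, bit 30 = vertical, bit 29 = (anti)diagonal flip. A hedged sketch using tmxlite's FlipFlag constants:

#include <tmxlite/TileLayer.hpp>

bool isFlippedHorizontally(const tmx::TileLayer::Tile& tile)
{
    //after the >> 28 above, the horizontal bit (0x80000000) lands on 0x8
    return (tile.flipFlags & tmx::TileLayer::FlipFlag::Horizontal) != 0;
}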
|
||||
456
ext/tmxlite/src/Tileset.cpp
Normal file
@@ -0,0 +1,456 @@
|
||||
/*********************************************************************
|
||||
Matt Marchant 2016 - 2023
|
||||
http://trederia.blogspot.com
|
||||
|
||||
tmxlite - Zlib license.
|
||||
|
||||
This software is provided 'as-is', without any express or
|
||||
implied warranty. In no event will the authors be held
|
||||
liable for any damages arising from the use of this software.
|
||||
|
||||
Permission is granted to anyone to use this software for any purpose,
|
||||
including commercial applications, and to alter it and redistribute
|
||||
it freely, subject to the following restrictions:
|
||||
|
||||
1. The origin of this software must not be misrepresented;
|
||||
you must not claim that you wrote the original software.
|
||||
If you use this software in a product, an acknowledgment
|
||||
in the product documentation would be appreciated but
|
||||
is not required.
|
||||
|
||||
2. Altered source versions must be plainly marked as such,
|
||||
and must not be misrepresented as being the original software.
|
||||
|
||||
3. This notice may not be removed or altered from any
|
||||
source distribution.
|
||||
*********************************************************************/
|
||||
|
||||
#include "tmxlite/Tileset.hpp"
|
||||
#include "tmxlite/FreeFuncs.hpp"
|
||||
#include "tmxlite/detail/Log.hpp"
|
||||
|
||||
#include <pugixml.hpp>
|
||||
#include <ctype.h>
|
||||
|
||||
using namespace tmx;
|
||||
|
||||
Tileset::Tileset(const std::string& workingDir)
|
||||
: m_workingDir (workingDir),
|
||||
m_firstGID (0),
|
||||
m_spacing (0),
|
||||
m_margin (0),
|
||||
m_tileCount (0),
|
||||
m_columnCount (0),
|
||||
m_objectAlignment (ObjectAlignment::Unspecified),
|
||||
m_transparencyColour (0, 0, 0, 0),
|
||||
m_hasTransparency (false)
|
||||
{
|
||||
|
||||
}
|
||||
|
||||
//public
|
||||
void Tileset::parse(pugi::xml_node node, Map* map)
|
||||
{
|
||||
assert(map);
|
||||
|
||||
std::string attribString = node.name();
|
||||
if (attribString != "tileset")
|
||||
{
|
||||
Logger::log(attribString + ": not a tileset node! Node will be skipped.", Logger::Type::Warning);
|
||||
return;
|
||||
}
|
||||
|
||||
m_firstGID = node.attribute("firstgid").as_int();
|
||||
if (m_firstGID == 0)
|
||||
{
|
||||
Logger::log("Invalid first GID in tileset. Tileset node skipped.", Logger::Type::Warning);
|
||||
return;
|
||||
}
|
||||
|
||||
pugi::xml_document tsxDoc; //need to keep this in scope
|
||||
if (node.attribute("source"))
|
||||
{
|
||||
//parse TSX doc
|
||||
std::string path = node.attribute("source").as_string();
|
||||
path = resolveFilePath(path, m_workingDir);
|
||||
|
||||
//as the TSX file now dictates the image path, the working
|
||||
//directory is now that of the tsx file
|
||||
auto position = path.find_last_of('/');
|
||||
if (position != std::string::npos)
|
||||
{
|
||||
m_workingDir = path.substr(0, position);
|
||||
}
|
||||
else
|
||||
{
|
||||
m_workingDir = "";
|
||||
}
|
||||
|
||||
//see if doc can be opened
|
||||
auto result = tsxDoc.load_file(path.c_str());
|
||||
if (!result)
|
||||
{
|
||||
Logger::log(path + ": Failed opening tsx file for tile set, tile set will be skipped", Logger::Type::Error);
|
||||
return reset();
|
||||
}
|
||||
|
||||
//if it can then replace the current node with tsx node
|
||||
node = tsxDoc.child("tileset");
|
||||
if (!node)
|
||||
{
|
||||
Logger::log("tsx file does not contain a tile set node, tile set will be skipped", Logger::Type::Error);
|
||||
return reset();
|
||||
}
|
||||
}
|
||||
|
||||
m_name = node.attribute("name").as_string();
|
||||
LOG("found tile set " + m_name, Logger::Type::Info);
|
||||
m_class = node.attribute("class").as_string();
|
||||
|
||||
m_tileSize.x = node.attribute("tilewidth").as_int();
|
||||
m_tileSize.y = node.attribute("tileheight").as_int();
|
||||
if (m_tileSize.x == 0 || m_tileSize.y == 0)
|
||||
{
|
||||
Logger::log("Invalid tile size found in tile set node. Node will be skipped.", Logger::Type::Error);
|
||||
return reset();
|
||||
}
|
||||
|
||||
m_spacing = node.attribute("spacing").as_int();
|
||||
m_margin = node.attribute("margin").as_int();
|
||||
m_tileCount = node.attribute("tilecount").as_int();
|
||||
m_columnCount = node.attribute("columns").as_int();
|
||||
|
||||
m_tileIndex.reserve(m_tileCount);
|
||||
m_tiles.reserve(m_tileCount);
|
||||
|
||||
std::string objectAlignment = node.attribute("objectalignment").as_string();
|
||||
if (!objectAlignment.empty())
|
||||
{
|
||||
if (objectAlignment == "unspecified")
|
||||
{
|
||||
m_objectAlignment = ObjectAlignment::Unspecified;
|
||||
}
|
||||
else if (objectAlignment == "topleft")
|
||||
{
|
||||
m_objectAlignment = ObjectAlignment::TopLeft;
|
||||
}
|
||||
else if (objectAlignment == "top")
|
||||
{
|
||||
m_objectAlignment = ObjectAlignment::Top;
|
||||
}
|
||||
else if (objectAlignment == "topright")
|
||||
{
|
||||
m_objectAlignment = ObjectAlignment::TopRight;
|
||||
}
|
||||
else if (objectAlignment == "left")
|
||||
{
|
||||
m_objectAlignment = ObjectAlignment::Left;
|
||||
}
|
||||
else if (objectAlignment == "center")
|
||||
{
|
||||
m_objectAlignment = ObjectAlignment::Center;
|
||||
}
|
||||
else if (objectAlignment == "right")
|
||||
{
|
||||
m_objectAlignment = ObjectAlignment::Right;
|
||||
}
|
||||
else if (objectAlignment == "bottomleft")
|
||||
{
|
||||
m_objectAlignment = ObjectAlignment::BottomLeft;
|
||||
}
|
||||
else if (objectAlignment == "bottom")
|
||||
{
|
||||
m_objectAlignment = ObjectAlignment::Bottom;
|
||||
}
|
||||
else if (objectAlignment == "bottomright")
|
||||
{
|
||||
m_objectAlignment = ObjectAlignment::BottomRight;
|
||||
}
|
||||
}
|
||||
|
||||
const auto& children = node.children();
|
||||
for (const auto& node : children)
|
||||
{
|
||||
std::string name = node.name();
|
||||
if (name == "image")
|
||||
{
|
||||
//TODO this currently doesn't cover embedded images
|
||||
//mostly because I can't figure out how to export them
|
||||
//from the Tiled editor... but also resource handling
|
||||
//should be handled by the renderer, not the parser.
|
||||
attribString = node.attribute("source").as_string();
|
||||
if (attribString.empty())
|
||||
{
|
||||
Logger::log("Tileset image node has missing source property, tile set not loaded", Logger::Type::Error);
|
||||
return reset();
|
||||
}
|
||||
m_imagePath = resolveFilePath(attribString, m_workingDir);
|
||||
if (node.attribute("trans"))
|
||||
{
|
||||
attribString = node.attribute("trans").as_string();
|
||||
m_transparencyColour = colourFromString(attribString);
|
||||
m_hasTransparency = true;
|
||||
}
|
||||
if (node.attribute("width") && node.attribute("height"))
|
||||
{
|
||||
m_imageSize.x = node.attribute("width").as_int();
|
||||
m_imageSize.y = node.attribute("height").as_int();
|
||||
}
|
||||
}
|
||||
else if (name == "tileoffset")
|
||||
{
|
||||
parseOffsetNode(node);
|
||||
}
|
||||
else if (name == "properties")
|
||||
{
|
||||
parsePropertyNode(node);
|
||||
}
|
||||
else if (name == "terraintypes")
|
||||
{
|
||||
parseTerrainNode(node);
|
||||
}
|
||||
else if (name == "tile")
|
||||
{
|
||||
parseTileNode(node, map);
|
||||
}
|
||||
}
|
||||
|
||||
//if the tsx file does not declare every tile, we create the missing ones
|
||||
if (m_tiles.size() != getTileCount())
|
||||
{
|
||||
for (std::uint32_t ID = 0; ID < getTileCount(); ID++)
|
||||
{
|
||||
createMissingTile(ID);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
std::uint32_t Tileset::getLastGID() const
|
||||
{
|
||||
assert(!m_tileIndex.empty());
|
||||
return m_firstGID + static_cast<std::uint32_t>(m_tileIndex.size()) - 1;
|
||||
}
|
||||
|
||||
const Tileset::Tile* Tileset::getTile(std::uint32_t id) const
|
||||
{
|
||||
if (!hasTile(id))
|
||||
{
|
||||
return nullptr;
|
||||
}
|
||||
|
||||
//corrects the ID. Indices and IDs are different.
|
||||
id -= m_firstGID;
|
||||
id = m_tileIndex[id];
|
||||
return id ? &m_tiles[id - 1] : nullptr;
|
||||
}
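A hedged sketch of resolving a layer GID to tile data: find the tileset whose GID range contains it, then let getTile() apply the index correction shown above.

#include <tmxlite/Map.hpp>
#include <tmxlite/Tileset.hpp>
#include <cstdint>

const tmx::Tileset::Tile* findTile(const tmx::Map& map, std::uint32_t gid)
{
    for (const auto& tileset : map.getTilesets())
    {
        if (tileset.hasTile(gid))
        {
            return tileset.getTile(gid); //may still be nullptr for gaps in the set
        }
    }
    return nullptr;
}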
|
||||
|
||||
//private
|
||||
void Tileset::reset()
|
||||
{
|
||||
m_firstGID = 0;
|
||||
m_source = "";
|
||||
m_name = "";
|
||||
m_class = "";
|
||||
m_tileSize = { 0,0 };
|
||||
m_spacing = 0;
|
||||
m_margin = 0;
|
||||
m_tileCount = 0;
|
||||
m_columnCount = 0;
|
||||
m_objectAlignment = ObjectAlignment::Unspecified;
|
||||
m_tileOffset = { 0,0 };
|
||||
m_properties.clear();
|
||||
m_imagePath = "";
|
||||
m_transparencyColour = { 0, 0, 0, 0 };
|
||||
m_hasTransparency = false;
|
||||
m_terrainTypes.clear();
|
||||
m_tileIndex.clear();
|
||||
m_tiles.clear();
|
||||
}
|
||||
|
||||
void Tileset::parseOffsetNode(const pugi::xml_node& node)
|
||||
{
|
||||
m_tileOffset.x = node.attribute("x").as_int();
|
||||
m_tileOffset.y = node.attribute("y").as_int();
|
||||
}
|
||||
|
||||
void Tileset::parsePropertyNode(const pugi::xml_node& node)
|
||||
{
|
||||
const auto& children = node.children();
|
||||
for (const auto& child : children)
|
||||
{
|
||||
m_properties.emplace_back();
|
||||
m_properties.back().parse(child);
|
||||
}
|
||||
}
|
||||
|
||||
void Tileset::parseTerrainNode(const pugi::xml_node& node)
|
||||
{
|
||||
const auto& children = node.children();
|
||||
for (const auto& child : children)
|
||||
{
|
||||
std::string name = child.name();
|
||||
if (name == "terrain")
|
||||
{
|
||||
m_terrainTypes.emplace_back();
|
||||
auto& terrain = m_terrainTypes.back();
|
||||
terrain.name = child.attribute("name").as_string();
|
||||
terrain.tileID = child.attribute("tile").as_int();
|
||||
auto properties = child.child("properties");
|
||||
if (properties)
|
||||
{
|
||||
for (const auto& p : properties)
|
||||
{
|
||||
name = p.name();
|
||||
if (name == "property")
|
||||
{
|
||||
terrain.properties.emplace_back();
|
||||
terrain.properties.back().parse(p);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
Tileset::Tile& Tileset::newTile(std::uint32_t ID)
|
||||
{
|
||||
Tile& tile = (m_tiles.emplace_back(), m_tiles.back());
|
||||
if (m_tileIndex.size() <= ID)
|
||||
{
|
||||
m_tileIndex.resize(ID + 1, 0);
|
||||
}
|
||||
|
||||
m_tileIndex[ID] = static_cast<std::uint32_t>(m_tiles.size());
|
||||
tile.ID = ID;
|
||||
return tile;
|
||||
}
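//Hedged reading of the indirection above: m_tileIndex maps ID -> index + 1,
//so a stored zero doubles as "no tile with this ID"; getTile() undoes the
//offset with `return id ? &m_tiles[id - 1] : nullptr`.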
|
||||
|
||||
void Tileset::parseTileNode(const pugi::xml_node& node, Map* map)
|
||||
{
|
||||
assert(map);
|
||||
|
||||
Tile& tile = newTile(node.attribute("id").as_int());
|
||||
if (node.attribute("terrain"))
|
||||
{
|
||||
std::string data = node.attribute("terrain").as_string();
|
||||
bool lastWasChar = true;
|
||||
std::size_t idx = 0u;
|
||||
for (auto i = 0u; i < data.size() && idx < tile.terrainIndices.size(); ++i)
|
||||
{
|
||||
if (isdigit(data[i]))
|
||||
{
|
||||
tile.terrainIndices[idx++] = std::atoi(&data[i]);
|
||||
lastWasChar = false;
|
||||
}
|
||||
else
|
||||
{
|
||||
if (!lastWasChar)
|
||||
{
|
||||
lastWasChar = true;
|
||||
}
|
||||
else
|
||||
{
|
||||
tile.terrainIndices[idx++] = -1;
|
||||
lastWasChar = false;
|
||||
}
|
||||
}
|
||||
}
|
||||
if (lastWasChar)
|
||||
{
|
||||
tile.terrainIndices[idx] = -1;
|
||||
}
|
||||
}
|
||||
|
||||
tile.probability = node.attribute("probability").as_int(100);
|
||||
|
||||
tile.className = node.attribute("type").as_string();
|
||||
if (tile.className.empty())
|
||||
{
|
||||
tile.className = node.attribute("class").as_string();
|
||||
}
|
||||
|
||||
//by default we set the tile's values as in an Image tileset
|
||||
tile.imagePath = m_imagePath;
|
||||
tile.imageSize = m_tileSize;
|
||||
|
||||
if (m_columnCount != 0)
|
||||
{
|
||||
std::uint32_t rowIndex = tile.ID % m_columnCount;
|
||||
std::uint32_t columnIndex = tile.ID / m_columnCount;
|
||||
tile.imagePosition.x = m_margin + rowIndex * (m_tileSize.x + m_spacing);
|
||||
tile.imagePosition.y = m_margin + columnIndex * (m_tileSize.y + m_spacing);
|
||||
}
|
||||
|
||||
const auto& children = node.children();
|
||||
for (const auto& child : children)
|
||||
{
|
||||
std::string name = child.name();
|
||||
if (name == "properties")
|
||||
{
|
||||
for (const auto& prop : child.children())
|
||||
{
|
||||
tile.properties.emplace_back();
|
||||
tile.properties.back().parse(prop);
|
||||
}
|
||||
}
|
||||
else if (name == "objectgroup")
|
||||
{
|
||||
tile.objectGroup.parse(child, map);
|
||||
}
|
||||
else if (name == "image")
|
||||
{
|
||||
std::string attribString = child.attribute("source").as_string();
|
||||
if (attribString.empty())
|
||||
{
|
||||
Logger::log("Tile image path missing", Logger::Type::Warning);
|
||||
continue;
|
||||
}
|
||||
tile.imagePath = resolveFilePath(attribString, m_workingDir);
|
||||
|
||||
tile.imagePosition = tmx::Vector2u(0, 0);
|
||||
|
||||
if (child.attribute("trans"))
|
||||
{
|
||||
attribString = child.attribute("trans").as_string();
|
||||
m_transparencyColour = colourFromString(attribString);
|
||||
m_hasTransparency = true;
|
||||
}
|
||||
if (child.attribute("width"))
|
||||
{
|
||||
tile.imageSize.x = child.attribute("width").as_uint();
|
||||
}
|
||||
if (child.attribute("height"))
|
||||
{
|
||||
tile.imageSize.y = child.attribute("height").as_uint();
|
||||
}
|
||||
}
|
||||
else if (name == "animation")
|
||||
{
|
||||
for (const auto& frameNode : child.children())
|
||||
{
|
||||
Tile::Animation::Frame frame;
|
||||
frame.duration = frameNode.attribute("duration").as_int();
|
||||
frame.tileID = frameNode.attribute("tileid").as_int() + m_firstGID;
|
||||
tile.animation.frames.push_back(frame);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
void Tileset::createMissingTile(std::uint32_t ID)
|
||||
{
|
||||
//bail if a tile with this ID already exists
|
||||
if (m_tileIndex.size() > ID && m_tileIndex[ID])
|
||||
{
|
||||
return;
|
||||
}
|
||||
|
||||
Tile& tile = newTile(ID);
|
||||
tile.imagePath = m_imagePath;
|
||||
tile.imageSize = m_tileSize;
|
||||
|
||||
if (m_columnCount != 0) //image-collection tilesets have no columns; avoid division by zero
{
std::uint32_t rowIndex = ID % m_columnCount;
|
||||
std::uint32_t columnIndex = ID / m_columnCount;
|
||||
tile.imagePosition.x = m_margin + rowIndex * (m_tileSize.x + m_spacing);
|
||||
tile.imagePosition.y = m_margin + columnIndex * (m_tileSize.y + m_spacing);
}
|
||||
}
|
||||
125
ext/tmxlite/src/detail/gzip.cpp
Normal file
@@ -0,0 +1,125 @@
|
||||
// gzip.cpp - portable memory miniz based gzip reader
|
||||
// SPDX-License-Identifier: Zlib
|
||||
// SPDX-FileCopyrightText: (c) 2024 a dinosaur
|
||||
|
||||
#include "tmxlite/detail/gzip.hpp"
|
||||
#include <string_view>
|
||||
|
||||
|
||||
GZipReader::GZipReader() noexcept :
|
||||
mSourceLen(0), mBytesRead(0),
|
||||
mModificationTime(0), mCrc(0), mInputSize(0), mComputedCrc(0),
|
||||
crc16(0), mFlags(0), mXflags(0), mOsId(0)
|
||||
{
|
||||
tinfl_init(&mState);
|
||||
mComputedCrc = static_cast<uint32_t>(mz_crc32(0, nullptr, 0));
|
||||
}
|
||||
|
||||
bool GZipReader::OpenMemory(const std::span<const uint8_t> source) noexcept
|
||||
{
|
||||
if (source.size() < 20)
|
||||
return false;
|
||||
|
||||
auto it = std::cbegin(source), end = std::cend(source);
|
||||
|
||||
constexpr uint8_t magic[2] = { 0x1F, 0x8B };
|
||||
if (*it++ != magic[0] || *it++ != magic[1])
|
||||
return false;
|
||||
|
||||
constexpr uint8_t CM_DEFLATE = 8;
|
||||
uint8_t compression = *it++;
|
||||
if (compression != CM_DEFLATE)
|
||||
return false;
|
||||
|
||||
mFlags = *it++;
|
||||
mModificationTime = *it++;
|
||||
mModificationTime |= *it++ << 8;
|
||||
mModificationTime |= *it++ << 16;
|
||||
mModificationTime |= *it++ << 24;
|
||||
mXflags = *it++;
|
||||
mOsId = *it++;
|
||||
|
||||
if (mFlags & FEXTRA)
|
||||
{
|
||||
// Skip "extra" field
|
||||
if (it + 2 >= end)
|
||||
return false;
|
||||
uint16_t extraLen = *it++;
|
||||
extraLen = *it++ << 8;
|
||||
if (it + extraLen >= end)
|
||||
return false;
|
||||
it += extraLen;
|
||||
}
|
||||
if (mFlags & FNAME)
|
||||
{
|
||||
// Skip null-terminated name string
|
||||
do
|
||||
{
|
||||
if (++it == end)
|
||||
return false;
|
||||
} while (*it != '\0');
|
||||
if (++it == end)
|
||||
return false;
|
||||
}
|
||||
if (mFlags & FCOMMENT)
|
||||
{
|
||||
// Skip null-terminated comment string
|
||||
do
|
||||
{
|
||||
if (++it == end)
|
||||
return false;
|
||||
} while (*it != '\0');
|
||||
if (++it == end)
|
||||
return false;
|
||||
}
|
||||
if (mFlags & FHCRC)
|
||||
{
|
||||
if (it + 2 >= end)
|
||||
return false;
|
||||
crc16 = *it++;
|
||||
crc16 |= *it++ << 8; //high byte of the little-endian header CRC16
|
||||
}
|
||||
|
||||
mIt = it;
|
||||
mSourceLen = end - it - 8;
|
||||
|
||||
it += mSourceLen;
|
||||
mCrc = *it++;
|
||||
mCrc |= *it++ << 8;
|
||||
mCrc |= *it++ << 16;
|
||||
mCrc |= *it++ << 24;
|
||||
mInputSize = *it++;
|
||||
mInputSize |= *it++ << 8;
|
||||
mInputSize |= *it++ << 16;
|
||||
mInputSize |= *it++ << 24;
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
bool GZipReader::Read(std::span<uint8_t> out) noexcept
|
||||
{
|
||||
size_t outLen = out.size();
|
||||
auto res = tinfl_decompress(&mState,
|
||||
static_cast<const mz_uint8*>(&*mIt), &mSourceLen,
|
||||
static_cast<mz_uint8*>(out.data()), static_cast<mz_uint8*>(out.data()), &outLen,
|
||||
TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF);
|
||||
if (res != TINFL_STATUS_DONE)
|
||||
return false;
|
||||
|
||||
mIt += mSourceLen; //tinfl updates mSourceLen to the input bytes consumed; advancing by the output length would misplace the cursor
|
||||
mBytesRead += outLen;
|
||||
mComputedCrc = static_cast<uint32_t>(mz_crc32(static_cast<mz_ulong>(mComputedCrc), out.data(), outLen));
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
bool GZipReader::Check() const noexcept
|
||||
{
|
||||
if (mComputedCrc != mCrc)
|
||||
return false;
|
||||
|
||||
if (static_cast<uint32_t>(mBytesRead & UINT32_MAX) != mInputSize)
|
||||
return false;
|
||||
|
||||
return true;
|
||||
}
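A hedged usage sketch of the three-step protocol TileLayer::parseBase64 follows: open the header, inflate into a pre-sized buffer, then verify the trailer.

#include "tmxlite/detail/gzip.hpp"
#include <cstdint>
#include <span>
#include <vector>

bool gunzip(std::span<const std::uint8_t> compressed, std::vector<std::uint8_t>& out)
{
    GZipReader reader;
    return reader.OpenMemory(compressed) //validates magic and flags, locates the deflate payload
        && reader.Read(out)              //single-shot inflate; out must already be sized to the expected length
        && reader.Check();               //compares the CRC32 and ISIZE trailer fields
}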
|
||||
145
ext/zstd/CMakeLists.txt
Normal file
@@ -0,0 +1,145 @@
|
||||
# ################################################################
|
||||
# Copyright (c) Meta Platforms, Inc. and affiliates.
|
||||
# All rights reserved.
|
||||
#
|
||||
# This source code is licensed under both the BSD-style license (found in the
|
||||
# LICENSE file in the root directory of this source tree) and the GPLv2 (found
|
||||
# in the COPYING file in the root directory of this source tree).
|
||||
# ################################################################
|
||||
|
||||
set(LIBRARY_DIR ${CMAKE_CURRENT_SOURCE_DIR}/lib)
|
||||
|
||||
# Parse version
|
||||
file(READ ${LIBRARY_DIR}/zstd.h CONTENT) # Read file content
|
||||
|
||||
string(REGEX MATCH ".*define ZSTD_VERSION_MAJOR *([0-9]+).*define ZSTD_VERSION_MINOR *([0-9]+).*define ZSTD_VERSION_RELEASE *([0-9]+)" VERSION_REGEX "${CONTENT}")
|
||||
set(zstd_VERSION_MAJOR ${CMAKE_MATCH_1} PARENT_SCOPE)
|
||||
set(zstd_VERSION_MINOR ${CMAKE_MATCH_2} PARENT_SCOPE)
|
||||
set(zstd_VERSION_PATCH ${CMAKE_MATCH_3} PARENT_SCOPE)
|
||||
|
||||
enable_language(ASM)
|
||||
|
||||
#-----------------------------------------------------------------------------
|
||||
# Add extra compilation flags
|
||||
#-----------------------------------------------------------------------------
|
||||
function (add_zstd_compilation_flags _target)
|
||||
if (CMAKE_CXX_COMPILER_ID MATCHES "GNU|Clang" OR MINGW)
|
||||
if (CMAKE_CXX_COMPILER_ID MATCHES "Clang" AND MSVC)
|
||||
# clang-cl normally maps -Wall to -Weverything.
|
||||
target_compile_options(${_target} PRIVATE "/clang:-Wall")
|
||||
else()
|
||||
target_compile_options(${_target} PRIVATE -Wall)
|
||||
endif()
|
||||
target_compile_options(${_target} PRIVATE -Wextra -Wundef -Wshadow -Wcast-align -Wcast-qual)
|
||||
target_compile_options(${_target} PRIVATE $<$<COMPILE_LANGUAGE:CXX>:-Wstrict-prototypes>)
|
||||
# Enable asserts in Debug mode
|
||||
if (CMAKE_BUILD_TYPE MATCHES "Debug")
|
||||
target_compile_definitions(${_target} PRIVATE DEBUGLEVEL=1)
|
||||
endif()
|
||||
# Add noexecstack flags
|
||||
target_link_options(${_target} PRIVATE -z noexecstack) # LDFLAGS
|
||||
target_compile_options(${_target} PRIVATE -Wunused-parameter -Wa,--noexecstack) # CFLAGS & CXXFLAGS
|
||||
elseif (MSVC)
|
||||
# UNICODE SUPPORT
|
||||
target_compile_definitions(${_target} PRIVATE _UNICODE UNICODE)
|
||||
# Enable asserts in Debug mode
|
||||
if (CMAKE_BUILD_TYPE MATCHES "Debug")
|
||||
target_compile_definitions(${_target} PRIVATE DEBUGLEVEL=1)
|
||||
endif()
|
||||
endif()
|
||||
endfunction()
|
||||
|
||||
# Legacy support
|
||||
option(ZSTD_LEGACY_SUPPORT "LEGACY SUPPORT" ON)
|
||||
|
||||
if (ZSTD_LEGACY_SUPPORT)
|
||||
set(ZSTD_LEGACY_LEVEL 5 CACHE STRING "")
|
||||
endif()
|
||||
|
||||
# Multi-threading support
|
||||
if (ANDROID)
|
||||
set(ZSTD_MULTITHREAD_SUPPORT_DEFAULT OFF)
|
||||
else()
|
||||
set(ZSTD_MULTITHREAD_SUPPORT_DEFAULT ON)
|
||||
endif()
|
||||
|
||||
option(ZSTD_MULTITHREAD_SUPPORT "MULTITHREADING SUPPORT" ${ZSTD_MULTITHREAD_SUPPORT_DEFAULT})
|
||||
|
||||
file(GLOB CommonSources ${LIBRARY_DIR}/common/*.c)
|
||||
file(GLOB CompressSources ${LIBRARY_DIR}/compress/*.c)
|
||||
if (MSVC)
|
||||
file(GLOB DecompressSources ${LIBRARY_DIR}/decompress/*.c)
|
||||
else()
|
||||
file(GLOB DecompressSources ${LIBRARY_DIR}/decompress/*.c ${LIBRARY_DIR}/decompress/*.S)
|
||||
endif()
|
||||
file(GLOB DictBuilderSources ${LIBRARY_DIR}/dictBuilder/*.c)
|
||||
|
||||
set(Sources
|
||||
${CommonSources}
|
||||
${CompressSources}
|
||||
${DecompressSources}
|
||||
${DictBuilderSources})
|
||||
|
||||
file(GLOB CommonHeaders ${LIBRARY_DIR}/common/*.h)
|
||||
file(GLOB CompressHeaders ${LIBRARY_DIR}/compress/*.h)
|
||||
file(GLOB DecompressHeaders ${LIBRARY_DIR}/decompress/*.h)
|
||||
file(GLOB DictBuilderHeaders ${LIBRARY_DIR}/dictBuilder/*.h)
|
||||
|
||||
set(Headers
|
||||
${LIBRARY_DIR}/zstd.h
|
||||
${CommonHeaders}
|
||||
${CompressHeaders}
|
||||
${DecompressHeaders}
|
||||
${DictBuilderHeaders})
|
||||
|
||||
if (ZSTD_LEGACY_SUPPORT)
|
||||
foreach (SOURCE zstd_v01.c zstd_v02.c zstd_v03.c zstd_v04.c zstd_v05.c zstd_v06.c zstd_v07.c)
|
||||
list(APPEND Sources ${LIBRARY_DIR}/legacy/${SOURCE})
|
||||
endforeach()
|
||||
foreach (HEADER zstd_legacy.h zstd_v01.h zstd_v02.h zstd_v03.h zstd_v04.h zstd_v05.h zstd_v06.h zstd_v07.h)
|
||||
list(APPEND Headers ${LIBRARY_DIR}/legacy/${HEADER})
|
||||
endforeach()
|
||||
endif()
|
||||
|
||||
# Explicitly set the language to C for all files, including ASM files.
|
||||
# Our assembly expects to be compiled by a C compiler, and is only enabled for
|
||||
# __GNUC__ compatible compilers. Otherwise all the ASM code is disabled by
|
||||
# macros.
|
||||
set_source_files_properties(${Sources} PROPERTIES LANGUAGE C)
|
||||
|
||||
add_library(zstd STATIC ${Sources} ${Headers})
|
||||
add_library(Zstd::static ALIAS zstd)
|
||||
add_library(Zstd::Zstd ALIAS zstd)
|
||||
|
||||
add_zstd_compilation_flags(zstd)
|
||||
|
||||
# Define library directory, where sources and header files are located
|
||||
target_include_directories(zstd PUBLIC ${LIBRARY_DIR})
|
||||
target_include_directories(zstd PRIVATE ${LIBRARY_DIR}/common)
|
||||
|
||||
if (ZSTD_LEGACY_SUPPORT)
|
||||
target_include_directories(zstd PRIVATE ${LIBRARY_DIR}/legacy)
|
||||
target_compile_definitions(zstd PRIVATE ZSTD_LEGACY_SUPPORT=${ZSTD_LEGACY_LEVEL})
|
||||
else()
|
||||
target_compile_definitions(zstd PRIVATE ZSTD_LEGACY_SUPPORT=0)
|
||||
endif()
|
||||
|
||||
if (ZSTD_MULTITHREAD_SUPPORT)
|
||||
target_compile_definitions(zstd PRIVATE ZSTD_MULTITHREAD)
|
||||
if (UNIX)
|
||||
set(THREADS_PREFER_PTHREAD_FLAG ON)
|
||||
find_package(Threads REQUIRED)
|
||||
target_link_libraries(zstd Threads::Threads)
|
||||
if (NOT CMAKE_USE_PTHREADS_INIT)
|
||||
message(SEND_ERROR "ZSTD currently does not support thread libraries other than pthreads")
|
||||
endif()
|
||||
endif()
|
||||
endif()
|
||||
|
||||
# Add specific compile definitions for MSVC project
|
||||
if (MSVC)
|
||||
target_compile_definitions(zstd PRIVATE ZSTD_HEAPMODE=0 ZSTD_DISABLE_ASM _CRT_SECURE_NO_WARNINGS)
|
||||
endif()
|
||||
|
||||
# Define static library names
|
||||
set_property(TARGET zstd PROPERTY POSITION_INDEPENDENT_CODE ON)
|
||||
30
ext/zstd/LICENSE
Normal file
@@ -0,0 +1,30 @@
|
||||
BSD License
|
||||
|
||||
For Zstandard software
|
||||
|
||||
Copyright (c) Meta Platforms, Inc. and affiliates. All rights reserved.
|
||||
|
||||
Redistribution and use in source and binary forms, with or without modification,
|
||||
are permitted provided that the following conditions are met:
|
||||
|
||||
* Redistributions of source code must retain the above copyright notice, this
|
||||
list of conditions and the following disclaimer.
|
||||
|
||||
* Redistributions in binary form must reproduce the above copyright notice,
|
||||
this list of conditions and the following disclaimer in the documentation
|
||||
and/or other materials provided with the distribution.
|
||||
|
||||
* Neither the name Facebook, nor Meta, nor the names of its contributors may
|
||||
be used to endorse or promote products derived from this software without
|
||||
specific prior written permission.
|
||||
|
||||
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
|
||||
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
|
||||
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
|
||||
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
|
||||
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
|
||||
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
|
||||
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
|
||||
ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
|
||||
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
55
ext/zstd/lib/common/allocations.h
Normal file
@@ -0,0 +1,55 @@
|
||||
/*
|
||||
* Copyright (c) Meta Platforms, Inc. and affiliates.
|
||||
* All rights reserved.
|
||||
*
|
||||
* This source code is licensed under both the BSD-style license (found in the
|
||||
* LICENSE file in the root directory of this source tree) and the GPLv2 (found
|
||||
* in the COPYING file in the root directory of this source tree).
|
||||
* You may select, at your option, one of the above-listed licenses.
|
||||
*/
|
||||
|
||||
/* This file provides custom allocation primitives
|
||||
*/
|
||||
|
||||
#define ZSTD_DEPS_NEED_MALLOC
|
||||
#include "zstd_deps.h" /* ZSTD_malloc, ZSTD_calloc, ZSTD_free, ZSTD_memset */
|
||||
|
||||
#include "mem.h" /* MEM_STATIC */
|
||||
#define ZSTD_STATIC_LINKING_ONLY
|
||||
#include "../zstd.h" /* ZSTD_customMem */
|
||||
|
||||
#ifndef ZSTD_ALLOCATIONS_H
|
||||
#define ZSTD_ALLOCATIONS_H
|
||||
|
||||
/* custom memory allocation functions */
|
||||
|
||||
MEM_STATIC void* ZSTD_customMalloc(size_t size, ZSTD_customMem customMem)
|
||||
{
|
||||
if (customMem.customAlloc)
|
||||
return customMem.customAlloc(customMem.opaque, size);
|
||||
return ZSTD_malloc(size);
|
||||
}
|
||||
|
||||
MEM_STATIC void* ZSTD_customCalloc(size_t size, ZSTD_customMem customMem)
|
||||
{
|
||||
if (customMem.customAlloc) {
|
||||
/* calloc implemented as malloc+memset;
|
||||
* not as efficient as calloc, but next best guess for custom malloc */
|
||||
void* const ptr = customMem.customAlloc(customMem.opaque, size);
|
||||
ZSTD_memset(ptr, 0, size);
|
||||
return ptr;
|
||||
}
|
||||
return ZSTD_calloc(1, size);
|
||||
}
|
||||
|
||||
MEM_STATIC void ZSTD_customFree(void* ptr, ZSTD_customMem customMem)
|
||||
{
|
||||
if (ptr!=NULL) {
|
||||
if (customMem.customFree)
|
||||
customMem.customFree(customMem.opaque, ptr);
|
||||
else
|
||||
ZSTD_free(ptr);
|
||||
}
|
||||
}
|
||||
|
||||
#endif /* ZSTD_ALLOCATIONS_H */
|
||||
200
ext/zstd/lib/common/bits.h
Normal file
@@ -0,0 +1,200 @@
|
||||
/*
|
||||
* Copyright (c) Meta Platforms, Inc. and affiliates.
|
||||
* All rights reserved.
|
||||
*
|
||||
* This source code is licensed under both the BSD-style license (found in the
|
||||
* LICENSE file in the root directory of this source tree) and the GPLv2 (found
|
||||
* in the COPYING file in the root directory of this source tree).
|
||||
* You may select, at your option, one of the above-listed licenses.
|
||||
*/
|
||||
|
||||
#ifndef ZSTD_BITS_H
|
||||
#define ZSTD_BITS_H
|
||||
|
||||
#include "mem.h"
|
||||
|
||||
MEM_STATIC unsigned ZSTD_countTrailingZeros32_fallback(U32 val)
|
||||
{
|
||||
assert(val != 0);
|
||||
{
|
||||
static const U32 DeBruijnBytePos[32] = {0, 1, 28, 2, 29, 14, 24, 3,
|
||||
30, 22, 20, 15, 25, 17, 4, 8,
|
||||
31, 27, 13, 23, 21, 19, 16, 7,
|
||||
26, 12, 18, 6, 11, 5, 10, 9};
|
||||
return DeBruijnBytePos[((U32) ((val & -(S32) val) * 0x077CB531U)) >> 27];
|
||||
}
|
||||
}
|
||||
|
||||
MEM_STATIC unsigned ZSTD_countTrailingZeros32(U32 val)
|
||||
{
|
||||
assert(val != 0);
|
||||
# if defined(_MSC_VER)
|
||||
# if STATIC_BMI2 == 1
|
||||
return (unsigned)_tzcnt_u32(val);
|
||||
# else
|
||||
if (val != 0) {
|
||||
unsigned long r;
|
||||
_BitScanForward(&r, val);
|
||||
return (unsigned)r;
|
||||
} else {
|
||||
/* Should not reach this code path */
|
||||
__assume(0);
|
||||
}
|
||||
# endif
|
||||
# elif defined(__GNUC__) && (__GNUC__ >= 4)
|
||||
return (unsigned)__builtin_ctz(val);
|
||||
# else
|
||||
return ZSTD_countTrailingZeros32_fallback(val);
|
||||
# endif
|
||||
}
|
||||
|
||||
MEM_STATIC unsigned ZSTD_countLeadingZeros32_fallback(U32 val) {
|
||||
assert(val != 0);
|
||||
{
|
||||
static const U32 DeBruijnClz[32] = {0, 9, 1, 10, 13, 21, 2, 29,
|
||||
11, 14, 16, 18, 22, 25, 3, 30,
|
||||
8, 12, 20, 28, 15, 17, 24, 7,
|
||||
19, 27, 23, 6, 26, 5, 4, 31};
|
||||
val |= val >> 1;
|
||||
val |= val >> 2;
|
||||
val |= val >> 4;
|
||||
val |= val >> 8;
|
||||
val |= val >> 16;
|
||||
return 31 - DeBruijnClz[(val * 0x07C4ACDDU) >> 27];
|
||||
}
|
||||
}
|
||||
|
||||
MEM_STATIC unsigned ZSTD_countLeadingZeros32(U32 val)
|
||||
{
|
||||
assert(val != 0);
|
||||
# if defined(_MSC_VER)
|
||||
# if STATIC_BMI2 == 1
|
||||
return (unsigned)_lzcnt_u32(val);
|
||||
# else
|
||||
if (val != 0) {
|
||||
unsigned long r;
|
||||
_BitScanReverse(&r, val);
|
||||
return (unsigned)(31 - r);
|
||||
} else {
|
||||
/* Should not reach this code path */
|
||||
__assume(0);
|
||||
}
|
||||
# endif
|
||||
# elif defined(__GNUC__) && (__GNUC__ >= 4)
|
||||
return (unsigned)__builtin_clz(val);
|
||||
# else
|
||||
return ZSTD_countLeadingZeros32_fallback(val);
|
||||
# endif
|
||||
}
|
||||
|
||||
MEM_STATIC unsigned ZSTD_countTrailingZeros64(U64 val)
|
||||
{
|
||||
assert(val != 0);
|
||||
# if defined(_MSC_VER) && defined(_WIN64)
|
||||
# if STATIC_BMI2 == 1
|
||||
return (unsigned)_tzcnt_u64(val);
|
||||
# else
|
||||
if (val != 0) {
|
||||
unsigned long r;
|
||||
_BitScanForward64(&r, val);
|
||||
return (unsigned)r;
|
||||
} else {
|
||||
/* Should not reach this code path */
|
||||
__assume(0);
|
||||
}
|
||||
# endif
|
||||
# elif defined(__GNUC__) && (__GNUC__ >= 4) && defined(__LP64__)
|
||||
return (unsigned)__builtin_ctzll(val);
|
||||
# else
|
||||
{
|
||||
U32 mostSignificantWord = (U32)(val >> 32);
|
||||
U32 leastSignificantWord = (U32)val;
|
||||
if (leastSignificantWord == 0) {
|
||||
return 32 + ZSTD_countTrailingZeros32(mostSignificantWord);
|
||||
} else {
|
||||
return ZSTD_countTrailingZeros32(leastSignificantWord);
|
||||
}
|
||||
}
|
||||
# endif
|
||||
}
|
||||
|
||||
MEM_STATIC unsigned ZSTD_countLeadingZeros64(U64 val)
|
||||
{
|
||||
assert(val != 0);
|
||||
# if defined(_MSC_VER) && defined(_WIN64)
|
||||
# if STATIC_BMI2 == 1
|
||||
return (unsigned)_lzcnt_u64(val);
|
||||
# else
|
||||
if (val != 0) {
|
||||
unsigned long r;
|
||||
_BitScanReverse64(&r, val);
|
||||
return (unsigned)(63 - r);
|
||||
} else {
|
||||
/* Should not reach this code path */
|
||||
__assume(0);
|
||||
}
|
||||
# endif
|
||||
# elif defined(__GNUC__) && (__GNUC__ >= 4)
|
||||
return (unsigned)(__builtin_clzll(val));
|
||||
# else
|
||||
{
|
||||
U32 mostSignificantWord = (U32)(val >> 32);
|
||||
U32 leastSignificantWord = (U32)val;
|
||||
if (mostSignificantWord == 0) {
|
||||
return 32 + ZSTD_countLeadingZeros32(leastSignificantWord);
|
||||
} else {
|
||||
return ZSTD_countLeadingZeros32(mostSignificantWord);
|
||||
}
|
||||
}
|
||||
# endif
|
||||
}
|
||||
|
||||
MEM_STATIC unsigned ZSTD_NbCommonBytes(size_t val)
|
||||
{
|
||||
if (MEM_isLittleEndian()) {
|
||||
if (MEM_64bits()) {
|
||||
return ZSTD_countTrailingZeros64((U64)val) >> 3;
|
||||
} else {
|
||||
return ZSTD_countTrailingZeros32((U32)val) >> 3;
|
||||
}
|
||||
} else { /* Big Endian CPU */
|
||||
if (MEM_64bits()) {
|
||||
return ZSTD_countLeadingZeros64((U64)val) >> 3;
|
||||
} else {
|
||||
return ZSTD_countLeadingZeros32((U32)val) >> 3;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
MEM_STATIC unsigned ZSTD_highbit32(U32 val) /* compress, dictBuilder, decodeCorpus */
|
||||
{
|
||||
assert(val != 0);
|
||||
return 31 - ZSTD_countLeadingZeros32(val);
|
||||
}
|
||||
|
||||
/* ZSTD_rotateRight_*():
|
||||
* Rotates a bitfield to the right by "count" bits.
|
||||
* https://en.wikipedia.org/w/index.php?title=Circular_shift&oldid=991635599#Implementing_circular_shifts
|
||||
*/
|
||||
MEM_STATIC
|
||||
U64 ZSTD_rotateRight_U64(U64 const value, U32 count) {
|
||||
assert(count < 64);
|
||||
count &= 0x3F; /* for fickle pattern recognition */
|
||||
return (value >> count) | (U64)(value << ((0U - count) & 0x3F));
|
||||
}
|
||||
|
||||
MEM_STATIC
|
||||
U32 ZSTD_rotateRight_U32(U32 const value, U32 count) {
|
||||
assert(count < 32);
|
||||
count &= 0x1F; /* for fickle pattern recognition */
|
||||
return (value >> count) | (U32)(value << ((0U - count) & 0x1F));
|
||||
}
|
||||
|
||||
MEM_STATIC
|
||||
U16 ZSTD_rotateRight_U16(U16 const value, U32 count) {
|
||||
assert(count < 16);
|
||||
count &= 0x0F; /* for fickle pattern recognition */
|
||||
return (value >> count) | (U16)(value << ((0U - count) & 0x0F));
|
||||
}
|
||||
|
||||
#endif /* ZSTD_BITS_H */
|
||||
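The _fallback variants above avoid intrinsics with a classic De Bruijn multiply: isolate the lowest set bit, multiply by a De Bruijn constant, and index a 32-entry table with the top 5 bits of the product. A standalone sketch that cross-checks the same constant and table against a naive scan — note that `val & (0U - val)` is the well-defined unsigned spelling of the header's `val & -(S32)val`, and ctz32_debruijn is an illustrative name:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

static unsigned ctz32_debruijn(uint32_t val)
{
    static const uint32_t DeBruijnBytePos[32] = {0, 1, 28, 2, 29, 14, 24, 3,
                                                 30, 22, 20, 15, 25, 17, 4, 8,
                                                 31, 27, 13, 23, 21, 19, 16, 7,
                                                 26, 12, 18, 6, 11, 5, 10, 9};
    assert(val != 0);
    /* val & (0U - val) isolates the lowest set bit; the multiply then places
     * a unique 5-bit pattern into the top bits of the product. */
    return DeBruijnBytePos[((val & (0U - val)) * 0x077CB531U) >> 27];
}

int main(void)
{
    unsigned i;
    for (i = 0; i < 32; ++i) {
        uint32_t v = (uint32_t)1 << i;
        unsigned naive = 0;
        while (((v >> naive) & 1) == 0) ++naive;   /* scan bits from the bottom */
        assert(ctz32_debruijn(v) == naive && naive == i);
    }
    printf("De Bruijn ctz agrees with the naive scan for all 32 bit positions\n");
    return 0;
}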
437
ext/zstd/lib/common/bitstream.h
Normal file
@@ -0,0 +1,437 @@
/* ******************************************************************
 * bitstream
 * Part of FSE library
 * Copyright (c) Meta Platforms, Inc. and affiliates.
 *
 * You can contact the author at :
 * - Source repository : https://github.com/Cyan4973/FiniteStateEntropy
 *
 * This source code is licensed under both the BSD-style license (found in the
 * LICENSE file in the root directory of this source tree) and the GPLv2 (found
 * in the COPYING file in the root directory of this source tree).
 * You may select, at your option, one of the above-listed licenses.
****************************************************************** */
#ifndef BITSTREAM_H_MODULE
#define BITSTREAM_H_MODULE

#if defined (__cplusplus)
extern "C" {
#endif
/*
* This API consists of small unitary functions, which must be inlined for best performance.
* Since link-time-optimization is not available for all compilers,
* these functions are defined into a .h to be included.
*/

/*-****************************************
* Dependencies
******************************************/
#include "mem.h" /* unaligned access routines */
#include "compiler.h" /* UNLIKELY() */
#include "debug.h" /* assert(), DEBUGLOG(), RAWLOG() */
#include "error_private.h" /* error codes and messages */
#include "bits.h" /* ZSTD_highbit32 */


/*=========================================
* Target specific
=========================================*/
#ifndef ZSTD_NO_INTRINSICS
#  if (defined(__BMI__) || defined(__BMI2__)) && defined(__GNUC__)
#    include <immintrin.h> /* support for bextr (experimental)/bzhi */
#  elif defined(__ICCARM__)
#    include <intrinsics.h>
#  endif
#endif

#define STREAM_ACCUMULATOR_MIN_32 25
#define STREAM_ACCUMULATOR_MIN_64 57
#define STREAM_ACCUMULATOR_MIN ((U32)(MEM_32bits() ? STREAM_ACCUMULATOR_MIN_32 : STREAM_ACCUMULATOR_MIN_64))


/*-******************************************
* bitStream encoding API (write forward)
********************************************/
/* bitStream can mix input from multiple sources.
 * A critical property of these streams is that they encode and decode in **reverse** direction.
 * So the first bit sequence you add will be the last to be read, like a LIFO stack.
 */
typedef struct {
    size_t bitContainer;
    unsigned bitPos;
    char* startPtr;
    char* ptr;
    char* endPtr;
} BIT_CStream_t;

MEM_STATIC size_t BIT_initCStream(BIT_CStream_t* bitC, void* dstBuffer, size_t dstCapacity);
MEM_STATIC void BIT_addBits(BIT_CStream_t* bitC, size_t value, unsigned nbBits);
MEM_STATIC void BIT_flushBits(BIT_CStream_t* bitC);
MEM_STATIC size_t BIT_closeCStream(BIT_CStream_t* bitC);

/* Start with initCStream, providing the size of buffer to write into.
 * bitStream will never write outside of this buffer.
 * `dstCapacity` must be >= sizeof(bitD->bitContainer), otherwise @return will be an error code.
 *
 * bits are first added to a local register.
 * Local register is size_t, hence 64-bits on 64-bits systems, or 32-bits on 32-bits systems.
 * Writing data into memory is an explicit operation, performed by the flushBits function.
 * Hence keep track how many bits are potentially stored into local register to avoid register overflow.
 * After a flushBits, a maximum of 7 bits might still be stored into local register.
 *
 * Avoid storing elements of more than 24 bits if you want compatibility with 32-bits bitstream readers.
 *
 * Last operation is to close the bitStream.
 * The function returns the final size of CStream in bytes.
 * If data couldn't fit into `dstBuffer`, it will return a 0 ( == not storable)
 */


/*-********************************************
* bitStream decoding API (read backward)
**********************************************/
typedef struct {
    size_t bitContainer;
    unsigned bitsConsumed;
    const char* ptr;
    const char* start;
    const char* limitPtr;
} BIT_DStream_t;

typedef enum { BIT_DStream_unfinished = 0,
               BIT_DStream_endOfBuffer = 1,
               BIT_DStream_completed = 2,
               BIT_DStream_overflow = 3 } BIT_DStream_status;  /* result of BIT_reloadDStream() */
/* 1,2,4,8 would be better for bitmap combinations, but slows down performance a bit ... :( */

MEM_STATIC size_t   BIT_initDStream(BIT_DStream_t* bitD, const void* srcBuffer, size_t srcSize);
MEM_STATIC size_t   BIT_readBits(BIT_DStream_t* bitD, unsigned nbBits);
MEM_STATIC BIT_DStream_status BIT_reloadDStream(BIT_DStream_t* bitD);
MEM_STATIC unsigned BIT_endOfDStream(const BIT_DStream_t* bitD);


/* Start by invoking BIT_initDStream().
 * A chunk of the bitStream is then stored into a local register.
 * Local register size is 64-bits on 64-bits systems, 32-bits on 32-bits systems (size_t).
 * You can then retrieve bitFields stored into the local register, **in reverse order**.
 * Local register is explicitly reloaded from memory by the BIT_reloadDStream() method.
 * A reload guarantee a minimum of ((8*sizeof(bitD->bitContainer))-7) bits when its result is BIT_DStream_unfinished.
 * Otherwise, it can be less than that, so proceed accordingly.
 * Checking if DStream has reached its end can be performed with BIT_endOfDStream().
 */


/*-****************************************
* unsafe API
******************************************/
MEM_STATIC void BIT_addBitsFast(BIT_CStream_t* bitC, size_t value, unsigned nbBits);
/* faster, but works only if value is "clean", meaning all high bits above nbBits are 0 */

MEM_STATIC void BIT_flushBitsFast(BIT_CStream_t* bitC);
/* unsafe version; does not check buffer overflow */

MEM_STATIC size_t BIT_readBitsFast(BIT_DStream_t* bitD, unsigned nbBits);
/* faster, but works only if nbBits >= 1 */

/*===== Local Constants =====*/
static const unsigned BIT_mask[] = {
    0, 1, 3, 7, 0xF, 0x1F,
    0x3F, 0x7F, 0xFF, 0x1FF, 0x3FF, 0x7FF,
    0xFFF, 0x1FFF, 0x3FFF, 0x7FFF, 0xFFFF, 0x1FFFF,
    0x3FFFF, 0x7FFFF, 0xFFFFF, 0x1FFFFF, 0x3FFFFF, 0x7FFFFF,
    0xFFFFFF, 0x1FFFFFF, 0x3FFFFFF, 0x7FFFFFF, 0xFFFFFFF, 0x1FFFFFFF,
    0x3FFFFFFF, 0x7FFFFFFF}; /* up to 31 bits */
#define BIT_MASK_SIZE (sizeof(BIT_mask) / sizeof(BIT_mask[0]))

/*-**************************************************************
* bitStream encoding
****************************************************************/
/*! BIT_initCStream() :
 *  `dstCapacity` must be > sizeof(size_t)
 *  @return : 0 if success,
 *            otherwise an error code (can be tested using ERR_isError()) */
MEM_STATIC size_t BIT_initCStream(BIT_CStream_t* bitC,
                                  void* startPtr, size_t dstCapacity)
{
    bitC->bitContainer = 0;
    bitC->bitPos = 0;
    bitC->startPtr = (char*)startPtr;
    bitC->ptr = bitC->startPtr;
    bitC->endPtr = bitC->startPtr + dstCapacity - sizeof(bitC->bitContainer);
    if (dstCapacity <= sizeof(bitC->bitContainer)) return ERROR(dstSize_tooSmall);
    return 0;
}

MEM_STATIC FORCE_INLINE_ATTR size_t BIT_getLowerBits(size_t bitContainer, U32 const nbBits)
{
#if defined(STATIC_BMI2) && STATIC_BMI2 == 1 && !defined(ZSTD_NO_INTRINSICS)
    return _bzhi_u64(bitContainer, nbBits);
#else
    assert(nbBits < BIT_MASK_SIZE);
    return bitContainer & BIT_mask[nbBits];
#endif
}

/*! BIT_addBits() :
 *  can add up to 31 bits into `bitC`.
 *  Note : does not check for register overflow ! */
MEM_STATIC void BIT_addBits(BIT_CStream_t* bitC,
                            size_t value, unsigned nbBits)
{
    DEBUG_STATIC_ASSERT(BIT_MASK_SIZE == 32);
    assert(nbBits < BIT_MASK_SIZE);
    assert(nbBits + bitC->bitPos < sizeof(bitC->bitContainer) * 8);
    bitC->bitContainer |= BIT_getLowerBits(value, nbBits) << bitC->bitPos;
    bitC->bitPos += nbBits;
}

/*! BIT_addBitsFast() :
 *  works only if `value` is _clean_,
 *  meaning all high bits above nbBits are 0 */
MEM_STATIC void BIT_addBitsFast(BIT_CStream_t* bitC,
                                size_t value, unsigned nbBits)
{
    assert((value>>nbBits) == 0);
    assert(nbBits + bitC->bitPos < sizeof(bitC->bitContainer) * 8);
    bitC->bitContainer |= value << bitC->bitPos;
    bitC->bitPos += nbBits;
}

/*! BIT_flushBitsFast() :
 *  assumption : bitContainer has not overflowed
 *  unsafe version; does not check buffer overflow */
MEM_STATIC void BIT_flushBitsFast(BIT_CStream_t* bitC)
{
    size_t const nbBytes = bitC->bitPos >> 3;
    assert(bitC->bitPos < sizeof(bitC->bitContainer) * 8);
    assert(bitC->ptr <= bitC->endPtr);
    MEM_writeLEST(bitC->ptr, bitC->bitContainer);
    bitC->ptr += nbBytes;
    bitC->bitPos &= 7;
    bitC->bitContainer >>= nbBytes*8;
}

/*! BIT_flushBits() :
 *  assumption : bitContainer has not overflowed
 *  safe version; check for buffer overflow, and prevents it.
 *  note : does not signal buffer overflow.
 *  overflow will be revealed later on using BIT_closeCStream() */
MEM_STATIC void BIT_flushBits(BIT_CStream_t* bitC)
{
    size_t const nbBytes = bitC->bitPos >> 3;
    assert(bitC->bitPos < sizeof(bitC->bitContainer) * 8);
    assert(bitC->ptr <= bitC->endPtr);
    MEM_writeLEST(bitC->ptr, bitC->bitContainer);
    bitC->ptr += nbBytes;
    if (bitC->ptr > bitC->endPtr) bitC->ptr = bitC->endPtr;
    bitC->bitPos &= 7;
    bitC->bitContainer >>= nbBytes*8;
}

/*! BIT_closeCStream() :
 *  @return : size of CStream, in bytes,
 *            or 0 if it could not fit into dstBuffer */
MEM_STATIC size_t BIT_closeCStream(BIT_CStream_t* bitC)
{
    BIT_addBitsFast(bitC, 1, 1); /* endMark */
    BIT_flushBits(bitC);
    if (bitC->ptr >= bitC->endPtr) return 0; /* overflow detected */
    return (bitC->ptr - bitC->startPtr) + (bitC->bitPos > 0);
}


/*-********************************************************
* bitStream decoding
**********************************************************/
/*! BIT_initDStream() :
 *  Initialize a BIT_DStream_t.
 * `bitD` : a pointer to an already allocated BIT_DStream_t structure.
 * `srcSize` must be the *exact* size of the bitStream, in bytes.
 *  @return : size of stream (== srcSize), or an errorCode if a problem is detected
 */
MEM_STATIC size_t BIT_initDStream(BIT_DStream_t* bitD, const void* srcBuffer, size_t srcSize)
{
    if (srcSize < 1) { ZSTD_memset(bitD, 0, sizeof(*bitD)); return ERROR(srcSize_wrong); }

    bitD->start = (const char*)srcBuffer;
    bitD->limitPtr = bitD->start + sizeof(bitD->bitContainer);

    if (srcSize >= sizeof(bitD->bitContainer)) {  /* normal case */
        bitD->ptr = (const char*)srcBuffer + srcSize - sizeof(bitD->bitContainer);
        bitD->bitContainer = MEM_readLEST(bitD->ptr);
        { BYTE const lastByte = ((const BYTE*)srcBuffer)[srcSize-1];
          bitD->bitsConsumed = lastByte ? 8 - ZSTD_highbit32(lastByte) : 0;  /* ensures bitsConsumed is always set */
          if (lastByte == 0) return ERROR(GENERIC); /* endMark not present */ }
    } else {
        bitD->ptr = bitD->start;
        bitD->bitContainer = *(const BYTE*)(bitD->start);
        switch(srcSize)
        {
        case 7: bitD->bitContainer += (size_t)(((const BYTE*)(srcBuffer))[6]) << (sizeof(bitD->bitContainer)*8 - 16);
                ZSTD_FALLTHROUGH;

        case 6: bitD->bitContainer += (size_t)(((const BYTE*)(srcBuffer))[5]) << (sizeof(bitD->bitContainer)*8 - 24);
                ZSTD_FALLTHROUGH;

        case 5: bitD->bitContainer += (size_t)(((const BYTE*)(srcBuffer))[4]) << (sizeof(bitD->bitContainer)*8 - 32);
                ZSTD_FALLTHROUGH;

        case 4: bitD->bitContainer += (size_t)(((const BYTE*)(srcBuffer))[3]) << 24;
                ZSTD_FALLTHROUGH;

        case 3: bitD->bitContainer += (size_t)(((const BYTE*)(srcBuffer))[2]) << 16;
                ZSTD_FALLTHROUGH;

        case 2: bitD->bitContainer += (size_t)(((const BYTE*)(srcBuffer))[1]) << 8;
                ZSTD_FALLTHROUGH;

        default: break;
        }
        { BYTE const lastByte = ((const BYTE*)srcBuffer)[srcSize-1];
          bitD->bitsConsumed = lastByte ? 8 - ZSTD_highbit32(lastByte) : 0;
          if (lastByte == 0) return ERROR(corruption_detected); /* endMark not present */
        }
        bitD->bitsConsumed += (U32)(sizeof(bitD->bitContainer) - srcSize)*8;
    }

    return srcSize;
}

MEM_STATIC FORCE_INLINE_ATTR size_t BIT_getUpperBits(size_t bitContainer, U32 const start)
{
    return bitContainer >> start;
}

MEM_STATIC FORCE_INLINE_ATTR size_t BIT_getMiddleBits(size_t bitContainer, U32 const start, U32 const nbBits)
{
    U32 const regMask = sizeof(bitContainer)*8 - 1;
    /* if start > regMask, bitstream is corrupted, and result is undefined */
    assert(nbBits < BIT_MASK_SIZE);
    /* x86 transform & ((1 << nbBits) - 1) to bzhi instruction, it is better
     * than accessing memory. When bmi2 instruction is not present, we consider
     * such cpus old (pre-Haswell, 2013) and their performance is not of that
     * importance.
     */
#if defined(__x86_64__) || defined(_M_X86)
    return (bitContainer >> (start & regMask)) & ((((U64)1) << nbBits) - 1);
#else
    return (bitContainer >> (start & regMask)) & BIT_mask[nbBits];
#endif
}

/*! BIT_lookBits() :
 *  Provides next n bits from local register.
 *  local register is not modified.
 *  On 32-bits, maxNbBits==24.
 *  On 64-bits, maxNbBits==56.
 * @return : value extracted */
MEM_STATIC FORCE_INLINE_ATTR size_t BIT_lookBits(const BIT_DStream_t* bitD, U32 nbBits)
{
    /* arbitrate between double-shift and shift+mask */
#if 1
    /* if bitD->bitsConsumed + nbBits > sizeof(bitD->bitContainer)*8,
     * bitstream is likely corrupted, and result is undefined */
    return BIT_getMiddleBits(bitD->bitContainer, (sizeof(bitD->bitContainer)*8) - bitD->bitsConsumed - nbBits, nbBits);
#else
    /* this code path is slower on my os-x laptop */
    U32 const regMask = sizeof(bitD->bitContainer)*8 - 1;
    return ((bitD->bitContainer << (bitD->bitsConsumed & regMask)) >> 1) >> ((regMask-nbBits) & regMask);
#endif
}

/*! BIT_lookBitsFast() :
 *  unsafe version; only works if nbBits >= 1 */
MEM_STATIC size_t BIT_lookBitsFast(const BIT_DStream_t* bitD, U32 nbBits)
{
    U32 const regMask = sizeof(bitD->bitContainer)*8 - 1;
    assert(nbBits >= 1);
    return (bitD->bitContainer << (bitD->bitsConsumed & regMask)) >> (((regMask+1)-nbBits) & regMask);
}

MEM_STATIC FORCE_INLINE_ATTR void BIT_skipBits(BIT_DStream_t* bitD, U32 nbBits)
{
    bitD->bitsConsumed += nbBits;
}

/*! BIT_readBits() :
 *  Read (consume) next n bits from local register and update.
 *  Pay attention to not read more than nbBits contained into local register.
 * @return : extracted value. */
MEM_STATIC FORCE_INLINE_ATTR size_t BIT_readBits(BIT_DStream_t* bitD, unsigned nbBits)
{
    size_t const value = BIT_lookBits(bitD, nbBits);
    BIT_skipBits(bitD, nbBits);
    return value;
}

/*! BIT_readBitsFast() :
 *  unsafe version; only works if nbBits >= 1 */
MEM_STATIC size_t BIT_readBitsFast(BIT_DStream_t* bitD, unsigned nbBits)
{
    size_t const value = BIT_lookBitsFast(bitD, nbBits);
    assert(nbBits >= 1);
    BIT_skipBits(bitD, nbBits);
    return value;
}

/*! BIT_reloadDStreamFast() :
 *  Similar to BIT_reloadDStream(), but with two differences:
 *  1. bitsConsumed <= sizeof(bitD->bitContainer)*8 must hold!
 *  2. Returns BIT_DStream_overflow when bitD->ptr < bitD->limitPtr, at this
 *     point you must use BIT_reloadDStream() to reload.
 */
MEM_STATIC BIT_DStream_status BIT_reloadDStreamFast(BIT_DStream_t* bitD)
{
    if (UNLIKELY(bitD->ptr < bitD->limitPtr))
        return BIT_DStream_overflow;
    assert(bitD->bitsConsumed <= sizeof(bitD->bitContainer)*8);
    bitD->ptr -= bitD->bitsConsumed >> 3;
    bitD->bitsConsumed &= 7;
    bitD->bitContainer = MEM_readLEST(bitD->ptr);
    return BIT_DStream_unfinished;
}

/*! BIT_reloadDStream() :
 *  Refill `bitD` from buffer previously set in BIT_initDStream() .
 *  This function is safe, it guarantees it will not read beyond src buffer.
 * @return : status of `BIT_DStream_t` internal register.
 *           when status == BIT_DStream_unfinished, internal register is filled with at least 25 or 57 bits */
MEM_STATIC FORCE_INLINE_ATTR BIT_DStream_status BIT_reloadDStream(BIT_DStream_t* bitD)
{
    if (bitD->bitsConsumed > (sizeof(bitD->bitContainer)*8))  /* overflow detected, like end of stream */
        return BIT_DStream_overflow;

    if (bitD->ptr >= bitD->limitPtr) {
        return BIT_reloadDStreamFast(bitD);
    }
    if (bitD->ptr == bitD->start) {
        if (bitD->bitsConsumed < sizeof(bitD->bitContainer)*8) return BIT_DStream_endOfBuffer;
        return BIT_DStream_completed;
    }
    /* start < ptr < limitPtr */
    {   U32 nbBytes = bitD->bitsConsumed >> 3;
        BIT_DStream_status result = BIT_DStream_unfinished;
        if (bitD->ptr - nbBytes < bitD->start) {
            nbBytes = (U32)(bitD->ptr - bitD->start);  /* ptr > start */
            result = BIT_DStream_endOfBuffer;
        }
        bitD->ptr -= nbBytes;
        bitD->bitsConsumed -= nbBytes*8;
        bitD->bitContainer = MEM_readLEST(bitD->ptr);  /* reminder : srcSize > sizeof(bitD->bitContainer), otherwise bitD->ptr == bitD->start */
        return result;
    }
}

/*! BIT_endOfDStream() :
 * @return : 1 if DStream has _exactly_ reached its end (all bits consumed).
 */
MEM_STATIC unsigned BIT_endOfDStream(const BIT_DStream_t* DStream)
{
    return ((DStream->ptr == DStream->start) && (DStream->bitsConsumed == sizeof(DStream->bitContainer)*8));
}

#if defined (__cplusplus)
}
#endif

#endif /* BITSTREAM_H_MODULE */
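The header's comments stress that the bitstream is LIFO: the first field written is the last one read. The following minimal sketch demonstrates that property on a single 64-bit accumulator, leaving out the buffer, flush, and reload machinery (all names here are illustrative, not the vendored API):

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
    /* Encoder side: fields are ORed in at an increasing bit position,
     * exactly like BIT_addBits on the local register. */
    uint64_t container = 0;
    unsigned bitPos = 0;

    container |= (uint64_t)5 << bitPos; bitPos += 3;   /* addBits(5, 3) */
    container |= (uint64_t)9 << bitPos; bitPos += 4;   /* addBits(9, 4) */
    container |= (uint64_t)1 << bitPos; bitPos += 2;   /* addBits(1, 2) */

    /* Decoder side: read from the top of the used bits downward, so the
     * values come back in reverse order -- the LIFO stack behaviour. */
    unsigned consumed = 0;
    uint64_t v1 = (container >> (bitPos - consumed - 2)) & 0x3; consumed += 2;
    uint64_t v2 = (container >> (bitPos - consumed - 4)) & 0xF; consumed += 4;
    uint64_t v3 = (container >> (bitPos - consumed - 3)) & 0x7; consumed += 3;

    assert(v1 == 1 && v2 == 9 && v3 == 5);
    printf("read back in reverse: %llu %llu %llu\n",
           (unsigned long long)v1, (unsigned long long)v2, (unsigned long long)v3);
    return 0;
}

This is also why the real decoder loads its register from the *end* of the source buffer and walks backwards.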
358
ext/zstd/lib/common/compiler.h
Normal file
@@ -0,0 +1,358 @@
/*
 * Copyright (c) Meta Platforms, Inc. and affiliates.
 * All rights reserved.
 *
 * This source code is licensed under both the BSD-style license (found in the
 * LICENSE file in the root directory of this source tree) and the GPLv2 (found
 * in the COPYING file in the root directory of this source tree).
 * You may select, at your option, one of the above-listed licenses.
 */

#ifndef ZSTD_COMPILER_H
#define ZSTD_COMPILER_H

#include "portability_macros.h"

/*-*******************************************************
* Compiler specifics
*********************************************************/
/* force inlining */

#if !defined(ZSTD_NO_INLINE)
#if (defined(__GNUC__) && !defined(__STRICT_ANSI__)) || defined(__cplusplus) || defined(__STDC_VERSION__) && __STDC_VERSION__ >= 199901L /* C99 */
#  define INLINE_KEYWORD inline
#else
#  define INLINE_KEYWORD
#endif

#if defined(__GNUC__) || defined(__ICCARM__)
#  define FORCE_INLINE_ATTR __attribute__((always_inline))
#elif defined(_MSC_VER)
#  define FORCE_INLINE_ATTR __forceinline
#else
#  define FORCE_INLINE_ATTR
#endif

#else

#define INLINE_KEYWORD
#define FORCE_INLINE_ATTR

#endif

/**
  On MSVC qsort requires that functions passed into it use the __cdecl calling conversion(CC).
  This explicitly marks such functions as __cdecl so that the code will still compile
  if a CC other than __cdecl has been made the default.
*/
#if defined(_MSC_VER)
#  define WIN_CDECL __cdecl
#else
#  define WIN_CDECL
#endif

/**
 * FORCE_INLINE_TEMPLATE is used to define C "templates", which take constant
 * parameters. They must be inlined for the compiler to eliminate the constant
 * branches.
 */
#define FORCE_INLINE_TEMPLATE static INLINE_KEYWORD FORCE_INLINE_ATTR
/**
 * HINT_INLINE is used to help the compiler generate better code. It is *not*
 * used for "templates", so it can be tweaked based on the compilers
 * performance.
 *
 * gcc-4.8 and gcc-4.9 have been shown to benefit from leaving off the
 * always_inline attribute.
 *
 * clang up to 5.0.0 (trunk) benefit tremendously from the always_inline
 * attribute.
 */
#if !defined(__clang__) && defined(__GNUC__) && __GNUC__ >= 4 && __GNUC_MINOR__ >= 8 && __GNUC__ < 5
#  define HINT_INLINE static INLINE_KEYWORD
#else
#  define HINT_INLINE static INLINE_KEYWORD FORCE_INLINE_ATTR
#endif

/* UNUSED_ATTR tells the compiler it is okay if the function is unused. */
#if defined(__GNUC__)
#  define UNUSED_ATTR __attribute__((unused))
#else
#  define UNUSED_ATTR
#endif

/* force no inlining */
#ifdef _MSC_VER
#  define FORCE_NOINLINE static __declspec(noinline)
#else
#  if defined(__GNUC__) || defined(__ICCARM__)
#    define FORCE_NOINLINE static __attribute__((__noinline__))
#  else
#    define FORCE_NOINLINE static
#  endif
#endif


/* target attribute */
#if defined(__GNUC__) || defined(__ICCARM__)
#  define TARGET_ATTRIBUTE(target) __attribute__((__target__(target)))
#else
#  define TARGET_ATTRIBUTE(target)
#endif

/* Target attribute for BMI2 dynamic dispatch.
 * Enable lzcnt, bmi, and bmi2.
 * We test for bmi1 & bmi2. lzcnt is included in bmi1.
 */
#define BMI2_TARGET_ATTRIBUTE TARGET_ATTRIBUTE("lzcnt,bmi,bmi2")

/* prefetch
 * can be disabled, by declaring NO_PREFETCH build macro */
#if defined(NO_PREFETCH)
#  define PREFETCH_L1(ptr) (void)(ptr) /* disabled */
#  define PREFETCH_L2(ptr) (void)(ptr) /* disabled */
#else
#  if defined(_MSC_VER) && (defined(_M_X64) || defined(_M_I86)) /* _mm_prefetch() is not defined outside of x86/x64 */
#    include <mmintrin.h> /* https://msdn.microsoft.com/fr-fr/library/84szxsww(v=vs.90).aspx */
#    define PREFETCH_L1(ptr) _mm_prefetch((const char*)(ptr), _MM_HINT_T0)
#    define PREFETCH_L2(ptr) _mm_prefetch((const char*)(ptr), _MM_HINT_T1)
#  elif defined(__GNUC__) && ( (__GNUC__ >= 4) || ( (__GNUC__ == 3) && (__GNUC_MINOR__ >= 1) ) )
#    define PREFETCH_L1(ptr) __builtin_prefetch((ptr), 0 /* rw==read */, 3 /* locality */)
#    define PREFETCH_L2(ptr) __builtin_prefetch((ptr), 0 /* rw==read */, 2 /* locality */)
#  elif defined(__aarch64__)
#    define PREFETCH_L1(ptr) __asm__ __volatile__("prfm pldl1keep, %0" ::"Q"(*(ptr)))
#    define PREFETCH_L2(ptr) __asm__ __volatile__("prfm pldl2keep, %0" ::"Q"(*(ptr)))
#  else
#    define PREFETCH_L1(ptr) (void)(ptr) /* disabled */
#    define PREFETCH_L2(ptr) (void)(ptr) /* disabled */
#  endif
#endif /* NO_PREFETCH */

#define CACHELINE_SIZE 64

#define PREFETCH_AREA(p, s) { \
    const char* const _ptr = (const char*)(p); \
    size_t const _size = (size_t)(s); \
    size_t _pos; \
    for (_pos=0; _pos<_size; _pos+=CACHELINE_SIZE) { \
        PREFETCH_L2(_ptr + _pos); \
    } \
}

/* vectorization
 * older GCC (pre gcc-4.3 picked as the cutoff) uses a different syntax,
 * and some compilers, like Intel ICC and MCST LCC, do not support it at all. */
#if !defined(__INTEL_COMPILER) && !defined(__clang__) && defined(__GNUC__) && !defined(__LCC__)
#  if (__GNUC__ == 4 && __GNUC_MINOR__ > 3) || (__GNUC__ >= 5)
#    define DONT_VECTORIZE __attribute__((optimize("no-tree-vectorize")))
#  else
#    define DONT_VECTORIZE _Pragma("GCC optimize(\"no-tree-vectorize\")")
#  endif
#else
#  define DONT_VECTORIZE
#endif

/* Tell the compiler that a branch is likely or unlikely.
 * Only use these macros if it causes the compiler to generate better code.
 * If you can remove a LIKELY/UNLIKELY annotation without speed changes in gcc
 * and clang, please do.
 */
#if defined(__GNUC__)
#define LIKELY(x) (__builtin_expect((x), 1))
#define UNLIKELY(x) (__builtin_expect((x), 0))
#else
#define LIKELY(x) (x)
#define UNLIKELY(x) (x)
#endif

#if __has_builtin(__builtin_unreachable) || (defined(__GNUC__) && (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 5)))
#  define ZSTD_UNREACHABLE { assert(0), __builtin_unreachable(); }
#else
#  define ZSTD_UNREACHABLE { assert(0); }
#endif

/* disable warnings */
#ifdef _MSC_VER /* Visual Studio */
#  include <intrin.h> /* For Visual 2005 */
#  pragma warning(disable : 4100) /* disable: C4100: unreferenced formal parameter */
#  pragma warning(disable : 4127) /* disable: C4127: conditional expression is constant */
#  pragma warning(disable : 4204) /* disable: C4204: non-constant aggregate initializer */
#  pragma warning(disable : 4214) /* disable: C4214: non-int bitfields */
#  pragma warning(disable : 4324) /* disable: C4324: padded structure */
#endif

/*Like DYNAMIC_BMI2 but for compile time determination of BMI2 support*/
#ifndef STATIC_BMI2
#  if defined(_MSC_VER) && (defined(_M_X64) || defined(_M_I86))
#    ifdef __AVX2__  //MSVC does not have a BMI2 specific flag, but every CPU that supports AVX2 also supports BMI2
#      define STATIC_BMI2 1
#    endif
#  elif defined(__BMI2__) && defined(__x86_64__) && defined(__GNUC__)
#    define STATIC_BMI2 1
#  endif
#endif

#ifndef STATIC_BMI2
#define STATIC_BMI2 0
#endif

/* compile time determination of SIMD support */
#if !defined(ZSTD_NO_INTRINSICS)
#  if defined(__SSE2__) || defined(_M_AMD64) || (defined (_M_IX86) && defined(_M_IX86_FP) && (_M_IX86_FP >= 2))
#    define ZSTD_ARCH_X86_SSE2
#  endif
#  if defined(__ARM_NEON) || defined(_M_ARM64)
#    define ZSTD_ARCH_ARM_NEON
#  endif
#
#  if defined(ZSTD_ARCH_X86_SSE2)
#    include <emmintrin.h>
#  elif defined(ZSTD_ARCH_ARM_NEON)
#    include <arm_neon.h>
#  endif
#endif

/* C-language Attributes are added in C23. */
#if defined(__STDC_VERSION__) && (__STDC_VERSION__ > 201710L) && defined(__has_c_attribute)
#  define ZSTD_HAS_C_ATTRIBUTE(x) __has_c_attribute(x)
#else
#  define ZSTD_HAS_C_ATTRIBUTE(x) 0
#endif

/* Only use C++ attributes in C++. Some compilers report support for C++
 * attributes when compiling with C.
 */
#if defined(__cplusplus) && defined(__has_cpp_attribute)
#  define ZSTD_HAS_CPP_ATTRIBUTE(x) __has_cpp_attribute(x)
#else
#  define ZSTD_HAS_CPP_ATTRIBUTE(x) 0
#endif

/* Define ZSTD_FALLTHROUGH macro for annotating switch case with the 'fallthrough' attribute.
 * - C23: https://en.cppreference.com/w/c/language/attributes/fallthrough
 * - CPP17: https://en.cppreference.com/w/cpp/language/attributes/fallthrough
 * - Else: __attribute__((__fallthrough__))
 */
#ifndef ZSTD_FALLTHROUGH
#  if ZSTD_HAS_C_ATTRIBUTE(fallthrough)
#    define ZSTD_FALLTHROUGH [[fallthrough]]
#  elif ZSTD_HAS_CPP_ATTRIBUTE(fallthrough)
#    define ZSTD_FALLTHROUGH [[fallthrough]]
#  elif __has_attribute(__fallthrough__)
    /* Leading semicolon is to satisfy gcc-11 with -pedantic. Without the semicolon
     * gcc complains about: a label can only be part of a statement and a declaration is not a statement.
     */
#    define ZSTD_FALLTHROUGH ; __attribute__((__fallthrough__))
#  else
#    define ZSTD_FALLTHROUGH
#  endif
#endif

/*-**************************************************************
* Alignment check
*****************************************************************/

/* this test was initially positioned in mem.h,
 * but this file is removed (or replaced) for linux kernel
 * so it's now hosted in compiler.h,
 * which remains valid for both user & kernel spaces.
 */

#ifndef ZSTD_ALIGNOF
#  if defined(__GNUC__) || defined(_MSC_VER)
    /* covers gcc, clang & MSVC */
    /* note : this section must come first, before C11,
     * due to a limitation in the kernel source generator */
#    define ZSTD_ALIGNOF(T) __alignof(T)

#  elif defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 201112L)
    /* C11 support */
#    include <stdalign.h>
#    define ZSTD_ALIGNOF(T) alignof(T)

#  else
    /* No known support for alignof() - imperfect backup */
#    define ZSTD_ALIGNOF(T) (sizeof(void*) < sizeof(T) ? sizeof(void*) : sizeof(T))

#  endif
#endif /* ZSTD_ALIGNOF */

/*-**************************************************************
* Sanitizer
*****************************************************************/

/* Issue #3240 reports an ASAN failure on an llvm-mingw build. Out of an
 * abundance of caution, disable our custom poisoning on mingw. */
#ifdef __MINGW32__
#ifndef ZSTD_ASAN_DONT_POISON_WORKSPACE
#define ZSTD_ASAN_DONT_POISON_WORKSPACE 1
#endif
#ifndef ZSTD_MSAN_DONT_POISON_WORKSPACE
#define ZSTD_MSAN_DONT_POISON_WORKSPACE 1
#endif
#endif

#if ZSTD_MEMORY_SANITIZER && !defined(ZSTD_MSAN_DONT_POISON_WORKSPACE)
/* Not all platforms that support msan provide sanitizers/msan_interface.h.
 * We therefore declare the functions we need ourselves, rather than trying to
 * include the header file... */
#include <stddef.h> /* size_t */
#define ZSTD_DEPS_NEED_STDINT
#include "zstd_deps.h" /* intptr_t */

/* Make memory region fully initialized (without changing its contents). */
void __msan_unpoison(const volatile void *a, size_t size);

/* Make memory region fully uninitialized (without changing its contents).
   This is a legacy interface that does not update origin information. Use
   __msan_allocated_memory() instead. */
void __msan_poison(const volatile void *a, size_t size);

/* Returns the offset of the first (at least partially) poisoned byte in the
   memory range, or -1 if the whole range is good. */
intptr_t __msan_test_shadow(const volatile void *x, size_t size);

/* Print shadow and origin for the memory range to stderr in a human-readable
   format. */
void __msan_print_shadow(const volatile void *x, size_t size);
#endif

#if ZSTD_ADDRESS_SANITIZER && !defined(ZSTD_ASAN_DONT_POISON_WORKSPACE)
/* Not all platforms that support asan provide sanitizers/asan_interface.h.
 * We therefore declare the functions we need ourselves, rather than trying to
 * include the header file... */
#include <stddef.h> /* size_t */

/**
 * Marks a memory region (<c>[addr, addr+size)</c>) as unaddressable.
 *
 * This memory must be previously allocated by your program. Instrumented
 * code is forbidden from accessing addresses in this region until it is
 * unpoisoned. This function is not guaranteed to poison the entire region -
 * it could poison only a subregion of <c>[addr, addr+size)</c> due to ASan
 * alignment restrictions.
 *
 * \note This function is not thread-safe because no two threads can poison or
 * unpoison memory in the same memory region simultaneously.
 *
 * \param addr Start of memory region.
 * \param size Size of memory region. */
void __asan_poison_memory_region(void const volatile *addr, size_t size);

/**
 * Marks a memory region (<c>[addr, addr+size)</c>) as addressable.
 *
 * This memory must be previously allocated by your program. Accessing
 * addresses in this region is allowed until this region is poisoned again.
 * This function could unpoison a super-region of <c>[addr, addr+size)</c> due
 * to ASan alignment restrictions.
 *
 * \note This function is not thread-safe because no two threads can
 * poison or unpoison memory in the same memory region simultaneously.
 *
 * \param addr Start of memory region.
 * \param size Size of memory region. */
void __asan_unpoison_memory_region(void const volatile *addr, size_t size);
#endif

#endif /* ZSTD_COMPILER_H */
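ZSTD_FALLTHROUGH's detection ladder is easiest to appreciate in use. The sketch below reproduces a reduced version of the ladder (the C23 attribute when detectable, then the GNU attribute, then a no-op) and applies it in a deliberately falling-through switch; FALLTHROUGH and sumTop are illustrative names, not the vendored macro:

#include <stdio.h>

/* Reduced detection ladder, mirroring the one in compiler.h above. */
#if defined(__STDC_VERSION__) && (__STDC_VERSION__ > 201710L) && defined(__has_c_attribute)
#  if __has_c_attribute(fallthrough)
#    define FALLTHROUGH [[fallthrough]]
#  endif
#endif
#ifndef FALLTHROUGH
#  if defined(__GNUC__) && __GNUC__ >= 7
#    define FALLTHROUGH __attribute__((__fallthrough__))
#  else
#    define FALLTHROUGH ((void)0)   /* no annotation available: plain no-op */
#  endif
#endif

/* Sum the top n (1..3) entries of a table; each case intentionally falls
 * through to the next, and the annotation silences -Wimplicit-fallthrough. */
static int sumTop(const int* t, int n)
{
    int sum = 0;
    switch (n) {
    case 3: sum += t[2]; FALLTHROUGH;
    case 2: sum += t[1]; FALLTHROUGH;
    case 1: sum += t[0];
    default: break;
    }
    return sum;
}

int main(void)
{
    const int t[3] = { 1, 2, 4 };
    printf("%d %d %d\n", sumTop(t, 1), sumTop(t, 2), sumTop(t, 3));   /* 1 3 7 */
    return 0;
}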
213
ext/zstd/lib/common/cpu.h
Normal file
@@ -0,0 +1,213 @@
/*
 * Copyright (c) Meta Platforms, Inc. and affiliates.
 * All rights reserved.
 *
 * This source code is licensed under both the BSD-style license (found in the
 * LICENSE file in the root directory of this source tree) and the GPLv2 (found
 * in the COPYING file in the root directory of this source tree).
 * You may select, at your option, one of the above-listed licenses.
 */

#ifndef ZSTD_COMMON_CPU_H
#define ZSTD_COMMON_CPU_H

/**
 * Implementation taken from folly/CpuId.h
 * https://github.com/facebook/folly/blob/master/folly/CpuId.h
 */

#include "mem.h"

#ifdef _MSC_VER
#include <intrin.h>
#endif

typedef struct {
    U32 f1c;
    U32 f1d;
    U32 f7b;
    U32 f7c;
} ZSTD_cpuid_t;

MEM_STATIC ZSTD_cpuid_t ZSTD_cpuid(void) {
    U32 f1c = 0;
    U32 f1d = 0;
    U32 f7b = 0;
    U32 f7c = 0;
#if defined(_MSC_VER) && (defined(_M_X64) || defined(_M_IX86))
    int reg[4];
    __cpuid((int*)reg, 0);
    {
        int const n = reg[0];
        if (n >= 1) {
            __cpuid((int*)reg, 1);
            f1c = (U32)reg[2];
            f1d = (U32)reg[3];
        }
        if (n >= 7) {
            __cpuidex((int*)reg, 7, 0);
            f7b = (U32)reg[1];
            f7c = (U32)reg[2];
        }
    }
#elif defined(__i386__) && defined(__PIC__) && !defined(__clang__) && defined(__GNUC__)
    /* The following block like the normal cpuid branch below, but gcc
     * reserves ebx for use of its pic register so we must specially
     * handle the save and restore to avoid clobbering the register
     */
    U32 n;
    __asm__(
        "pushl %%ebx\n\t"
        "cpuid\n\t"
        "popl %%ebx\n\t"
        : "=a"(n)
        : "a"(0)
        : "ecx", "edx");
    if (n >= 1) {
        U32 f1a;
        __asm__(
            "pushl %%ebx\n\t"
            "cpuid\n\t"
            "popl %%ebx\n\t"
            : "=a"(f1a), "=c"(f1c), "=d"(f1d)
            : "a"(1));
    }
    if (n >= 7) {
        __asm__(
            "pushl %%ebx\n\t"
            "cpuid\n\t"
            "movl %%ebx, %%eax\n\t"
            "popl %%ebx"
            : "=a"(f7b), "=c"(f7c)
            : "a"(7), "c"(0)
            : "edx");
    }
#elif defined(__x86_64__) || defined(_M_X64) || defined(__i386__)
    U32 n;
    __asm__("cpuid" : "=a"(n) : "a"(0) : "ebx", "ecx", "edx");
    if (n >= 1) {
        U32 f1a;
        __asm__("cpuid" : "=a"(f1a), "=c"(f1c), "=d"(f1d) : "a"(1) : "ebx");
    }
    if (n >= 7) {
        U32 f7a;
        __asm__("cpuid"
                : "=a"(f7a), "=b"(f7b), "=c"(f7c)
                : "a"(7), "c"(0)
                : "edx");
    }
#endif
    {
        ZSTD_cpuid_t cpuid;
        cpuid.f1c = f1c;
        cpuid.f1d = f1d;
        cpuid.f7b = f7b;
        cpuid.f7c = f7c;
        return cpuid;
    }
}

#define X(name, r, bit)                                                \
    MEM_STATIC int ZSTD_cpuid_##name(ZSTD_cpuid_t const cpuid) {       \
        return ((cpuid.r) & (1U << bit)) != 0;                         \
    }

/* cpuid(1): Processor Info and Feature Bits. */
#define C(name, bit) X(name, f1c, bit)
C(sse3, 0)
C(pclmuldq, 1)
C(dtes64, 2)
C(monitor, 3)
C(dscpl, 4)
C(vmx, 5)
C(smx, 6)
C(eist, 7)
C(tm2, 8)
C(ssse3, 9)
C(cnxtid, 10)
C(fma, 12)
C(cx16, 13)
C(xtpr, 14)
C(pdcm, 15)
C(pcid, 17)
C(dca, 18)
C(sse41, 19)
C(sse42, 20)
C(x2apic, 21)
C(movbe, 22)
C(popcnt, 23)
C(tscdeadline, 24)
C(aes, 25)
C(xsave, 26)
C(osxsave, 27)
C(avx, 28)
C(f16c, 29)
C(rdrand, 30)
#undef C
#define D(name, bit) X(name, f1d, bit)
D(fpu, 0)
D(vme, 1)
D(de, 2)
D(pse, 3)
D(tsc, 4)
D(msr, 5)
D(pae, 6)
D(mce, 7)
D(cx8, 8)
D(apic, 9)
D(sep, 11)
D(mtrr, 12)
D(pge, 13)
D(mca, 14)
D(cmov, 15)
D(pat, 16)
D(pse36, 17)
D(psn, 18)
D(clfsh, 19)
D(ds, 21)
D(acpi, 22)
D(mmx, 23)
D(fxsr, 24)
D(sse, 25)
D(sse2, 26)
D(ss, 27)
D(htt, 28)
D(tm, 29)
D(pbe, 31)
#undef D

/* cpuid(7): Extended Features. */
#define B(name, bit) X(name, f7b, bit)
B(bmi1, 3)
B(hle, 4)
B(avx2, 5)
B(smep, 7)
B(bmi2, 8)
B(erms, 9)
B(invpcid, 10)
B(rtm, 11)
B(mpx, 14)
B(avx512f, 16)
B(avx512dq, 17)
B(rdseed, 18)
B(adx, 19)
B(smap, 20)
B(avx512ifma, 21)
B(pcommit, 22)
B(clflushopt, 23)
B(clwb, 24)
B(avx512pf, 26)
B(avx512er, 27)
B(avx512cd, 28)
B(sha, 29)
B(avx512bw, 30)
B(avx512vl, 31)
#undef B
#define C(name, bit) X(name, f7c, bit)
C(prefetchwt1, 0)
C(avx512vbmi, 1)
#undef C

#undef X

#endif /* ZSTD_COMMON_CPU_H */
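The `X(name, r, bit)` macro above is an X-macro: a single table of (name, bit) pairs expands into one predicate function per feature flag, so the flag list is written exactly once. A self-contained miniature of the same trick — Features, FEATURE_LIST, and the has_* functions are illustrative names for this note only:

#include <stdio.h>

typedef struct { unsigned f1c; } Features;   /* stand-in for one cpuid register */

/* One table of (name, bit) pairs... */
#define FEATURE_LIST \
    F(sse3,   0)     \
    F(ssse3,  9)     \
    F(sse41, 19)     \
    F(avx,   28)

/* ...expands into one predicate function per flag. */
#define F(name, bit) \
    static int has_##name(Features f) { return (f.f1c & (1u << bit)) != 0; }
FEATURE_LIST
#undef F

int main(void)
{
    Features f = { (1u << 9) | (1u << 28) };   /* pretend cpuid reported ssse3+avx */
    printf("sse3=%d ssse3=%d sse41=%d avx=%d\n",
           has_sse3(f), has_ssse3(f), has_sse41(f), has_avx(f));   /* 0 1 0 1 */
    return 0;
}

Adding a flag means adding one table row; the predicate, and anything else generated from the table, follows automatically.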
24
ext/zstd/lib/common/debug.c
vendored
Normal file
@@ -0,0 +1,24 @@
/* ******************************************************************
 * debug
 * Part of FSE library
 * Copyright (c) Meta Platforms, Inc. and affiliates.
 *
 * You can contact the author at :
 * - Source repository : https://github.com/Cyan4973/FiniteStateEntropy
 *
 * This source code is licensed under both the BSD-style license (found in the
 * LICENSE file in the root directory of this source tree) and the GPLv2 (found
 * in the COPYING file in the root directory of this source tree).
 * You may select, at your option, one of the above-listed licenses.
****************************************************************** */


/*
 * This module only hosts one global variable
 * which can be used to dynamically influence the verbosity of traces,
 * such as DEBUGLOG and RAWLOG
 */

#include "debug.h"

int g_debuglevel = DEBUGLEVEL;
107
ext/zstd/lib/common/debug.h
Normal file
@@ -0,0 +1,107 @@
/* ******************************************************************
 * debug
 * Part of FSE library
 * Copyright (c) Meta Platforms, Inc. and affiliates.
 *
 * You can contact the author at :
 * - Source repository : https://github.com/Cyan4973/FiniteStateEntropy
 *
 * This source code is licensed under both the BSD-style license (found in the
 * LICENSE file in the root directory of this source tree) and the GPLv2 (found
 * in the COPYING file in the root directory of this source tree).
 * You may select, at your option, one of the above-listed licenses.
****************************************************************** */


/*
 * The purpose of this header is to enable debug functions.
 * They regroup assert(), DEBUGLOG() and RAWLOG() for run-time,
 * and DEBUG_STATIC_ASSERT() for compile-time.
 *
 * By default, DEBUGLEVEL==0, which means run-time debug is disabled.
 *
 * Level 1 enables assert() only.
 * Starting level 2, traces can be generated and pushed to stderr.
 * The higher the level, the more verbose the traces.
 *
 * It's possible to dynamically adjust level using variable g_debug_level,
 * which is only declared if DEBUGLEVEL>=2,
 * and is a global variable, not multi-thread protected (use with care)
 */

#ifndef DEBUG_H_12987983217
#define DEBUG_H_12987983217

#if defined (__cplusplus)
extern "C" {
#endif


/* static assert is triggered at compile time, leaving no runtime artefact.
 * static assert only works with compile-time constants.
 * Also, this variant can only be used inside a function. */
#define DEBUG_STATIC_ASSERT(c) (void)sizeof(char[(c) ? 1 : -1])


/* DEBUGLEVEL is expected to be defined externally,
 * typically through compiler command line.
 * Value must be a number. */
#ifndef DEBUGLEVEL
#  define DEBUGLEVEL 0
#endif


/* recommended values for DEBUGLEVEL :
 * 0 : release mode, no debug, all run-time checks disabled
 * 1 : enables assert() only, no display
 * 2 : reserved, for currently active debug path
 * 3 : events once per object lifetime (CCtx, CDict, etc.)
 * 4 : events once per frame
 * 5 : events once per block
 * 6 : events once per sequence (verbose)
 * 7+: events at every position (*very* verbose)
 *
 * It's generally inconvenient to output traces > 5.
 * In which case, it's possible to selectively trigger high verbosity levels
 * by modifying g_debug_level.
 */

#if (DEBUGLEVEL>=1)
#  define ZSTD_DEPS_NEED_ASSERT
#  include "zstd_deps.h"
#else
#  ifndef assert   /* assert may be already defined, due to prior #include <assert.h> */
#    define assert(condition) ((void)0)   /* disable assert (default) */
#  endif
#endif

#if (DEBUGLEVEL>=2)
#  define ZSTD_DEPS_NEED_IO
#  include "zstd_deps.h"
extern int g_debuglevel; /* the variable is only declared,
                            it actually lives in debug.c,
                            and is shared by the whole process.
                            It's not thread-safe.
                            It's useful when enabling very verbose levels
                            on selective conditions (such as position in src) */

#  define RAWLOG(l, ...) {                        \
        if (l<=g_debuglevel) {                    \
            ZSTD_DEBUG_PRINT(__VA_ARGS__);        \
    }   }
#  define DEBUGLOG(l, ...) {                              \
        if (l<=g_debuglevel) {                            \
            ZSTD_DEBUG_PRINT(__FILE__ ": " __VA_ARGS__);  \
            ZSTD_DEBUG_PRINT(" \n");                      \
    }   }
#else
#  define RAWLOG(l, ...) {}    /* disabled */
#  define DEBUGLOG(l, ...) {}  /* disabled */
#endif


#if defined (__cplusplus)
}
#endif

#endif /* DEBUG_H_12987983217 */
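The RAWLOG/DEBUGLOG pair combines a compile-time ceiling (DEBUGLEVEL, which can remove the logging code entirely) with a run-time knob (g_debuglevel). A standalone sketch of the same two-level gate — DLOG is an illustrative name, not the vendored macro:

#include <stdio.h>

#ifndef DEBUGLEVEL
#  define DEBUGLEVEL 2            /* compile-time ceiling, normally set via -D */
#endif

static int g_debuglevel = DEBUGLEVEL;   /* run-time knob, like debug.c's global */

#if (DEBUGLEVEL >= 2)
#  define DLOG(l, ...) do {                              \
        if ((l) <= g_debuglevel) {                       \
            fprintf(stderr, __FILE__ ": " __VA_ARGS__);  \
            fprintf(stderr, "\n");                       \
        } } while (0)
#else
#  define DLOG(l, ...) do { (void)g_debuglevel; } while (0)   /* compiled out */
#endif

int main(void)
{
    DLOG(2, "visible at the default level");
    DLOG(5, "suppressed while g_debuglevel == %d", DEBUGLEVEL);
    g_debuglevel = 5;   /* raise verbosity at run time */
    DLOG(5, "now visible");
    return 0;
}

Building with -DDEBUGLEVEL=0 removes every message at compile time, while the global lets a still-instrumented build dial verbosity up and down without recompiling.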
340
ext/zstd/lib/common/entropy_common.c
vendored
Normal file
340
ext/zstd/lib/common/entropy_common.c
vendored
Normal file
@@ -0,0 +1,340 @@
|
||||
/* ******************************************************************
|
||||
* Common functions of New Generation Entropy library
|
||||
* Copyright (c) Meta Platforms, Inc. and affiliates.
|
||||
*
|
||||
* You can contact the author at :
|
||||
* - FSE+HUF source repository : https://github.com/Cyan4973/FiniteStateEntropy
|
||||
* - Public forum : https://groups.google.com/forum/#!forum/lz4c
|
||||
*
|
||||
* This source code is licensed under both the BSD-style license (found in the
|
||||
* LICENSE file in the root directory of this source tree) and the GPLv2 (found
|
||||
* in the COPYING file in the root directory of this source tree).
|
||||
* You may select, at your option, one of the above-listed licenses.
|
||||
****************************************************************** */
|
||||
|
||||
/* *************************************
|
||||
* Dependencies
|
||||
***************************************/
|
||||
#include "mem.h"
|
||||
#include "error_private.h" /* ERR_*, ERROR */
|
||||
#define FSE_STATIC_LINKING_ONLY /* FSE_MIN_TABLELOG */
|
||||
#include "fse.h"
|
||||
#include "huf.h"
|
||||
#include "bits.h" /* ZSDT_highbit32, ZSTD_countTrailingZeros32 */
|
||||
|
||||
|
||||
/*=== Version ===*/
|
||||
unsigned FSE_versionNumber(void) { return FSE_VERSION_NUMBER; }
|
||||
|
||||
|
||||
/*=== Error Management ===*/
|
||||
unsigned FSE_isError(size_t code) { return ERR_isError(code); }
|
||||
const char* FSE_getErrorName(size_t code) { return ERR_getErrorName(code); }
|
||||
|
||||
unsigned HUF_isError(size_t code) { return ERR_isError(code); }
|
||||
const char* HUF_getErrorName(size_t code) { return ERR_getErrorName(code); }
|
||||
|
||||
|
||||
/*-**************************************************************
|
||||
* FSE NCount encoding-decoding
|
||||
****************************************************************/
|
||||
FORCE_INLINE_TEMPLATE
|
||||
size_t FSE_readNCount_body(short* normalizedCounter, unsigned* maxSVPtr, unsigned* tableLogPtr,
|
||||
const void* headerBuffer, size_t hbSize)
|
||||
{
|
||||
const BYTE* const istart = (const BYTE*) headerBuffer;
|
||||
const BYTE* const iend = istart + hbSize;
|
||||
const BYTE* ip = istart;
|
||||
int nbBits;
|
||||
int remaining;
|
||||
int threshold;
|
||||
U32 bitStream;
|
||||
int bitCount;
|
||||
    unsigned charnum = 0;
    unsigned const maxSV1 = *maxSVPtr + 1;
    int previous0 = 0;

    if (hbSize < 8) {
        /* This function only works when hbSize >= 8 */
        char buffer[8] = {0};
        ZSTD_memcpy(buffer, headerBuffer, hbSize);
        {   size_t const countSize = FSE_readNCount(normalizedCounter, maxSVPtr, tableLogPtr,
                                                    buffer, sizeof(buffer));
            if (FSE_isError(countSize)) return countSize;
            if (countSize > hbSize) return ERROR(corruption_detected);
            return countSize;
    }   }
    assert(hbSize >= 8);

    /* init */
    ZSTD_memset(normalizedCounter, 0, (*maxSVPtr+1) * sizeof(normalizedCounter[0]));   /* all symbols not present in NCount have a frequency of 0 */
    bitStream = MEM_readLE32(ip);
    nbBits = (bitStream & 0xF) + FSE_MIN_TABLELOG;   /* extract tableLog */
    if (nbBits > FSE_TABLELOG_ABSOLUTE_MAX) return ERROR(tableLog_tooLarge);
    bitStream >>= 4;
    bitCount = 4;
    *tableLogPtr = nbBits;
    remaining = (1<<nbBits)+1;
    threshold = 1<<nbBits;
    nbBits++;

    for (;;) {
        if (previous0) {
            /* Count the number of repeats. Each time the
             * 2-bit repeat code is 0b11 there is another
             * repeat.
             * Avoid UB by setting the high bit to 1.
             */
            int repeats = ZSTD_countTrailingZeros32(~bitStream | 0x80000000) >> 1;
            while (repeats >= 12) {
                charnum += 3 * 12;
                if (LIKELY(ip <= iend-7)) {
                    ip += 3;
                } else {
                    bitCount -= (int)(8 * (iend - 7 - ip));
                    bitCount &= 31;
                    ip = iend - 4;
                }
                bitStream = MEM_readLE32(ip) >> bitCount;
                repeats = ZSTD_countTrailingZeros32(~bitStream | 0x80000000) >> 1;
            }
            charnum += 3 * repeats;
            bitStream >>= 2 * repeats;
            bitCount += 2 * repeats;

            /* Add the final repeat which isn't 0b11. */
            assert((bitStream & 3) < 3);
            charnum += bitStream & 3;
            bitCount += 2;

            /* This is an error, but break and return an error
             * at the end, because returning out of a loop makes
             * it harder for the compiler to optimize.
             */
            if (charnum >= maxSV1) break;

            /* We don't need to set the normalized count to 0
             * because we already memset the whole buffer to 0.
             */

            if (LIKELY(ip <= iend-7) || (ip + (bitCount>>3) <= iend-4)) {
                assert((bitCount >> 3) <= 3); /* For first condition to work */
                ip += bitCount>>3;
                bitCount &= 7;
            } else {
                bitCount -= (int)(8 * (iend - 4 - ip));
                bitCount &= 31;
                ip = iend - 4;
            }
            bitStream = MEM_readLE32(ip) >> bitCount;
        }
        {
            int const max = (2*threshold-1) - remaining;
            int count;

            if ((bitStream & (threshold-1)) < (U32)max) {
                count = bitStream & (threshold-1);
                bitCount += nbBits-1;
            } else {
                count = bitStream & (2*threshold-1);
                if (count >= threshold) count -= max;
                bitCount += nbBits;
            }

            count--;   /* extra accuracy */
            /* When it matters (small blocks), this is a
             * predictable branch, because we don't use -1.
             */
            if (count >= 0) {
                remaining -= count;
            } else {
                assert(count == -1);
                remaining += count;
            }
            normalizedCounter[charnum++] = (short)count;
            previous0 = !count;

            assert(threshold > 1);
            if (remaining < threshold) {
                /* This branch can be folded into the
                 * threshold update condition because we
                 * know that threshold > 1.
                 */
                if (remaining <= 1) break;
                nbBits = ZSTD_highbit32(remaining) + 1;
                threshold = 1 << (nbBits - 1);
            }
            if (charnum >= maxSV1) break;

            if (LIKELY(ip <= iend-7) || (ip + (bitCount>>3) <= iend-4)) {
                ip += bitCount>>3;
                bitCount &= 7;
            } else {
                bitCount -= (int)(8 * (iend - 4 - ip));
                bitCount &= 31;
                ip = iend - 4;
            }
            bitStream = MEM_readLE32(ip) >> bitCount;
    }   }
    if (remaining != 1) return ERROR(corruption_detected);
    /* Only possible when there are too many zeros. */
    if (charnum > maxSV1) return ERROR(maxSymbolValue_tooSmall);
    if (bitCount > 32) return ERROR(corruption_detected);
    *maxSVPtr = charnum-1;

    ip += (bitCount+7)>>3;
    return ip-istart;
}

/* Avoids the FORCE_INLINE of the _body() function. */
static size_t FSE_readNCount_body_default(
        short* normalizedCounter, unsigned* maxSVPtr, unsigned* tableLogPtr,
        const void* headerBuffer, size_t hbSize)
{
    return FSE_readNCount_body(normalizedCounter, maxSVPtr, tableLogPtr, headerBuffer, hbSize);
}

#if DYNAMIC_BMI2
BMI2_TARGET_ATTRIBUTE static size_t FSE_readNCount_body_bmi2(
        short* normalizedCounter, unsigned* maxSVPtr, unsigned* tableLogPtr,
        const void* headerBuffer, size_t hbSize)
{
    return FSE_readNCount_body(normalizedCounter, maxSVPtr, tableLogPtr, headerBuffer, hbSize);
}
#endif

size_t FSE_readNCount_bmi2(
        short* normalizedCounter, unsigned* maxSVPtr, unsigned* tableLogPtr,
        const void* headerBuffer, size_t hbSize, int bmi2)
{
#if DYNAMIC_BMI2
    if (bmi2) {
        return FSE_readNCount_body_bmi2(normalizedCounter, maxSVPtr, tableLogPtr, headerBuffer, hbSize);
    }
#endif
    (void)bmi2;
    return FSE_readNCount_body_default(normalizedCounter, maxSVPtr, tableLogPtr, headerBuffer, hbSize);
}

size_t FSE_readNCount(
        short* normalizedCounter, unsigned* maxSVPtr, unsigned* tableLogPtr,
        const void* headerBuffer, size_t hbSize)
{
    return FSE_readNCount_bmi2(normalizedCounter, maxSVPtr, tableLogPtr, headerBuffer, hbSize, /* bmi2 */ 0);
}
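
/* A minimal usage sketch for FSE_readNCount() (buffer names and sizes here
 * are illustrative, not part of the upstream sources):
 *
 *     short ncount[256];
 *     unsigned maxSV = 255;    // in: table capacity; out: real max symbol value
 *     unsigned tableLog;
 *     size_t const hSize = FSE_readNCount(ncount, &maxSV, &tableLog, header, headerSize);
 *     if (FSE_isError(hSize)) return hSize;
 *     // `header + hSize` now points at the entropy-coded payload
 */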


/*! HUF_readStats() :
    Read compact Huffman tree, saved by HUF_writeCTable().
    `huffWeight` is destination buffer.
    `rankStats` is assumed to be a table of at least HUF_TABLELOG_MAX U32.
    @return : size read from `src` , or an error Code .
    Note : Needed by HUF_readCTable() and HUF_readDTableX?() .
*/
size_t HUF_readStats(BYTE* huffWeight, size_t hwSize, U32* rankStats,
                     U32* nbSymbolsPtr, U32* tableLogPtr,
                     const void* src, size_t srcSize)
{
    U32 wksp[HUF_READ_STATS_WORKSPACE_SIZE_U32];
    return HUF_readStats_wksp(huffWeight, hwSize, rankStats, nbSymbolsPtr, tableLogPtr, src, srcSize, wksp, sizeof(wksp), /* flags */ 0);
}
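
/* A minimal usage sketch (illustrative, not part of the upstream sources):
 *
 *     BYTE weights[HUF_SYMBOLVALUE_MAX + 1];
 *     U32 rankStats[HUF_TABLELOG_MAX + 1];
 *     U32 nbSymbols, tableLog;
 *     size_t const hSize = HUF_readStats(weights, sizeof(weights), rankStats,
 *                                        &nbSymbols, &tableLog, src, srcSize);
 *     if (ERR_isError(hSize)) return hSize;   // truncated or corrupted header
 */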

FORCE_INLINE_TEMPLATE size_t
HUF_readStats_body(BYTE* huffWeight, size_t hwSize, U32* rankStats,
                   U32* nbSymbolsPtr, U32* tableLogPtr,
                   const void* src, size_t srcSize,
                   void* workSpace, size_t wkspSize,
                   int bmi2)
{
    U32 weightTotal;
    const BYTE* ip = (const BYTE*) src;
    size_t iSize;
    size_t oSize;

    if (!srcSize) return ERROR(srcSize_wrong);
    iSize = ip[0];
    /* ZSTD_memset(huffWeight, 0, hwSize); *//* is not necessary, even though some analyzer complain ... */

    if (iSize >= 128) {  /* special header */
        oSize = iSize - 127;
        iSize = ((oSize+1)/2);
        if (iSize+1 > srcSize) return ERROR(srcSize_wrong);
        if (oSize >= hwSize) return ERROR(corruption_detected);
        ip += 1;
        {   U32 n;
            for (n=0; n<oSize; n+=2) {
                huffWeight[n]   = ip[n/2] >> 4;
                huffWeight[n+1] = ip[n/2] & 15;
    }   }   }
    else  {   /* header compressed with FSE (normal case) */
        if (iSize+1 > srcSize) return ERROR(srcSize_wrong);
        /* max (hwSize-1) values decoded, as last one is implied */
        oSize = FSE_decompress_wksp_bmi2(huffWeight, hwSize-1, ip+1, iSize, 6, workSpace, wkspSize, bmi2);
        if (FSE_isError(oSize)) return oSize;
    }

    /* collect weight stats */
    ZSTD_memset(rankStats, 0, (HUF_TABLELOG_MAX + 1) * sizeof(U32));
    weightTotal = 0;
    {   U32 n; for (n=0; n<oSize; n++) {
            if (huffWeight[n] > HUF_TABLELOG_MAX) return ERROR(corruption_detected);
            rankStats[huffWeight[n]]++;
            weightTotal += (1 << huffWeight[n]) >> 1;
    }   }
    if (weightTotal == 0) return ERROR(corruption_detected);

    /* get last non-null symbol weight (implied, total must be 2^n) */
    {   U32 const tableLog = ZSTD_highbit32(weightTotal) + 1;
        if (tableLog > HUF_TABLELOG_MAX) return ERROR(corruption_detected);
        *tableLogPtr = tableLog;
        /* determine last weight */
        {   U32 const total = 1 << tableLog;
            U32 const rest = total - weightTotal;
            U32 const verif = 1 << ZSTD_highbit32(rest);
            U32 const lastWeight = ZSTD_highbit32(rest) + 1;
            if (verif != rest) return ERROR(corruption_detected);    /* last value must be a clean power of 2 */
            huffWeight[oSize] = (BYTE)lastWeight;
            rankStats[lastWeight]++;
    }   }

    /* check tree construction validity */
    if ((rankStats[1] < 2) || (rankStats[1] & 1)) return ERROR(corruption_detected);  /* by construction : at least 2 elts of rank 1, must be even */

    /* results */
    *nbSymbolsPtr = (U32)(oSize+1);
    return iSize+1;
}

/* Avoids the FORCE_INLINE of the _body() function. */
static size_t HUF_readStats_body_default(BYTE* huffWeight, size_t hwSize, U32* rankStats,
                     U32* nbSymbolsPtr, U32* tableLogPtr,
                     const void* src, size_t srcSize,
                     void* workSpace, size_t wkspSize)
{
    return HUF_readStats_body(huffWeight, hwSize, rankStats, nbSymbolsPtr, tableLogPtr, src, srcSize, workSpace, wkspSize, 0);
}

#if DYNAMIC_BMI2
static BMI2_TARGET_ATTRIBUTE size_t HUF_readStats_body_bmi2(BYTE* huffWeight, size_t hwSize, U32* rankStats,
                     U32* nbSymbolsPtr, U32* tableLogPtr,
                     const void* src, size_t srcSize,
                     void* workSpace, size_t wkspSize)
{
    return HUF_readStats_body(huffWeight, hwSize, rankStats, nbSymbolsPtr, tableLogPtr, src, srcSize, workSpace, wkspSize, 1);
}
#endif

size_t HUF_readStats_wksp(BYTE* huffWeight, size_t hwSize, U32* rankStats,
                     U32* nbSymbolsPtr, U32* tableLogPtr,
                     const void* src, size_t srcSize,
                     void* workSpace, size_t wkspSize,
                     int flags)
{
#if DYNAMIC_BMI2
    if (flags & HUF_flags_bmi2) {
        return HUF_readStats_body_bmi2(huffWeight, hwSize, rankStats, nbSymbolsPtr, tableLogPtr, src, srcSize, workSpace, wkspSize);
    }
#endif
    (void)flags;
    return HUF_readStats_body_default(huffWeight, hwSize, rankStats, nbSymbolsPtr, tableLogPtr, src, srcSize, workSpace, wkspSize);
}

63
ext/zstd/lib/common/error_private.c
vendored
Normal file
@@ -0,0 +1,63 @@
/*
 * Copyright (c) Meta Platforms, Inc. and affiliates.
 * All rights reserved.
 *
 * This source code is licensed under both the BSD-style license (found in the
 * LICENSE file in the root directory of this source tree) and the GPLv2 (found
 * in the COPYING file in the root directory of this source tree).
 * You may select, at your option, one of the above-listed licenses.
 */

/* The purpose of this file is to have a single list of error strings embedded in binary */

#include "error_private.h"

const char* ERR_getErrorString(ERR_enum code)
{
#ifdef ZSTD_STRIP_ERROR_STRINGS
    (void)code;
    return "Error strings stripped";
#else
    static const char* const notErrorCode = "Unspecified error code";
    switch( code )
    {
    case PREFIX(no_error): return "No error detected";
    case PREFIX(GENERIC):  return "Error (generic)";
    case PREFIX(prefix_unknown): return "Unknown frame descriptor";
    case PREFIX(version_unsupported): return "Version not supported";
    case PREFIX(frameParameter_unsupported): return "Unsupported frame parameter";
    case PREFIX(frameParameter_windowTooLarge): return "Frame requires too much memory for decoding";
    case PREFIX(corruption_detected): return "Data corruption detected";
    case PREFIX(checksum_wrong): return "Restored data doesn't match checksum";
    case PREFIX(literals_headerWrong): return "Header of Literals' block doesn't respect format specification";
    case PREFIX(parameter_unsupported): return "Unsupported parameter";
    case PREFIX(parameter_combination_unsupported): return "Unsupported combination of parameters";
    case PREFIX(parameter_outOfBound): return "Parameter is out of bound";
    case PREFIX(init_missing): return "Context should be init first";
    case PREFIX(memory_allocation): return "Allocation error : not enough memory";
    case PREFIX(workSpace_tooSmall): return "workSpace buffer is not large enough";
    case PREFIX(stage_wrong): return "Operation not authorized at current processing stage";
    case PREFIX(tableLog_tooLarge): return "tableLog requires too much memory : unsupported";
    case PREFIX(maxSymbolValue_tooLarge): return "Unsupported max Symbol Value : too large";
    case PREFIX(maxSymbolValue_tooSmall): return "Specified maxSymbolValue is too small";
    case PREFIX(stabilityCondition_notRespected): return "pledged buffer stability condition is not respected";
    case PREFIX(dictionary_corrupted): return "Dictionary is corrupted";
    case PREFIX(dictionary_wrong): return "Dictionary mismatch";
    case PREFIX(dictionaryCreation_failed): return "Cannot create Dictionary from provided samples";
    case PREFIX(dstSize_tooSmall): return "Destination buffer is too small";
    case PREFIX(srcSize_wrong): return "Src size is incorrect";
    case PREFIX(dstBuffer_null): return "Operation on NULL destination buffer";
    case PREFIX(noForwardProgress_destFull): return "Operation made no progress over multiple calls, due to output buffer being full";
    case PREFIX(noForwardProgress_inputEmpty): return "Operation made no progress over multiple calls, due to input being empty";
    /* following error codes are not stable and may be removed or changed in a future version */
    case PREFIX(frameIndex_tooLarge): return "Frame index is too large";
    case PREFIX(seekableIO): return "An I/O error occurred when reading/seeking";
    case PREFIX(dstBuffer_wrong): return "Destination buffer is wrong";
    case PREFIX(srcBuffer_wrong): return "Source buffer is wrong";
    case PREFIX(sequenceProducer_failed): return "Block-level external sequence producer returned an error code";
    case PREFIX(externalSequences_invalid): return "External sequences are not valid";
    case PREFIX(maxCode):
    default: return notErrorCode;
    }
#endif
}

159
ext/zstd/lib/common/error_private.h
Normal file
@@ -0,0 +1,159 @@
/*
 * Copyright (c) Meta Platforms, Inc. and affiliates.
 * All rights reserved.
 *
 * This source code is licensed under both the BSD-style license (found in the
 * LICENSE file in the root directory of this source tree) and the GPLv2 (found
 * in the COPYING file in the root directory of this source tree).
 * You may select, at your option, one of the above-listed licenses.
 */

/* Note : this module is expected to remain private, do not expose it */

#ifndef ERROR_H_MODULE
#define ERROR_H_MODULE

#if defined (__cplusplus)
extern "C" {
#endif


/* ****************************************
*  Dependencies
******************************************/
#include "../zstd_errors.h"  /* enum list */
#include "compiler.h"
#include "debug.h"
#include "zstd_deps.h"       /* size_t */


/* ****************************************
*  Compiler-specific
******************************************/
#if defined(__GNUC__)
#  define ERR_STATIC static __attribute__((unused))
#elif defined (__cplusplus) || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */)
#  define ERR_STATIC static inline
#elif defined(_MSC_VER)
#  define ERR_STATIC static __inline
#else
#  define ERR_STATIC static  /* this version may generate warnings for unused static functions; disable the relevant warning */
#endif


/*-****************************************
*  Customization (error_public.h)
******************************************/
typedef ZSTD_ErrorCode ERR_enum;
#define PREFIX(name) ZSTD_error_##name


/*-****************************************
*  Error codes handling
******************************************/
#undef ERROR   /* already defined on Visual Studio */
#define ERROR(name) ZSTD_ERROR(name)
#define ZSTD_ERROR(name) ((size_t)-PREFIX(name))

ERR_STATIC unsigned ERR_isError(size_t code) { return (code > ERROR(maxCode)); }

ERR_STATIC ERR_enum ERR_getErrorCode(size_t code) { if (!ERR_isError(code)) return (ERR_enum)0; return (ERR_enum) (0-code); }

/* check and forward error code */
#define CHECK_V_F(e, f) size_t const e = f; if (ERR_isError(e)) return e
#define CHECK_F(f)   { CHECK_V_F(_var_err__, f); }
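
/* A minimal sketch of the intended use (function names here are hypothetical):
 *
 *     static size_t decodeHeader(void* ctx, const void* src, size_t srcSize)
 *     {
 *         CHECK_V_F(hSize, readHeader(ctx, src, srcSize));  // declares hSize, returns early on error
 *         CHECK_F(validateHeader(ctx));                     // forwards errors, discards the value
 *         return hSize;
 *     }
 */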


/*-****************************************
*  Error Strings
******************************************/

const char* ERR_getErrorString(ERR_enum code);   /* error_private.c */

ERR_STATIC const char* ERR_getErrorName(size_t code)
{
    return ERR_getErrorString(ERR_getErrorCode(code));
}

/**
 * Ignore: this is an internal helper.
 *
 * This is a helper function to help force C99-correctness during compilation.
 * Under strict compilation modes, variadic macro arguments can't be empty.
 * However, variadic function arguments can be. Using a function therefore lets
 * us statically check that at least one (string) argument was passed,
 * independent of the compilation flags.
 */
static INLINE_KEYWORD UNUSED_ATTR
void _force_has_format_string(const char *format, ...) {
  (void)format;
}

/**
 * Ignore: this is an internal helper.
 *
 * We want to force this function invocation to be syntactically correct, but
 * we don't want to force runtime evaluation of its arguments.
 */
#define _FORCE_HAS_FORMAT_STRING(...)          \
    if (0) {                                   \
        _force_has_format_string(__VA_ARGS__); \
    }

#define ERR_QUOTE(str) #str

/**
 * Return the specified error if the condition evaluates to true.
 *
 * In debug modes, prints additional information.
 * In order to do that (particularly, printing the conditional that failed),
 * this can't just wrap RETURN_ERROR().
 */
#define RETURN_ERROR_IF(cond, err, ...)                                     \
    if (cond) {                                                             \
        RAWLOG(3, "%s:%d: ERROR!: check %s failed, returning %s",           \
               __FILE__, __LINE__, ERR_QUOTE(cond), ERR_QUOTE(ERROR(err))); \
        _FORCE_HAS_FORMAT_STRING(__VA_ARGS__);                              \
        RAWLOG(3, ": " __VA_ARGS__);                                        \
        RAWLOG(3, "\n");                                                    \
        return ERROR(err);                                                  \
    }

/**
 * Unconditionally return the specified error.
 *
 * In debug modes, prints additional information.
 */
#define RETURN_ERROR(err, ...)                                               \
    do {                                                                     \
        RAWLOG(3, "%s:%d: ERROR!: unconditional check failed, returning %s", \
               __FILE__, __LINE__, ERR_QUOTE(ERROR(err)));                   \
        _FORCE_HAS_FORMAT_STRING(__VA_ARGS__);                               \
        RAWLOG(3, ": " __VA_ARGS__);                                         \
        RAWLOG(3, "\n");                                                     \
        return ERROR(err);                                                   \
    } while(0);

/**
 * If the provided expression evaluates to an error code, returns that error code.
 *
 * In debug modes, prints additional information.
 */
#define FORWARD_IF_ERROR(err, ...)                                                  \
    do {                                                                            \
        size_t const err_code = (err);                                              \
        if (ERR_isError(err_code)) {                                                \
            RAWLOG(3, "%s:%d: ERROR!: forwarding error in %s: %s",                  \
                   __FILE__, __LINE__, ERR_QUOTE(err), ERR_getErrorName(err_code)); \
            _FORCE_HAS_FORMAT_STRING(__VA_ARGS__);                                  \
            RAWLOG(3, ": " __VA_ARGS__);                                            \
            RAWLOG(3, "\n");                                                        \
            return err_code;                                                        \
        }                                                                           \
    } while(0);

#if defined (__cplusplus)
}
#endif

#endif /* ERROR_H_MODULE */

639
ext/zstd/lib/common/fse.h
Normal file
@@ -0,0 +1,639 @@
/* ******************************************************************
 * FSE : Finite State Entropy codec
 * Public Prototypes declaration
 * Copyright (c) Meta Platforms, Inc. and affiliates.
 *
 * You can contact the author at :
 * - Source repository : https://github.com/Cyan4973/FiniteStateEntropy
 *
 * This source code is licensed under both the BSD-style license (found in the
 * LICENSE file in the root directory of this source tree) and the GPLv2 (found
 * in the COPYING file in the root directory of this source tree).
 * You may select, at your option, one of the above-listed licenses.
****************************************************************** */

#if defined (__cplusplus)
extern "C" {
#endif

#ifndef FSE_H
#define FSE_H


/*-*****************************************
*  Dependencies
******************************************/
#include "zstd_deps.h"    /* size_t, ptrdiff_t */


/*-*****************************************
*  FSE_PUBLIC_API : control library symbols visibility
******************************************/
#if defined(FSE_DLL_EXPORT) && (FSE_DLL_EXPORT==1) && defined(__GNUC__) && (__GNUC__ >= 4)
#  define FSE_PUBLIC_API __attribute__ ((visibility ("default")))
#elif defined(FSE_DLL_EXPORT) && (FSE_DLL_EXPORT==1)   /* Visual expected */
#  define FSE_PUBLIC_API __declspec(dllexport)
#elif defined(FSE_DLL_IMPORT) && (FSE_DLL_IMPORT==1)
#  define FSE_PUBLIC_API __declspec(dllimport) /* It isn't required but allows to generate better code, saving a function pointer load from the IAT and an indirect jump.*/
#else
#  define FSE_PUBLIC_API
#endif

/*------   Version   ------*/
#define FSE_VERSION_MAJOR    0
#define FSE_VERSION_MINOR    9
#define FSE_VERSION_RELEASE  0

#define FSE_LIB_VERSION FSE_VERSION_MAJOR.FSE_VERSION_MINOR.FSE_VERSION_RELEASE
#define FSE_QUOTE(str) #str
#define FSE_EXPAND_AND_QUOTE(str) FSE_QUOTE(str)
#define FSE_VERSION_STRING FSE_EXPAND_AND_QUOTE(FSE_LIB_VERSION)

#define FSE_VERSION_NUMBER  (FSE_VERSION_MAJOR *100*100 + FSE_VERSION_MINOR *100 + FSE_VERSION_RELEASE)
FSE_PUBLIC_API unsigned FSE_versionNumber(void);   /**< library version number; to be used when checking dll version */


/*-*****************************************
*  Tool functions
******************************************/
FSE_PUBLIC_API size_t FSE_compressBound(size_t size);       /* maximum compressed size */

/* Error Management */
FSE_PUBLIC_API unsigned    FSE_isError(size_t code);        /* tells if a return value is an error code */
FSE_PUBLIC_API const char* FSE_getErrorName(size_t code);   /* provides error code string (useful for debugging) */


/*-*****************************************
*  FSE detailed API
******************************************/
/*!
FSE_compress() does the following:
1. count symbol occurrence from source[] into table count[] (see hist.h)
2. normalize counters so that sum(count[]) == Power_of_2 (2^tableLog)
3. save normalized counters to memory buffer using writeNCount()
4. build encoding table 'CTable' from normalized counters
5. encode the data stream using encoding table 'CTable'

FSE_decompress() does the following:
1. read normalized counters with readNCount()
2. build decoding table 'DTable' from normalized counters
3. decode the data stream using decoding table 'DTable'

The following API allows targeting specific sub-functions for advanced tasks.
For example, it's possible to compress several blocks using the same 'CTable',
or to save and provide normalized distribution using external method.
*/

/* *** COMPRESSION *** */

/*! FSE_optimalTableLog():
    dynamically downsize 'tableLog' when conditions are met.
    It saves CPU time, by using smaller tables, while preserving or even improving compression ratio.
    @return : recommended tableLog (necessarily <= 'maxTableLog') */
FSE_PUBLIC_API unsigned FSE_optimalTableLog(unsigned maxTableLog, size_t srcSize, unsigned maxSymbolValue);

/*! FSE_normalizeCount():
    normalize counts so that sum(count[]) == Power_of_2 (2^tableLog)
    'normalizedCounter' is a table of short, of minimum size (maxSymbolValue+1).
    useLowProbCount is a boolean parameter which trades off compressed size for
    faster header decoding. When it is set to 1, the compressed data will be slightly
    smaller. And when it is set to 0, FSE_readNCount() and FSE_buildDTable() will be
    faster. If you are compressing a small amount of data (< 2 KB) then useLowProbCount=0
    is a good default, since header deserialization makes a big speed difference.
    Otherwise, useLowProbCount=1 is a good default, since the speed difference is small.
    @return : tableLog,
              or an errorCode, which can be tested using FSE_isError() */
FSE_PUBLIC_API size_t FSE_normalizeCount(short* normalizedCounter, unsigned tableLog,
                    const unsigned* count, size_t srcSize, unsigned maxSymbolValue, unsigned useLowProbCount);

/*! FSE_NCountWriteBound():
    Provides the maximum possible size of an FSE normalized table, given 'maxSymbolValue' and 'tableLog'.
    Typically useful for allocation purpose. */
FSE_PUBLIC_API size_t FSE_NCountWriteBound(unsigned maxSymbolValue, unsigned tableLog);

/*! FSE_writeNCount():
    Compactly save 'normalizedCounter' into 'buffer'.
    @return : size of the compressed table,
              or an errorCode, which can be tested using FSE_isError(). */
FSE_PUBLIC_API size_t FSE_writeNCount (void* buffer, size_t bufferSize,
                                 const short* normalizedCounter,
                                 unsigned maxSymbolValue, unsigned tableLog);

/*! Constructor and Destructor of FSE_CTable.
    Note that FSE_CTable size depends on 'tableLog' and 'maxSymbolValue' */
typedef unsigned FSE_CTable;   /* don't allocate that. It's only meant to be more restrictive than void* */

/*! FSE_buildCTable():
    Builds `ct`, which must be already allocated, using FSE_createCTable().
    @return : 0, or an errorCode, which can be tested using FSE_isError() */
FSE_PUBLIC_API size_t FSE_buildCTable(FSE_CTable* ct, const short* normalizedCounter, unsigned maxSymbolValue, unsigned tableLog);

/*! FSE_compress_usingCTable():
    Compress `src` using `ct` into `dst` which must be already allocated.
    @return : size of compressed data (<= `dstCapacity`),
              or 0 if compressed data could not fit into `dst`,
              or an errorCode, which can be tested using FSE_isError() */
FSE_PUBLIC_API size_t FSE_compress_usingCTable (void* dst, size_t dstCapacity, const void* src, size_t srcSize, const FSE_CTable* ct);

/*!
Tutorial :
----------
The first step is to count all symbols. FSE_count() does this job very fast.
Result will be saved into 'count', a table of unsigned int, which must be already allocated, and have 'maxSymbolValuePtr[0]+1' cells.
'src' is a table of bytes of size 'srcSize'. All values within 'src' MUST be <= maxSymbolValuePtr[0]
maxSymbolValuePtr[0] will be updated, with its real value (necessarily <= original value)
FSE_count() will return the number of occurrence of the most frequent symbol.
This can be used to know if there is a single symbol within 'src', and to quickly evaluate its compressibility.
If there is an error, the function will return an ErrorCode (which can be tested using FSE_isError()).

The next step is to normalize the frequencies.
FSE_normalizeCount() will ensure that sum of frequencies is == 2 ^'tableLog'.
It also guarantees a minimum of 1 to any Symbol with frequency >= 1.
You can use 'tableLog'==0 to mean "use default tableLog value".
If you are unsure of which tableLog value to use, you can ask FSE_optimalTableLog(),
which will provide the optimal valid tableLog given sourceSize, maxSymbolValue, and a user-defined maximum (0 means "default").

The result of FSE_normalizeCount() will be saved into a table,
called 'normalizedCounter', which is a table of signed short.
'normalizedCounter' must be already allocated, and have at least 'maxSymbolValue+1' cells.
The return value is tableLog if everything proceeded as expected.
It is 0 if there is a single symbol within distribution.
If there is an error (ex: invalid tableLog value), the function will return an ErrorCode (which can be tested using FSE_isError()).

'normalizedCounter' can be saved in a compact manner to a memory area using FSE_writeNCount().
'buffer' must be already allocated.
For guaranteed success, buffer size must be at least FSE_headerBound().
The result of the function is the number of bytes written into 'buffer'.
If there is an error, the function will return an ErrorCode (which can be tested using FSE_isError(); ex : buffer size too small).

'normalizedCounter' can then be used to create the compression table 'CTable'.
The space required by 'CTable' must be already allocated, using FSE_createCTable().
You can then use FSE_buildCTable() to fill 'CTable'.
If there is an error, both functions will return an ErrorCode (which can be tested using FSE_isError()).

'CTable' can then be used to compress 'src', with FSE_compress_usingCTable().
Similar to FSE_count(), the convention is that 'src' is assumed to be a table of char of size 'srcSize'
The function returns the size of compressed data (without header), necessarily <= `dstCapacity`.
If it returns '0', compressed data could not fit into 'dst'.
If there is an error, the function will return an ErrorCode (which can be tested using FSE_isError()).
*/
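
/*
A condensed sketch of the tutorial above (buffer sizes are illustrative, and
HIST_count() from hist.h stands in for the counting step, per step 1 of the
overview; error handling is abbreviated):

    unsigned count[256];
    unsigned maxSV = 255;
    size_t const largest = HIST_count(count, &maxSV, src, srcSize);
    if (FSE_isError(largest)) return largest;
    if (largest == srcSize) { ... single-symbol input, emit it as RLE instead ... }
    {   unsigned const tableLog = FSE_optimalTableLog(0, srcSize, maxSV);
        short ncount[256];
        if (FSE_isError(FSE_normalizeCount(ncount, tableLog, count, srcSize, maxSV, 0))) ...
        // then: FSE_writeNCount() into dst, FSE_buildCTable(), FSE_compress_usingCTable()
    }
*/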


/* *** DECOMPRESSION *** */

/*! FSE_readNCount():
    Read compactly saved 'normalizedCounter' from 'rBuffer'.
    @return : size read from 'rBuffer',
              or an errorCode, which can be tested using FSE_isError().
              maxSymbolValuePtr[0] and tableLogPtr[0] will also be updated with their respective values */
FSE_PUBLIC_API size_t FSE_readNCount (short* normalizedCounter,
                           unsigned* maxSymbolValuePtr, unsigned* tableLogPtr,
                           const void* rBuffer, size_t rBuffSize);

/*! FSE_readNCount_bmi2():
 * Same as FSE_readNCount() but pass bmi2=1 when your CPU supports BMI2 and 0 otherwise.
 */
FSE_PUBLIC_API size_t FSE_readNCount_bmi2(short* normalizedCounter,
                           unsigned* maxSymbolValuePtr, unsigned* tableLogPtr,
                           const void* rBuffer, size_t rBuffSize, int bmi2);

typedef unsigned FSE_DTable;   /* don't allocate that. It's just a way to be more restrictive than void* */

/*!
Tutorial :
----------
(Note : these functions only decompress FSE-compressed blocks.
 If block is uncompressed, use memcpy() instead
 If block is a single repeated byte, use memset() instead )

The first step is to obtain the normalized frequencies of symbols.
This can be performed by FSE_readNCount() if it was saved using FSE_writeNCount().
'normalizedCounter' must be already allocated, and have at least 'maxSymbolValuePtr[0]+1' cells of signed short.
In practice, that means it's necessary to know 'maxSymbolValue' beforehand,
or size the table to handle worst case situations (typically 256).
FSE_readNCount() will provide 'tableLog' and 'maxSymbolValue'.
The result of FSE_readNCount() is the number of bytes read from 'rBuffer'.
Note that 'rBufferSize' must be at least 4 bytes, even if useful information is less than that.
If there is an error, the function will return an error code, which can be tested using FSE_isError().

The next step is to build the decompression tables 'FSE_DTable' from 'normalizedCounter'.
This is performed by the function FSE_buildDTable().
The space required by 'FSE_DTable' must be already allocated using FSE_createDTable().
If there is an error, the function will return an error code, which can be tested using FSE_isError().

`FSE_DTable` can then be used to decompress `cSrc`, with FSE_decompress_usingDTable().
`cSrcSize` must be strictly correct, otherwise decompression will fail.
FSE_decompress_usingDTable() result will tell how many bytes were regenerated (<=`dstCapacity`).
If there is an error, the function will return an error code, which can be tested using FSE_isError(). (ex: dst buffer too small)
*/
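
/*
A condensed sketch of the decoding side, using the workspace variant declared
in the static-linking section below (FSE_STATIC_LINKING_ONLY); that function
reads the NCount header itself, builds the table, and decodes in one call:

    U32 wksp[FSE_DECOMPRESS_WKSP_SIZE_U32(FSE_MAX_TABLELOG, FSE_MAX_SYMBOL_VALUE)];
    size_t const dSize = FSE_decompress_wksp_bmi2(dst, dstCapacity, cSrc, cSrcSize,
                             FSE_MAX_TABLELOG, wksp, sizeof(wksp), /* bmi2 */ 0);
    if (FSE_isError(dSize)) { ... corrupted input, or dst too small ... }
*/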

#endif  /* FSE_H */

#if defined(FSE_STATIC_LINKING_ONLY) && !defined(FSE_H_FSE_STATIC_LINKING_ONLY)
#define FSE_H_FSE_STATIC_LINKING_ONLY

/* *** Dependency *** */
#include "bitstream.h"


/* *****************************************
*  Static allocation
*******************************************/
/* FSE buffer bounds */
#define FSE_NCOUNTBOUND 512
#define FSE_BLOCKBOUND(size) ((size) + ((size)>>7) + 4 /* fse states */ + sizeof(size_t) /* bitContainer */)
#define FSE_COMPRESSBOUND(size) (FSE_NCOUNTBOUND + FSE_BLOCKBOUND(size))   /* Macro version, useful for static allocation */

/* It is possible to statically allocate FSE CTable/DTable as a table of FSE_CTable/FSE_DTable using below macros */
#define FSE_CTABLE_SIZE_U32(maxTableLog, maxSymbolValue)   (1 + (1<<((maxTableLog)-1)) + (((maxSymbolValue)+1)*2))
#define FSE_DTABLE_SIZE_U32(maxTableLog)                   (1 + (1<<(maxTableLog)))

/* or use the size to malloc() space directly. Pay attention to alignment restrictions though */
#define FSE_CTABLE_SIZE(maxTableLog, maxSymbolValue)   (FSE_CTABLE_SIZE_U32(maxTableLog, maxSymbolValue) * sizeof(FSE_CTable))
#define FSE_DTABLE_SIZE(maxTableLog)                   (FSE_DTABLE_SIZE_U32(maxTableLog) * sizeof(FSE_DTable))
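
/* e.g. static allocation for tableLog 12 and a 256-symbol alphabet (illustrative):
 *     FSE_CTable ctable[FSE_CTABLE_SIZE_U32(12, 255)];
 *     FSE_DTable dtable[FSE_DTABLE_SIZE_U32(12)];
 */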


/* *****************************************
 *   FSE advanced API
 ***************************************** */

unsigned FSE_optimalTableLog_internal(unsigned maxTableLog, size_t srcSize, unsigned maxSymbolValue, unsigned minus);
/**< same as FSE_optimalTableLog(), which used `minus==2` */

size_t FSE_buildCTable_rle (FSE_CTable* ct, unsigned char symbolValue);
/**< build a fake FSE_CTable, designed to compress always the same symbolValue */

/* FSE_buildCTable_wksp() :
 * Same as FSE_buildCTable(), but using an externally allocated scratch buffer (`workSpace`).
 * `wkspSize` must be >= `FSE_BUILD_CTABLE_WORKSPACE_SIZE_U32(maxSymbolValue, tableLog)` of `unsigned`.
 * See FSE_buildCTable_wksp() for breakdown of workspace usage.
 */
#define FSE_BUILD_CTABLE_WORKSPACE_SIZE_U32(maxSymbolValue, tableLog) (((maxSymbolValue + 2) + (1ull << (tableLog)))/2 + sizeof(U64)/sizeof(U32) /* additional 8 bytes for potential table overwrite */)
#define FSE_BUILD_CTABLE_WORKSPACE_SIZE(maxSymbolValue, tableLog) (sizeof(unsigned) * FSE_BUILD_CTABLE_WORKSPACE_SIZE_U32(maxSymbolValue, tableLog))
size_t FSE_buildCTable_wksp(FSE_CTable* ct, const short* normalizedCounter, unsigned maxSymbolValue, unsigned tableLog, void* workSpace, size_t wkspSize);

#define FSE_BUILD_DTABLE_WKSP_SIZE(maxTableLog, maxSymbolValue) (sizeof(short) * (maxSymbolValue + 1) + (1ULL << maxTableLog) + 8)
#define FSE_BUILD_DTABLE_WKSP_SIZE_U32(maxTableLog, maxSymbolValue) ((FSE_BUILD_DTABLE_WKSP_SIZE(maxTableLog, maxSymbolValue) + sizeof(unsigned) - 1) / sizeof(unsigned))
FSE_PUBLIC_API size_t FSE_buildDTable_wksp(FSE_DTable* dt, const short* normalizedCounter, unsigned maxSymbolValue, unsigned tableLog, void* workSpace, size_t wkspSize);
/**< Same as FSE_buildDTable(), using an externally allocated `workspace` produced with `FSE_BUILD_DTABLE_WKSP_SIZE_U32(maxSymbolValue)` */

#define FSE_DECOMPRESS_WKSP_SIZE_U32(maxTableLog, maxSymbolValue) (FSE_DTABLE_SIZE_U32(maxTableLog) + 1 + FSE_BUILD_DTABLE_WKSP_SIZE_U32(maxTableLog, maxSymbolValue) + (FSE_MAX_SYMBOL_VALUE + 1) / 2 + 1)
#define FSE_DECOMPRESS_WKSP_SIZE(maxTableLog, maxSymbolValue) (FSE_DECOMPRESS_WKSP_SIZE_U32(maxTableLog, maxSymbolValue) * sizeof(unsigned))
size_t FSE_decompress_wksp_bmi2(void* dst, size_t dstCapacity, const void* cSrc, size_t cSrcSize, unsigned maxLog, void* workSpace, size_t wkspSize, int bmi2);
/**< same as FSE_decompress(), using an externally allocated `workSpace` produced with `FSE_DECOMPRESS_WKSP_SIZE_U32(maxLog, maxSymbolValue)`.
 * Set bmi2 to 1 if your CPU supports BMI2 or 0 if it doesn't */

typedef enum {
    FSE_repeat_none,  /**< Cannot use the previous table */
    FSE_repeat_check, /**< Can use the previous table but it must be checked */
    FSE_repeat_valid  /**< Can use the previous table and it is assumed to be valid */
} FSE_repeat;

/* *****************************************
*  FSE symbol compression API
*******************************************/
/*!
   This API consists of small unitary functions, which highly benefit from being inlined.
   Hence their body are included in next section.
*/
typedef struct {
    ptrdiff_t   value;
    const void* stateTable;
    const void* symbolTT;
    unsigned    stateLog;
} FSE_CState_t;

static void FSE_initCState(FSE_CState_t* CStatePtr, const FSE_CTable* ct);

static void FSE_encodeSymbol(BIT_CStream_t* bitC, FSE_CState_t* CStatePtr, unsigned symbol);

static void FSE_flushCState(BIT_CStream_t* bitC, const FSE_CState_t* CStatePtr);

/**<
These functions are inner components of FSE_compress_usingCTable().
They allow the creation of custom streams, mixing multiple tables and bit sources.

A key property to keep in mind is that encoding and decoding are done **in reverse direction**.
So the first symbol you will encode is the last you will decode, like a LIFO stack.

You will need a few variables to track your CStream. They are :

FSE_CTable    ct;         // Provided by FSE_buildCTable()
BIT_CStream_t bitStream;  // bitStream tracking structure
FSE_CState_t  state;      // State tracking structure (can have several)

The first thing to do is to init bitStream and state.
    size_t errorCode = BIT_initCStream(&bitStream, dstBuffer, maxDstSize);
    FSE_initCState(&state, ct);

Note that BIT_initCStream() can produce an error code, so its result should be tested, using FSE_isError();
You can then encode your input data, byte after byte.
FSE_encodeSymbol() outputs a maximum of 'tableLog' bits at a time.
Remember decoding will be done in reverse direction.
    FSE_encodeByte(&bitStream, &state, symbol);

At any time, you can also add any bit sequence.
Note : maximum allowed nbBits is 25, for compatibility with 32-bits decoders
    BIT_addBits(&bitStream, bitField, nbBits);

The above methods don't commit data to memory, they just store it into local register, for speed.
Local register size is 64-bits on 64-bits systems, 32-bits on 32-bits systems (size_t).
Writing data to memory is a manual operation, performed by the flushBits function.
    BIT_flushBits(&bitStream);

Your last FSE encoding operation shall be to flush your last state value(s).
    FSE_flushState(&bitStream, &state);

Finally, you must close the bitStream.
The function returns the size of CStream in bytes.
If data couldn't fit into dstBuffer, it will return a 0 ( == not compressible)
If there is an error, it returns an errorCode (which can be tested using FSE_isError()).
    size_t size = BIT_closeCStream(&bitStream);
*/
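
/*
A compact sketch of a complete encode pass over a byte buffer, tying the
pieces above together (illustrative only: `ct`, `src`, `srcSize`, `dst` and
`dstCapacity` are caller-provided, srcSize >= 1 is assumed, and flushing
after every symbol is conservative; production code batches its flushes):

    BIT_CStream_t bitC;
    FSE_CState_t  state;
    size_t n = srcSize;
    if (FSE_isError(BIT_initCStream(&bitC, dst, dstCapacity))) return 0;
    FSE_initCState2(&state, ct, src[--n]);   // last-decoded symbol goes in first
    while (n > 0) {
        FSE_encodeSymbol(&bitC, &state, src[--n]);
        BIT_flushBits(&bitC);                // commit the local register to memory
    }
    FSE_flushCState(&bitC, &state);
    return BIT_closeCStream(&bitC);          // 0 means "didn't fit"
*/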


/* *****************************************
*  FSE symbol decompression API
*******************************************/
typedef struct {
    size_t      state;
    const void* table;   /* precise table may vary, depending on U16 */
} FSE_DState_t;


static void     FSE_initDState(FSE_DState_t* DStatePtr, BIT_DStream_t* bitD, const FSE_DTable* dt);

static unsigned char FSE_decodeSymbol(FSE_DState_t* DStatePtr, BIT_DStream_t* bitD);

static unsigned FSE_endOfDState(const FSE_DState_t* DStatePtr);

/**<
Let's now decompose FSE_decompress_usingDTable() into its unitary components.
You will decode FSE-encoded symbols from the bitStream,
and also any other bitFields you put in, **in reverse order**.

You will need a few variables to track your bitStream. They are :

BIT_DStream_t DStream;    // Stream context
FSE_DState_t  DState;     // State context. Multiple ones are possible
FSE_DTable*   DTablePtr;  // Decoding table, provided by FSE_buildDTable()

The first thing to do is to init the bitStream.
    errorCode = BIT_initDStream(&DStream, srcBuffer, srcSize);

You should then retrieve your initial state(s)
(in reverse flushing order if you have several ones) :
    errorCode = FSE_initDState(&DState, &DStream, DTablePtr);

You can then decode your data, symbol after symbol.
For information the maximum number of bits read by FSE_decodeSymbol() is 'tableLog'.
Keep in mind that symbols are decoded in reverse order, like a LIFO stack (last in, first out).
    unsigned char symbol = FSE_decodeSymbol(&DState, &DStream);

You can retrieve any bitfield you eventually stored into the bitStream (in reverse order)
Note : maximum allowed nbBits is 25, for 32-bits compatibility
    size_t bitField = BIT_readBits(&DStream, nbBits);

All above operations only read from local register (which size depends on size_t).
Refueling the register from memory is manually performed by the reload method.
    endSignal = FSE_reloadDStream(&DStream);

BIT_reloadDStream() result tells if there is still some more data to read from DStream.
BIT_DStream_unfinished : there is still some data left into the DStream.
BIT_DStream_endOfBuffer : Dstream reached end of buffer. Its container may no longer be completely filled.
BIT_DStream_completed : Dstream reached its exact end, corresponding in general to decompression completed.
BIT_DStream_tooFar : Dstream went too far. Decompression result is corrupted.

When reaching end of buffer (BIT_DStream_endOfBuffer), progress slowly, notably if you decode multiple symbols per loop,
to properly detect the exact end of stream.
After each decoded symbol, check if DStream is fully consumed using this simple test :
    BIT_reloadDStream(&DStream) >= BIT_DStream_completed

When it's done, verify decompression is fully completed, by checking both DStream and the relevant states.
Checking if DStream has reached its end is performed by :
    BIT_endOfDStream(&DStream);
Check also the states. There might be some symbols left there, if some high probability ones (>50%) are possible.
    FSE_endOfDState(&DState);
*/
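
/*
A compact sketch of a complete decode pass with a single state, following the
termination test above (illustrative only: `dt`, `cSrc`, `cSrcSize` and `dst`
are caller-provided, and output bounds checks are elided; note that symbols
may remain in the state after the stream completes, hence the final checks):

    BIT_DStream_t bitD;
    FSE_DState_t  state;
    BYTE* op = (BYTE*)dst;
    if (FSE_isError(BIT_initDStream(&bitD, cSrc, cSrcSize))) return ERROR(srcSize_wrong);
    FSE_initDState(&state, &bitD, dt);
    do {
        *op++ = FSE_decodeSymbol(&state, &bitD);
    } while (BIT_reloadDStream(&bitD) < BIT_DStream_completed);
    // then confirm BIT_endOfDStream(&bitD) and FSE_endOfDState(&state)
*/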


/* *****************************************
*  FSE unsafe API
*******************************************/
static unsigned char FSE_decodeSymbolFast(FSE_DState_t* DStatePtr, BIT_DStream_t* bitD);
/* faster, but works only if nbBits is always >= 1 (otherwise, result will be corrupted) */


/* *****************************************
*  Implementation of inlined functions
*******************************************/
typedef struct {
    int deltaFindState;
    U32 deltaNbBits;
} FSE_symbolCompressionTransform; /* total 8 bytes */

MEM_STATIC void FSE_initCState(FSE_CState_t* statePtr, const FSE_CTable* ct)
{
    const void* ptr = ct;
    const U16* u16ptr = (const U16*) ptr;
    const U32 tableLog = MEM_read16(ptr);
    statePtr->value = (ptrdiff_t)1<<tableLog;
    statePtr->stateTable = u16ptr+2;
    statePtr->symbolTT = ct + 1 + (tableLog ? (1<<(tableLog-1)) : 1);
    statePtr->stateLog = tableLog;
}


/*! FSE_initCState2() :
*   Same as FSE_initCState(), but the first symbol to include (which will be the last to be read)
*   uses the smallest state value possible, saving the cost of this symbol */
MEM_STATIC void FSE_initCState2(FSE_CState_t* statePtr, const FSE_CTable* ct, U32 symbol)
{
    FSE_initCState(statePtr, ct);
    {   const FSE_symbolCompressionTransform symbolTT = ((const FSE_symbolCompressionTransform*)(statePtr->symbolTT))[symbol];
        const U16* stateTable = (const U16*)(statePtr->stateTable);
        U32 nbBitsOut  = (U32)((symbolTT.deltaNbBits + (1<<15)) >> 16);
        statePtr->value = (nbBitsOut << 16) - symbolTT.deltaNbBits;
        statePtr->value = stateTable[(statePtr->value >> nbBitsOut) + symbolTT.deltaFindState];
    }
}

MEM_STATIC void FSE_encodeSymbol(BIT_CStream_t* bitC, FSE_CState_t* statePtr, unsigned symbol)
{
    FSE_symbolCompressionTransform const symbolTT = ((const FSE_symbolCompressionTransform*)(statePtr->symbolTT))[symbol];
    const U16* const stateTable = (const U16*)(statePtr->stateTable);
    U32 const nbBitsOut  = (U32)((statePtr->value + symbolTT.deltaNbBits) >> 16);
    BIT_addBits(bitC, statePtr->value, nbBitsOut);
    statePtr->value = stateTable[ (statePtr->value >> nbBitsOut) + symbolTT.deltaFindState];
}

MEM_STATIC void FSE_flushCState(BIT_CStream_t* bitC, const FSE_CState_t* statePtr)
{
    BIT_addBits(bitC, statePtr->value, statePtr->stateLog);
    BIT_flushBits(bitC);
}


/* FSE_getMaxNbBits() :
 * Approximate maximum cost of a symbol, in bits.
 * Fractional get rounded up (i.e. a symbol with a normalized frequency of 3 gives the same result as a frequency of 2)
 * note 1 : assume symbolValue is valid (<= maxSymbolValue)
 * note 2 : if freq[symbolValue]==0, @return a fake cost of tableLog+1 bits */
MEM_STATIC U32 FSE_getMaxNbBits(const void* symbolTTPtr, U32 symbolValue)
{
    const FSE_symbolCompressionTransform* symbolTT = (const FSE_symbolCompressionTransform*) symbolTTPtr;
    return (symbolTT[symbolValue].deltaNbBits + ((1<<16)-1)) >> 16;
}

/* FSE_bitCost() :
 * Approximate symbol cost, as fractional value, using fixed-point format (accuracyLog fractional bits)
 * note 1 : assume symbolValue is valid (<= maxSymbolValue)
 * note 2 : if freq[symbolValue]==0, @return a fake cost of tableLog+1 bits */
MEM_STATIC U32 FSE_bitCost(const void* symbolTTPtr, U32 tableLog, U32 symbolValue, U32 accuracyLog)
{
    const FSE_symbolCompressionTransform* symbolTT = (const FSE_symbolCompressionTransform*) symbolTTPtr;
    U32 const minNbBits = symbolTT[symbolValue].deltaNbBits >> 16;
    U32 const threshold = (minNbBits+1) << 16;
    assert(tableLog < 16);
    assert(accuracyLog < 31-tableLog);  /* ensure enough room for renormalization double shift */
    {   U32 const tableSize = 1 << tableLog;
        U32 const deltaFromThreshold = threshold - (symbolTT[symbolValue].deltaNbBits + tableSize);
        U32 const normalizedDeltaFromThreshold = (deltaFromThreshold << accuracyLog) >> tableLog;   /* linear interpolation (very approximate) */
        U32 const bitMultiplier = 1 << accuracyLog;
        assert(symbolTT[symbolValue].deltaNbBits + tableSize <= threshold);
        assert(normalizedDeltaFromThreshold <= bitMultiplier);
        return (minNbBits+1)*bitMultiplier - normalizedDeltaFromThreshold;
    }
}


/* ======    Decompression    ====== */

typedef struct {
    U16 tableLog;
    U16 fastMode;
} FSE_DTableHeader;   /* sizeof U32 */

typedef struct
{
    unsigned short newState;
    unsigned char  symbol;
    unsigned char  nbBits;
} FSE_decode_t;   /* size == U32 */

MEM_STATIC void FSE_initDState(FSE_DState_t* DStatePtr, BIT_DStream_t* bitD, const FSE_DTable* dt)
{
    const void* ptr = dt;
    const FSE_DTableHeader* const DTableH = (const FSE_DTableHeader*)ptr;
    DStatePtr->state = BIT_readBits(bitD, DTableH->tableLog);
    BIT_reloadDStream(bitD);
    DStatePtr->table = dt + 1;
}

MEM_STATIC BYTE FSE_peekSymbol(const FSE_DState_t* DStatePtr)
{
    FSE_decode_t const DInfo = ((const FSE_decode_t*)(DStatePtr->table))[DStatePtr->state];
    return DInfo.symbol;
}

MEM_STATIC void FSE_updateState(FSE_DState_t* DStatePtr, BIT_DStream_t* bitD)
{
    FSE_decode_t const DInfo = ((const FSE_decode_t*)(DStatePtr->table))[DStatePtr->state];
    U32 const nbBits = DInfo.nbBits;
    size_t const lowBits = BIT_readBits(bitD, nbBits);
    DStatePtr->state = DInfo.newState + lowBits;
}

MEM_STATIC BYTE FSE_decodeSymbol(FSE_DState_t* DStatePtr, BIT_DStream_t* bitD)
{
    FSE_decode_t const DInfo = ((const FSE_decode_t*)(DStatePtr->table))[DStatePtr->state];
    U32 const nbBits = DInfo.nbBits;
    BYTE const symbol = DInfo.symbol;
    size_t const lowBits = BIT_readBits(bitD, nbBits);

    DStatePtr->state = DInfo.newState + lowBits;
    return symbol;
}

/*! FSE_decodeSymbolFast() :
    unsafe, only works if no symbol has a probability > 50% */
MEM_STATIC BYTE FSE_decodeSymbolFast(FSE_DState_t* DStatePtr, BIT_DStream_t* bitD)
{
    FSE_decode_t const DInfo = ((const FSE_decode_t*)(DStatePtr->table))[DStatePtr->state];
    U32 const nbBits = DInfo.nbBits;
    BYTE const symbol = DInfo.symbol;
    size_t const lowBits = BIT_readBitsFast(bitD, nbBits);

    DStatePtr->state = DInfo.newState + lowBits;
    return symbol;
}

MEM_STATIC unsigned FSE_endOfDState(const FSE_DState_t* DStatePtr)
{
    return DStatePtr->state == 0;
}



#ifndef FSE_COMMONDEFS_ONLY

/* **************************************************************
*  Tuning parameters
****************************************************************/
/*!MEMORY_USAGE :
*  Memory usage formula : N->2^N Bytes (examples : 10 -> 1KB; 12 -> 4KB ; 16 -> 64KB; 20 -> 1MB; etc.)
*  Increasing memory usage improves compression ratio
*  Reduced memory usage can improve speed, due to cache effect
*  Recommended max value is 14, for 16KB, which nicely fits into Intel x86 L1 cache */
#ifndef FSE_MAX_MEMORY_USAGE
#  define FSE_MAX_MEMORY_USAGE 14
#endif
#ifndef FSE_DEFAULT_MEMORY_USAGE
#  define FSE_DEFAULT_MEMORY_USAGE 13
#endif
#if (FSE_DEFAULT_MEMORY_USAGE > FSE_MAX_MEMORY_USAGE)
#  error "FSE_DEFAULT_MEMORY_USAGE must be <= FSE_MAX_MEMORY_USAGE"
#endif

/*!FSE_MAX_SYMBOL_VALUE :
*  Maximum symbol value authorized.
*  Required for proper stack allocation */
#ifndef FSE_MAX_SYMBOL_VALUE
#  define FSE_MAX_SYMBOL_VALUE 255
#endif

/* **************************************************************
*  template functions type & suffix
****************************************************************/
#define FSE_FUNCTION_TYPE BYTE
#define FSE_FUNCTION_EXTENSION
#define FSE_DECODE_TYPE FSE_decode_t


#endif   /* !FSE_COMMONDEFS_ONLY */


/* ***************************************************************
*  Constants
*****************************************************************/
#define FSE_MAX_TABLELOG  (FSE_MAX_MEMORY_USAGE-2)
#define FSE_MAX_TABLESIZE (1U<<FSE_MAX_TABLELOG)
#define FSE_MAXTABLESIZE_MASK (FSE_MAX_TABLESIZE-1)
#define FSE_DEFAULT_TABLELOG (FSE_DEFAULT_MEMORY_USAGE-2)
#define FSE_MIN_TABLELOG 5

#define FSE_TABLELOG_ABSOLUTE_MAX 15
#if FSE_MAX_TABLELOG > FSE_TABLELOG_ABSOLUTE_MAX
#  error "FSE_MAX_TABLELOG > FSE_TABLELOG_ABSOLUTE_MAX is not supported"
#endif

#define FSE_TABLESTEP(tableSize) (((tableSize)>>1) + ((tableSize)>>3) + 3)


#endif /* FSE_STATIC_LINKING_ONLY */


#if defined (__cplusplus)
}
#endif

311
ext/zstd/lib/common/fse_decompress.c
vendored
Normal file
@@ -0,0 +1,311 @@
|
||||
/* ******************************************************************
|
||||
* FSE : Finite State Entropy decoder
|
||||
* Copyright (c) Meta Platforms, Inc. and affiliates.
|
||||
*
|
||||
* You can contact the author at :
|
||||
* - FSE source repository : https://github.com/Cyan4973/FiniteStateEntropy
|
||||
* - Public forum : https://groups.google.com/forum/#!forum/lz4c
|
||||
*
|
||||
* This source code is licensed under both the BSD-style license (found in the
|
||||
* LICENSE file in the root directory of this source tree) and the GPLv2 (found
|
||||
* in the COPYING file in the root directory of this source tree).
|
||||
* You may select, at your option, one of the above-listed licenses.
|
||||
****************************************************************** */
|
||||
|
||||
|
||||
/* **************************************************************
|
||||
* Includes
|
||||
****************************************************************/
|
||||
#include "debug.h" /* assert */
|
||||
#include "bitstream.h"
|
||||
#include "compiler.h"
|
||||
#define FSE_STATIC_LINKING_ONLY
|
||||
#include "fse.h"
|
||||
#include "error_private.h"
|
||||
#define ZSTD_DEPS_NEED_MALLOC
|
||||
#include "zstd_deps.h"
|
||||
#include "bits.h" /* ZSTD_highbit32 */
|
||||
|
||||
|
||||
/* **************************************************************
|
||||
* Error Management
|
||||
****************************************************************/
|
||||
#define FSE_isError ERR_isError
|
||||
#define FSE_STATIC_ASSERT(c) DEBUG_STATIC_ASSERT(c) /* use only *after* variable declarations */
|
||||
|
||||
|
||||
/* **************************************************************
|
||||
* Templates
|
||||
****************************************************************/
|
||||
/*
|
||||
designed to be included
|
||||
for type-specific functions (template emulation in C)
|
||||
Objective is to write these functions only once, for improved maintenance
|
||||
*/
|
||||
|
||||
/* safety checks */
|
||||
#ifndef FSE_FUNCTION_EXTENSION
|
||||
# error "FSE_FUNCTION_EXTENSION must be defined"
|
||||
#endif
|
||||
#ifndef FSE_FUNCTION_TYPE
|
||||
#  error "FSE_FUNCTION_TYPE must be defined"
#endif

/* Function names */
#define FSE_CAT(X,Y) X##Y
#define FSE_FUNCTION_NAME(X,Y) FSE_CAT(X,Y)
#define FSE_TYPE_NAME(X,Y) FSE_CAT(X,Y)

static size_t FSE_buildDTable_internal(FSE_DTable* dt, const short* normalizedCounter, unsigned maxSymbolValue, unsigned tableLog, void* workSpace, size_t wkspSize)
{
    void* const tdPtr = dt+1;   /* because *dt is unsigned, 32-bits aligned on 32-bits */
    FSE_DECODE_TYPE* const tableDecode = (FSE_DECODE_TYPE*) (tdPtr);
    U16* symbolNext = (U16*)workSpace;
    BYTE* spread = (BYTE*)(symbolNext + maxSymbolValue + 1);

    U32 const maxSV1 = maxSymbolValue + 1;
    U32 const tableSize = 1 << tableLog;
    U32 highThreshold = tableSize-1;

    /* Sanity Checks */
    if (FSE_BUILD_DTABLE_WKSP_SIZE(tableLog, maxSymbolValue) > wkspSize) return ERROR(maxSymbolValue_tooLarge);
    if (maxSymbolValue > FSE_MAX_SYMBOL_VALUE) return ERROR(maxSymbolValue_tooLarge);
    if (tableLog > FSE_MAX_TABLELOG) return ERROR(tableLog_tooLarge);

    /* Init, lay down lowprob symbols */
    {   FSE_DTableHeader DTableH;
        DTableH.tableLog = (U16)tableLog;
        DTableH.fastMode = 1;
        {   S16 const largeLimit= (S16)(1 << (tableLog-1));
            U32 s;
            for (s=0; s<maxSV1; s++) {
                if (normalizedCounter[s]==-1) {
                    tableDecode[highThreshold--].symbol = (FSE_FUNCTION_TYPE)s;
                    symbolNext[s] = 1;
                } else {
                    if (normalizedCounter[s] >= largeLimit) DTableH.fastMode=0;
                    symbolNext[s] = normalizedCounter[s];
        }   }   }
        ZSTD_memcpy(dt, &DTableH, sizeof(DTableH));
    }

    /* Spread symbols */
    if (highThreshold == tableSize - 1) {
        size_t const tableMask = tableSize-1;
        size_t const step = FSE_TABLESTEP(tableSize);
        /* First lay down the symbols in order.
         * We use a uint64_t to lay down 8 bytes at a time. This reduces branch
         * misses since small blocks generally have small table logs, so nearly
         * all symbols have counts <= 8. We ensure we have 8 bytes at the end of
         * our buffer to handle the over-write.
         */
        {
            U64 const add = 0x0101010101010101ull;
            size_t pos = 0;
            U64 sv = 0;
            U32 s;
            for (s=0; s<maxSV1; ++s, sv += add) {
                int i;
                int const n = normalizedCounter[s];
                MEM_write64(spread + pos, sv);
                for (i = 8; i < n; i += 8) {
                    MEM_write64(spread + pos + i, sv);
                }
                pos += n;
            }
        }
        /* Now we spread those positions across the table.
         * The benefit of doing it in two stages is that we avoid the
         * variable size inner loop, which caused lots of branch misses.
         * Now we can run through all the positions without any branch misses.
         * We unroll the loop twice, since that is what empirically worked best.
         */
        {
            size_t position = 0;
            size_t s;
            size_t const unroll = 2;
            assert(tableSize % unroll == 0); /* FSE_MIN_TABLELOG is 5 */
            for (s = 0; s < (size_t)tableSize; s += unroll) {
                size_t u;
                for (u = 0; u < unroll; ++u) {
                    size_t const uPosition = (position + (u * step)) & tableMask;
                    tableDecode[uPosition].symbol = spread[s + u];
                }
                position = (position + (unroll * step)) & tableMask;
            }
            assert(position == 0);
        }
    } else {
        U32 const tableMask = tableSize-1;
        U32 const step = FSE_TABLESTEP(tableSize);
        U32 s, position = 0;
        for (s=0; s<maxSV1; s++) {
            int i;
            for (i=0; i<normalizedCounter[s]; i++) {
                tableDecode[position].symbol = (FSE_FUNCTION_TYPE)s;
                position = (position + step) & tableMask;
                while (position > highThreshold) position = (position + step) & tableMask;   /* lowprob area */
        }   }
        if (position!=0) return ERROR(GENERIC);   /* position must reach all cells once, otherwise normalizedCounter is incorrect */
    }

    /* Build Decoding table */
    {   U32 u;
        for (u=0; u<tableSize; u++) {
            FSE_FUNCTION_TYPE const symbol = (FSE_FUNCTION_TYPE)(tableDecode[u].symbol);
            U32 const nextState = symbolNext[symbol]++;
            tableDecode[u].nbBits = (BYTE) (tableLog - ZSTD_highbit32(nextState) );
            tableDecode[u].newState = (U16) ( (nextState << tableDecode[u].nbBits) - tableSize);
    }   }

    return 0;
}

size_t FSE_buildDTable_wksp(FSE_DTable* dt, const short* normalizedCounter, unsigned maxSymbolValue, unsigned tableLog, void* workSpace, size_t wkspSize)
{
    return FSE_buildDTable_internal(dt, normalizedCounter, maxSymbolValue, tableLog, workSpace, wkspSize);
}
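
/* Editorial note (not part of zstd): the two-stage spread above first writes
 * each symbol's run contiguously into `spread`, then scatters with a step
 * that is coprime to the table size, so every cell is visited exactly once.
 * A minimal, self-contained sketch of the scatter stage, using a
 * hypothetical 16-entry table and illustrative counts: */
static void spread_demo(void)
{
    unsigned char spread[16] = {0,0,0,0,0,1,1,1,2,2,3,3,4,5,6,7}; /* stage 1: runs in order */
    unsigned char table[16];
    size_t const step = (16 >> 1) + (16 >> 3) + 3;   /* FSE_TABLESTEP(16) == 13 */
    size_t pos = 0, i;
    for (i = 0; i < 16; i++) {     /* stage 2: 13 is odd, hence coprime to 16 */
        table[pos] = spread[i];
        pos = (pos + step) & 15;
    }
    /* pos has wrapped back to 0: each of the 16 cells was written once */
}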

#ifndef FSE_COMMONDEFS_ONLY

/*-*******************************************************
*  Decompression (Byte symbols)
*********************************************************/

FORCE_INLINE_TEMPLATE size_t FSE_decompress_usingDTable_generic(
          void* dst, size_t maxDstSize,
    const void* cSrc, size_t cSrcSize,
    const FSE_DTable* dt, const unsigned fast)
{
    BYTE* const ostart = (BYTE*) dst;
    BYTE* op = ostart;
    BYTE* const omax = op + maxDstSize;
    BYTE* const olimit = omax-3;

    BIT_DStream_t bitD;
    FSE_DState_t state1;
    FSE_DState_t state2;

    /* Init */
    CHECK_F(BIT_initDStream(&bitD, cSrc, cSrcSize));

    FSE_initDState(&state1, &bitD, dt);
    FSE_initDState(&state2, &bitD, dt);

#define FSE_GETSYMBOL(statePtr) fast ? FSE_decodeSymbolFast(statePtr, &bitD) : FSE_decodeSymbol(statePtr, &bitD)

    /* 4 symbols per loop */
    for ( ; (BIT_reloadDStream(&bitD)==BIT_DStream_unfinished) & (op<olimit) ; op+=4) {
        op[0] = FSE_GETSYMBOL(&state1);

        if (FSE_MAX_TABLELOG*2+7 > sizeof(bitD.bitContainer)*8)    /* This test must be static */
            BIT_reloadDStream(&bitD);

        op[1] = FSE_GETSYMBOL(&state2);

        if (FSE_MAX_TABLELOG*4+7 > sizeof(bitD.bitContainer)*8)    /* This test must be static */
            { if (BIT_reloadDStream(&bitD) > BIT_DStream_unfinished) { op+=2; break; } }

        op[2] = FSE_GETSYMBOL(&state1);

        if (FSE_MAX_TABLELOG*2+7 > sizeof(bitD.bitContainer)*8)    /* This test must be static */
            BIT_reloadDStream(&bitD);

        op[3] = FSE_GETSYMBOL(&state2);
    }

    /* tail */
    /* note : BIT_reloadDStream(&bitD) >= FSE_DStream_partiallyFilled; Ends at exactly BIT_DStream_completed */
    while (1) {
        if (op>(omax-2)) return ERROR(dstSize_tooSmall);
        *op++ = FSE_GETSYMBOL(&state1);
        if (BIT_reloadDStream(&bitD)==BIT_DStream_overflow) {
            *op++ = FSE_GETSYMBOL(&state2);
            break;
        }

        if (op>(omax-2)) return ERROR(dstSize_tooSmall);
        *op++ = FSE_GETSYMBOL(&state2);
        if (BIT_reloadDStream(&bitD)==BIT_DStream_overflow) {
            *op++ = FSE_GETSYMBOL(&state1);
            break;
    }   }

    return op-ostart;
}
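
/* Editorial note (not part of zstd): the loop above interleaves two FSE
 * states over one shared bitstream so consecutive table lookups can overlap
 * in the CPU pipeline.  Both `sizeof(bitD.bitContainer)*8` comparisons are
 * compile-time constants, so each reload test is either always kept or
 * always removed; there is no runtime branch on the container width. */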

typedef struct {
    short ncount[FSE_MAX_SYMBOL_VALUE + 1];
    FSE_DTable dtable[1]; /* Dynamically sized */
} FSE_DecompressWksp;


FORCE_INLINE_TEMPLATE size_t FSE_decompress_wksp_body(
        void* dst, size_t dstCapacity,
        const void* cSrc, size_t cSrcSize,
        unsigned maxLog, void* workSpace, size_t wkspSize,
        int bmi2)
{
    const BYTE* const istart = (const BYTE*)cSrc;
    const BYTE* ip = istart;
    unsigned tableLog;
    unsigned maxSymbolValue = FSE_MAX_SYMBOL_VALUE;
    FSE_DecompressWksp* const wksp = (FSE_DecompressWksp*)workSpace;

    DEBUG_STATIC_ASSERT((FSE_MAX_SYMBOL_VALUE + 1) % 2 == 0);
    if (wkspSize < sizeof(*wksp)) return ERROR(GENERIC);

    /* normal FSE decoding mode */
    {
        size_t const NCountLength = FSE_readNCount_bmi2(wksp->ncount, &maxSymbolValue, &tableLog, istart, cSrcSize, bmi2);
        if (FSE_isError(NCountLength)) return NCountLength;
        if (tableLog > maxLog) return ERROR(tableLog_tooLarge);
        assert(NCountLength <= cSrcSize);
        ip += NCountLength;
        cSrcSize -= NCountLength;
    }

    if (FSE_DECOMPRESS_WKSP_SIZE(tableLog, maxSymbolValue) > wkspSize) return ERROR(tableLog_tooLarge);
    assert(sizeof(*wksp) + FSE_DTABLE_SIZE(tableLog) <= wkspSize);
    workSpace = (BYTE*)workSpace + sizeof(*wksp) + FSE_DTABLE_SIZE(tableLog);
    wkspSize -= sizeof(*wksp) + FSE_DTABLE_SIZE(tableLog);

    CHECK_F( FSE_buildDTable_internal(wksp->dtable, wksp->ncount, maxSymbolValue, tableLog, workSpace, wkspSize) );

    {
        const void* ptr = wksp->dtable;
        const FSE_DTableHeader* DTableH = (const FSE_DTableHeader*)ptr;
        const U32 fastMode = DTableH->fastMode;

        /* select fast mode (static) */
        if (fastMode) return FSE_decompress_usingDTable_generic(dst, dstCapacity, ip, cSrcSize, wksp->dtable, 1);
        return FSE_decompress_usingDTable_generic(dst, dstCapacity, ip, cSrcSize, wksp->dtable, 0);
    }
}

/* Avoids the FORCE_INLINE of the _body() function. */
static size_t FSE_decompress_wksp_body_default(void* dst, size_t dstCapacity, const void* cSrc, size_t cSrcSize, unsigned maxLog, void* workSpace, size_t wkspSize)
{
    return FSE_decompress_wksp_body(dst, dstCapacity, cSrc, cSrcSize, maxLog, workSpace, wkspSize, 0);
}

#if DYNAMIC_BMI2
BMI2_TARGET_ATTRIBUTE static size_t FSE_decompress_wksp_body_bmi2(void* dst, size_t dstCapacity, const void* cSrc, size_t cSrcSize, unsigned maxLog, void* workSpace, size_t wkspSize)
{
    return FSE_decompress_wksp_body(dst, dstCapacity, cSrc, cSrcSize, maxLog, workSpace, wkspSize, 1);
}
#endif

size_t FSE_decompress_wksp_bmi2(void* dst, size_t dstCapacity, const void* cSrc, size_t cSrcSize, unsigned maxLog, void* workSpace, size_t wkspSize, int bmi2)
{
#if DYNAMIC_BMI2
    if (bmi2) {
        return FSE_decompress_wksp_body_bmi2(dst, dstCapacity, cSrc, cSrcSize, maxLog, workSpace, wkspSize);
    }
#endif
    (void)bmi2;
    return FSE_decompress_wksp_body_default(dst, dstCapacity, cSrc, cSrcSize, maxLog, workSpace, wkspSize);
}
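
/* Editorial note (not part of zstd): this wrapper is the usual zstd dispatch
 * pattern: one FORCE_INLINE body is instantiated twice, once plain and once
 * with BMI2 target attributes, and the cheap runtime `if (bmi2)` picks the
 * specialised copy only when DYNAMIC_BMI2 support was compiled in. */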

#endif   /* FSE_COMMONDEFS_ONLY */
273
ext/zstd/lib/common/huf.h
Normal file
@@ -0,0 +1,273 @@
/* ******************************************************************
 * huff0 huffman codec,
 * part of Finite State Entropy library
 * Copyright (c) Meta Platforms, Inc. and affiliates.
 *
 * You can contact the author at :
 * - Source repository : https://github.com/Cyan4973/FiniteStateEntropy
 *
 * This source code is licensed under both the BSD-style license (found in the
 * LICENSE file in the root directory of this source tree) and the GPLv2 (found
 * in the COPYING file in the root directory of this source tree).
 * You may select, at your option, one of the above-listed licenses.
****************************************************************** */

#if defined (__cplusplus)
extern "C" {
#endif

#ifndef HUF_H_298734234
#define HUF_H_298734234

/* *** Dependencies *** */
#include "zstd_deps.h"    /* size_t */
#include "mem.h"          /* U32 */
#define FSE_STATIC_LINKING_ONLY
#include "fse.h"


/* *** Tool functions *** */
#define HUF_BLOCKSIZE_MAX (128 * 1024)       /**< maximum input size for a single block compressed with HUF_compress */
size_t HUF_compressBound(size_t size);       /**< maximum compressed size (worst case) */

/* Error Management */
unsigned    HUF_isError(size_t code);        /**< tells if a return value is an error code */
const char* HUF_getErrorName(size_t code);   /**< provides error code string (useful for debugging) */


#define HUF_WORKSPACE_SIZE ((8 << 10) + 512 /* sorting scratch space */)
#define HUF_WORKSPACE_SIZE_U64 (HUF_WORKSPACE_SIZE / sizeof(U64))

/* *** Constants *** */
#define HUF_TABLELOG_MAX      12   /* max runtime value of tableLog (due to static allocation); can be modified up to HUF_TABLELOG_ABSOLUTEMAX */
#define HUF_TABLELOG_DEFAULT  11   /* default tableLog value when none specified */
#define HUF_SYMBOLVALUE_MAX  255

#define HUF_TABLELOG_ABSOLUTEMAX  12   /* absolute limit of HUF_MAX_TABLELOG. Beyond that value, code does not work */
#if (HUF_TABLELOG_MAX > HUF_TABLELOG_ABSOLUTEMAX)
#  error "HUF_TABLELOG_MAX is too large !"
#endif


/* ****************************************
*  Static allocation
******************************************/
/* HUF buffer bounds */
#define HUF_CTABLEBOUND 129
#define HUF_BLOCKBOUND(size) (size + (size>>8) + 8)   /* only true when incompressible is pre-filtered with fast heuristic */
#define HUF_COMPRESSBOUND(size) (HUF_CTABLEBOUND + HUF_BLOCKBOUND(size))   /* Macro version, useful for static allocation */

/* static allocation of HUF's Compression Table */
/* this is a private definition, just exposed for allocation and strict aliasing purpose. never EVER access its members directly */
typedef size_t HUF_CElt;   /* consider it an incomplete type */
#define HUF_CTABLE_SIZE_ST(maxSymbolValue)   ((maxSymbolValue)+2)   /* Use tables of size_t, for proper alignment */
#define HUF_CTABLE_SIZE(maxSymbolValue)      (HUF_CTABLE_SIZE_ST(maxSymbolValue) * sizeof(size_t))
#define HUF_CREATE_STATIC_CTABLE(name, maxSymbolValue) \
    HUF_CElt name[HUF_CTABLE_SIZE_ST(maxSymbolValue)] /* no final ; */

/* static allocation of HUF's DTable */
typedef U32 HUF_DTable;
#define HUF_DTABLE_SIZE(maxTableLog)   (1 + (1<<(maxTableLog)))
#define HUF_CREATE_STATIC_DTABLEX1(DTable, maxTableLog) \
        HUF_DTable DTable[HUF_DTABLE_SIZE((maxTableLog)-1)] = { ((U32)((maxTableLog)-1) * 0x01000001) }
#define HUF_CREATE_STATIC_DTABLEX2(DTable, maxTableLog) \
        HUF_DTable DTable[HUF_DTABLE_SIZE(maxTableLog)] = { ((U32)(maxTableLog) * 0x01000001) }
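
/* Editorial example (not part of zstd): a statically allocated double-symbol
 * decoding table, sized for the largest permitted tableLog:
 *
 *     HUF_CREATE_STATIC_DTABLEX2(dtable, HUF_TABLELOG_MAX);
 *
 * The initializer packs maxTableLog into the first element, which acts as a
 * small header word the table readers can validate against. */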


/* ****************************************
*  Advanced decompression functions
******************************************/

/**
 * Huffman flags bitset.
 * For all flags, 0 is the default value.
 */
typedef enum {
    /**
     * If compiled with DYNAMIC_BMI2: Set flag only if the CPU supports BMI2 at runtime.
     * Otherwise: Ignored.
     */
    HUF_flags_bmi2 = (1 << 0),
    /**
     * If set: Test possible table depths to find the one that produces the smallest header + encoded size.
     * If unset: Use heuristic to find the table depth.
     */
    HUF_flags_optimalDepth = (1 << 1),
    /**
     * If set: If the previous table can encode the input, always reuse the previous table.
     * If unset: If the previous table can encode the input, reuse the previous table if it results in a smaller output.
     */
    HUF_flags_preferRepeat = (1 << 2),
    /**
     * If set: Sample the input and check if the sample is uncompressible, if it is then don't attempt to compress.
     * If unset: Always histogram the entire input.
     */
    HUF_flags_suspectUncompressible = (1 << 3),
    /**
     * If set: Don't use assembly implementations
     * If unset: Allow using assembly implementations
     */
    HUF_flags_disableAsm = (1 << 4),
    /**
     * If set: Don't use the fast decoding loop, always use the fallback decoding loop.
     * If unset: Use the fast decoding loop when possible.
     */
    HUF_flags_disableFast = (1 << 5)
} HUF_flags_e;


/* ****************************************
 *  HUF detailed API
 * ****************************************/
#define HUF_OPTIMAL_DEPTH_THRESHOLD ZSTD_btultra

/*! HUF_compress() does the following:
 *  1. count symbol occurrence from source[] into table count[] using FSE_count() (exposed within "fse.h")
 *  2. (optional) refine tableLog using HUF_optimalTableLog()
 *  3. build Huffman table from count using HUF_buildCTable()
 *  4. save Huffman table to memory buffer using HUF_writeCTable()
 *  5. encode the data stream using HUF_compress4X_usingCTable()
 *
 *  The following API allows targeting specific sub-functions for advanced tasks.
 *  For example, it's possible to compress several blocks using the same 'CTable',
 *  or to save and regenerate 'CTable' using external methods.
 */
unsigned HUF_minTableLog(unsigned symbolCardinality);
unsigned HUF_cardinality(const unsigned* count, unsigned maxSymbolValue);
unsigned HUF_optimalTableLog(unsigned maxTableLog, size_t srcSize, unsigned maxSymbolValue, void* workSpace,
                             size_t wkspSize, HUF_CElt* table, const unsigned* count, int flags); /* table is used as scratch space for building and testing tables, not a return value */
size_t HUF_writeCTable_wksp(void* dst, size_t maxDstSize, const HUF_CElt* CTable, unsigned maxSymbolValue, unsigned huffLog, void* workspace, size_t workspaceSize);
size_t HUF_compress4X_usingCTable(void* dst, size_t dstSize, const void* src, size_t srcSize, const HUF_CElt* CTable, int flags);
size_t HUF_estimateCompressedSize(const HUF_CElt* CTable, const unsigned* count, unsigned maxSymbolValue);
int HUF_validateCTable(const HUF_CElt* CTable, const unsigned* count, unsigned maxSymbolValue);
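
/* Editorial sketch (not part of zstd): the advanced flow described above,
 * with error checks elided; `wksp`, `count`, and the buffer sizes are
 * illustrative assumptions.
 *
 *     U64 wksp[HUF_WORKSPACE_SIZE_U64];
 *     unsigned count[HUF_SYMBOLVALUE_MAX + 1];    // filled by a histogram pass
 *     unsigned maxSym = HUF_SYMBOLVALUE_MAX;
 *     HUF_CREATE_STATIC_CTABLE(ctable, HUF_SYMBOLVALUE_MAX);
 *     unsigned log = HUF_optimalTableLog(HUF_TABLELOG_DEFAULT, srcSize, maxSym,
 *                                        wksp, sizeof(wksp), ctable, count, 0);
 *     HUF_buildCTable_wksp(ctable, count, maxSym, log, wksp, sizeof(wksp));
 *     size_t tSize = HUF_writeCTable_wksp(dst, dstCap, ctable, maxSym, log,
 *                                         wksp, sizeof(wksp));
 *     size_t cSize = HUF_compress4X_usingCTable(dst + tSize, dstCap - tSize,
 *                                               src, srcSize, ctable, 0);
 *
 * The same `ctable` can then be reused to compress further blocks, which is
 * exactly the use case this split API exists for. */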

typedef enum {
   HUF_repeat_none,  /**< Cannot use the previous table */
   HUF_repeat_check, /**< Can use the previous table but it must be checked. Note : The previous table must have been constructed by HUF_compress{1, 4}X_repeat */
   HUF_repeat_valid  /**< Can use the previous table and it is assumed to be valid */
 } HUF_repeat;

/** HUF_compress4X_repeat() :
 *  Same as HUF_compress4X_wksp(), but considers using hufTable if *repeat != HUF_repeat_none.
 *  If it uses hufTable it does not modify hufTable or repeat.
 *  If it doesn't, it sets *repeat = HUF_repeat_none, and it sets hufTable to the table used.
 *  If preferRepeat then the old table will always be used if valid.
 *  If suspectUncompressible then some sampling checks will be run to potentially skip huffman coding */
size_t HUF_compress4X_repeat(void* dst, size_t dstSize,
                       const void* src, size_t srcSize,
                       unsigned maxSymbolValue, unsigned tableLog,
                       void* workSpace, size_t wkspSize,    /**< `workSpace` must be aligned on 4-bytes boundaries, `wkspSize` must be >= HUF_WORKSPACE_SIZE */
                       HUF_CElt* hufTable, HUF_repeat* repeat, int flags);

/** HUF_buildCTable_wksp() :
 *  Same as HUF_buildCTable(), but using externally allocated scratch buffer.
 * `workSpace` must be aligned on 4-bytes boundaries, and its size must be >= HUF_CTABLE_WORKSPACE_SIZE.
 */
#define HUF_CTABLE_WORKSPACE_SIZE_U32 ((4 * (HUF_SYMBOLVALUE_MAX + 1)) + 192)
#define HUF_CTABLE_WORKSPACE_SIZE (HUF_CTABLE_WORKSPACE_SIZE_U32 * sizeof(unsigned))
size_t HUF_buildCTable_wksp (HUF_CElt* tree,
                       const unsigned* count, U32 maxSymbolValue, U32 maxNbBits,
                             void* workSpace, size_t wkspSize);

/*! HUF_readStats() :
 *  Read compact Huffman tree, saved by HUF_writeCTable().
 * `huffWeight` is destination buffer.
 * @return : size read from `src` , or an error Code .
 *  Note : Needed by HUF_readCTable() and HUF_readDTableXn() . */
size_t HUF_readStats(BYTE* huffWeight, size_t hwSize,
                     U32* rankStats, U32* nbSymbolsPtr, U32* tableLogPtr,
                     const void* src, size_t srcSize);

/*! HUF_readStats_wksp() :
 *  Same as HUF_readStats() but takes an external workspace which must be
 *  4-byte aligned and its size must be >= HUF_READ_STATS_WORKSPACE_SIZE.
 *  If the CPU has BMI2 support, pass bmi2=1, otherwise pass bmi2=0.
 */
#define HUF_READ_STATS_WORKSPACE_SIZE_U32 FSE_DECOMPRESS_WKSP_SIZE_U32(6, HUF_TABLELOG_MAX-1)
#define HUF_READ_STATS_WORKSPACE_SIZE (HUF_READ_STATS_WORKSPACE_SIZE_U32 * sizeof(unsigned))
size_t HUF_readStats_wksp(BYTE* huffWeight, size_t hwSize,
                          U32* rankStats, U32* nbSymbolsPtr, U32* tableLogPtr,
                          const void* src, size_t srcSize,
                          void* workspace, size_t wkspSize,
                          int flags);

/** HUF_readCTable() :
 *  Loading a CTable saved with HUF_writeCTable() */
size_t HUF_readCTable (HUF_CElt* CTable, unsigned* maxSymbolValuePtr, const void* src, size_t srcSize, unsigned *hasZeroWeights);

/** HUF_getNbBitsFromCTable() :
 *  Read nbBits from CTable symbolTable, for symbol `symbolValue` presumed <= HUF_SYMBOLVALUE_MAX
 *  Note 1 : is not inlined, as HUF_CElt definition is private */
U32 HUF_getNbBitsFromCTable(const HUF_CElt* symbolTable, U32 symbolValue);

/*
 * HUF_decompress() does the following:
 * 1. select the decompression algorithm (X1, X2) based on pre-computed heuristics
 * 2. build Huffman table from save, using HUF_readDTableX?()
 * 3. decode 1 or 4 segments in parallel using HUF_decompress?X?_usingDTable()
 */

/** HUF_selectDecoder() :
 *  Tells which decoder is likely to decode faster,
 *  based on a set of pre-computed metrics.
 * @return : 0==HUF_decompress4X1, 1==HUF_decompress4X2 .
 *  Assumption : 0 < dstSize <= 128 KB */
U32 HUF_selectDecoder (size_t dstSize, size_t cSrcSize);

/**
 *  The minimum workspace size for the `workSpace` used in
 *  HUF_readDTableX1_wksp() and HUF_readDTableX2_wksp().
 *
 *  The space used depends on HUF_TABLELOG_MAX, ranging from ~1500 bytes when
 *  HUF_TABLE_LOG_MAX=12 to ~1850 bytes when HUF_TABLE_LOG_MAX=15.
 *  Buffer overflow errors may potentially occur if code modifications result in
 *  a required workspace size greater than that specified in the following
 *  macro.
 */
#define HUF_DECOMPRESS_WORKSPACE_SIZE ((2 << 10) + (1 << 9))
#define HUF_DECOMPRESS_WORKSPACE_SIZE_U32 (HUF_DECOMPRESS_WORKSPACE_SIZE / sizeof(U32))


/* ====================== */
/* single stream variants */
/* ====================== */

size_t HUF_compress1X_usingCTable(void* dst, size_t dstSize, const void* src, size_t srcSize, const HUF_CElt* CTable, int flags);
/** HUF_compress1X_repeat() :
 *  Same as HUF_compress1X_wksp(), but considers using hufTable if *repeat != HUF_repeat_none.
 *  If it uses hufTable it does not modify hufTable or repeat.
 *  If it doesn't, it sets *repeat = HUF_repeat_none, and it sets hufTable to the table used.
 *  If preferRepeat then the old table will always be used if valid.
 *  If suspectUncompressible then some sampling checks will be run to potentially skip huffman coding */
size_t HUF_compress1X_repeat(void* dst, size_t dstSize,
                       const void* src, size_t srcSize,
                       unsigned maxSymbolValue, unsigned tableLog,
                       void* workSpace, size_t wkspSize,   /**< `workSpace` must be aligned on 4-bytes boundaries, `wkspSize` must be >= HUF_WORKSPACE_SIZE */
                       HUF_CElt* hufTable, HUF_repeat* repeat, int flags);

size_t HUF_decompress1X_DCtx_wksp(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize, void* workSpace, size_t wkspSize, int flags);
#ifndef HUF_FORCE_DECOMPRESS_X1
size_t HUF_decompress1X2_DCtx_wksp(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize, void* workSpace, size_t wkspSize, int flags);   /**< double-symbols decoder */
#endif

/* BMI2 variants.
 * If the CPU has BMI2 support, pass bmi2=1, otherwise pass bmi2=0.
 */
size_t HUF_decompress1X_usingDTable(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize, const HUF_DTable* DTable, int flags);
#ifndef HUF_FORCE_DECOMPRESS_X2
size_t HUF_decompress1X1_DCtx_wksp(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize, void* workSpace, size_t wkspSize, int flags);
#endif
size_t HUF_decompress4X_usingDTable(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize, const HUF_DTable* DTable, int flags);
size_t HUF_decompress4X_hufOnly_wksp(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize, void* workSpace, size_t wkspSize, int flags);
#ifndef HUF_FORCE_DECOMPRESS_X2
size_t HUF_readDTableX1_wksp(HUF_DTable* DTable, const void* src, size_t srcSize, void* workSpace, size_t wkspSize, int flags);
#endif
#ifndef HUF_FORCE_DECOMPRESS_X1
size_t HUF_readDTableX2_wksp(HUF_DTable* DTable, const void* src, size_t srcSize, void* workSpace, size_t wkspSize, int flags);
#endif

#endif   /* HUF_H_298734234 */

#if defined (__cplusplus)
}
#endif
435
ext/zstd/lib/common/mem.h
Normal file
@@ -0,0 +1,435 @@
/*
 * Copyright (c) Meta Platforms, Inc. and affiliates.
 * All rights reserved.
 *
 * This source code is licensed under both the BSD-style license (found in the
 * LICENSE file in the root directory of this source tree) and the GPLv2 (found
 * in the COPYING file in the root directory of this source tree).
 * You may select, at your option, one of the above-listed licenses.
 */

#ifndef MEM_H_MODULE
#define MEM_H_MODULE

#if defined (__cplusplus)
extern "C" {
#endif

/*-****************************************
*  Dependencies
******************************************/
#include <stddef.h>     /* size_t, ptrdiff_t */
#include "compiler.h"   /* __has_builtin */
#include "debug.h"      /* DEBUG_STATIC_ASSERT */
#include "zstd_deps.h"  /* ZSTD_memcpy */


/*-****************************************
*  Compiler specifics
******************************************/
#if defined(_MSC_VER)   /* Visual Studio */
#   include <stdlib.h>  /* _byteswap_ulong */
#   include <intrin.h>  /* _byteswap_* */
#endif
#if defined(__GNUC__)
#  define MEM_STATIC static __inline __attribute__((unused))
#elif defined (__cplusplus) || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */)
#  define MEM_STATIC static inline
#elif defined(_MSC_VER)
#  define MEM_STATIC static __inline
#else
#  define MEM_STATIC static  /* this version may generate warnings for unused static functions; disable the relevant warning */
#endif

/*-**************************************************************
*  Basic Types
*****************************************************************/
#if !defined (__VMS) && (defined (__cplusplus) || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */) )
# if defined(_AIX)
#  include <inttypes.h>
# else
#  include <stdint.h> /* intptr_t */
# endif
  typedef   uint8_t BYTE;
  typedef   uint8_t U8;
  typedef    int8_t S8;
  typedef  uint16_t U16;
  typedef   int16_t S16;
  typedef  uint32_t U32;
  typedef   int32_t S32;
  typedef  uint64_t U64;
  typedef   int64_t S64;
#else
# include <limits.h>
#if CHAR_BIT != 8
#  error "this implementation requires char to be exactly 8-bit type"
#endif
  typedef unsigned char   BYTE;
  typedef unsigned char   U8;
  typedef   signed char   S8;
#if USHRT_MAX != 65535
#  error "this implementation requires short to be exactly 16-bit type"
#endif
  typedef unsigned short  U16;
  typedef   signed short  S16;
#if UINT_MAX != 4294967295
#  error "this implementation requires int to be exactly 32-bit type"
#endif
  typedef unsigned int    U32;
  typedef   signed int    S32;
/* note : there are no limits defined for long long type in C90.
 * limits exist in C99, however, in such case, <stdint.h> is preferred */
  typedef unsigned long long U64;
  typedef   signed long long S64;
#endif


/*-**************************************************************
*  Memory I/O API
*****************************************************************/
/*=== Static platform detection ===*/
MEM_STATIC unsigned MEM_32bits(void);
MEM_STATIC unsigned MEM_64bits(void);
MEM_STATIC unsigned MEM_isLittleEndian(void);

/*=== Native unaligned read/write ===*/
MEM_STATIC U16 MEM_read16(const void* memPtr);
MEM_STATIC U32 MEM_read32(const void* memPtr);
MEM_STATIC U64 MEM_read64(const void* memPtr);
MEM_STATIC size_t MEM_readST(const void* memPtr);

MEM_STATIC void MEM_write16(void* memPtr, U16 value);
MEM_STATIC void MEM_write32(void* memPtr, U32 value);
MEM_STATIC void MEM_write64(void* memPtr, U64 value);

/*=== Little endian unaligned read/write ===*/
MEM_STATIC U16 MEM_readLE16(const void* memPtr);
MEM_STATIC U32 MEM_readLE24(const void* memPtr);
MEM_STATIC U32 MEM_readLE32(const void* memPtr);
MEM_STATIC U64 MEM_readLE64(const void* memPtr);
MEM_STATIC size_t MEM_readLEST(const void* memPtr);

MEM_STATIC void MEM_writeLE16(void* memPtr, U16 val);
MEM_STATIC void MEM_writeLE24(void* memPtr, U32 val);
MEM_STATIC void MEM_writeLE32(void* memPtr, U32 val32);
MEM_STATIC void MEM_writeLE64(void* memPtr, U64 val64);
MEM_STATIC void MEM_writeLEST(void* memPtr, size_t val);

/*=== Big endian unaligned read/write ===*/
MEM_STATIC U32 MEM_readBE32(const void* memPtr);
MEM_STATIC U64 MEM_readBE64(const void* memPtr);
MEM_STATIC size_t MEM_readBEST(const void* memPtr);

MEM_STATIC void MEM_writeBE32(void* memPtr, U32 val32);
MEM_STATIC void MEM_writeBE64(void* memPtr, U64 val64);
MEM_STATIC void MEM_writeBEST(void* memPtr, size_t val);

/*=== Byteswap ===*/
MEM_STATIC U32 MEM_swap32(U32 in);
MEM_STATIC U64 MEM_swap64(U64 in);
MEM_STATIC size_t MEM_swapST(size_t in);


/*-**************************************************************
*  Memory I/O Implementation
*****************************************************************/
/* MEM_FORCE_MEMORY_ACCESS : For accessing unaligned memory:
 * Method 0 : always use `memcpy()`. Safe and portable.
 * Method 1 : Use compiler extension to set unaligned access.
 * Method 2 : direct access. This method is portable but violate C standard.
 *            It can generate buggy code on targets depending on alignment.
 * Default  : method 1 if supported, else method 0
 */
#ifndef MEM_FORCE_MEMORY_ACCESS   /* can be defined externally, on command line for example */
# ifdef __GNUC__
#  define MEM_FORCE_MEMORY_ACCESS 1
# endif
#endif

MEM_STATIC unsigned MEM_32bits(void) { return sizeof(size_t)==4; }
MEM_STATIC unsigned MEM_64bits(void) { return sizeof(size_t)==8; }

MEM_STATIC unsigned MEM_isLittleEndian(void)
{
#if defined(__BYTE_ORDER__) && defined(__ORDER_LITTLE_ENDIAN__) && (__BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__)
    return 1;
#elif defined(__BYTE_ORDER__) && defined(__ORDER_BIG_ENDIAN__) && (__BYTE_ORDER__ == __ORDER_BIG_ENDIAN__)
    return 0;
#elif defined(__clang__) && __LITTLE_ENDIAN__
    return 1;
#elif defined(__clang__) && __BIG_ENDIAN__
    return 0;
#elif defined(_MSC_VER) && (_M_AMD64 || _M_IX86)
    return 1;
#elif defined(__DMC__) && defined(_M_IX86)
    return 1;
#else
    const union { U32 u; BYTE c[4]; } one = { 1 };   /* don't use static : performance detrimental */
    return one.c[0];
#endif
}

#if defined(MEM_FORCE_MEMORY_ACCESS) && (MEM_FORCE_MEMORY_ACCESS==2)

/* violates C standard, by lying on structure alignment.
Only use if no other choice to achieve best performance on target platform */
MEM_STATIC U16 MEM_read16(const void* memPtr) { return *(const U16*) memPtr; }
MEM_STATIC U32 MEM_read32(const void* memPtr) { return *(const U32*) memPtr; }
MEM_STATIC U64 MEM_read64(const void* memPtr) { return *(const U64*) memPtr; }
MEM_STATIC size_t MEM_readST(const void* memPtr) { return *(const size_t*) memPtr; }

MEM_STATIC void MEM_write16(void* memPtr, U16 value) { *(U16*)memPtr = value; }
MEM_STATIC void MEM_write32(void* memPtr, U32 value) { *(U32*)memPtr = value; }
MEM_STATIC void MEM_write64(void* memPtr, U64 value) { *(U64*)memPtr = value; }

#elif defined(MEM_FORCE_MEMORY_ACCESS) && (MEM_FORCE_MEMORY_ACCESS==1)

typedef __attribute__((aligned(1))) U16 unalign16;
typedef __attribute__((aligned(1))) U32 unalign32;
typedef __attribute__((aligned(1))) U64 unalign64;
typedef __attribute__((aligned(1))) size_t unalignArch;

MEM_STATIC U16 MEM_read16(const void* ptr) { return *(const unalign16*)ptr; }
MEM_STATIC U32 MEM_read32(const void* ptr) { return *(const unalign32*)ptr; }
MEM_STATIC U64 MEM_read64(const void* ptr) { return *(const unalign64*)ptr; }
MEM_STATIC size_t MEM_readST(const void* ptr) { return *(const unalignArch*)ptr; }

MEM_STATIC void MEM_write16(void* memPtr, U16 value) { *(unalign16*)memPtr = value; }
MEM_STATIC void MEM_write32(void* memPtr, U32 value) { *(unalign32*)memPtr = value; }
MEM_STATIC void MEM_write64(void* memPtr, U64 value) { *(unalign64*)memPtr = value; }

#else

/* default method, safe and standard.
   can sometimes prove slower */

MEM_STATIC U16 MEM_read16(const void* memPtr)
{
    U16 val; ZSTD_memcpy(&val, memPtr, sizeof(val)); return val;
}

MEM_STATIC U32 MEM_read32(const void* memPtr)
{
    U32 val; ZSTD_memcpy(&val, memPtr, sizeof(val)); return val;
}

MEM_STATIC U64 MEM_read64(const void* memPtr)
{
    U64 val; ZSTD_memcpy(&val, memPtr, sizeof(val)); return val;
}

MEM_STATIC size_t MEM_readST(const void* memPtr)
{
    size_t val; ZSTD_memcpy(&val, memPtr, sizeof(val)); return val;
}

MEM_STATIC void MEM_write16(void* memPtr, U16 value)
{
    ZSTD_memcpy(memPtr, &value, sizeof(value));
}

MEM_STATIC void MEM_write32(void* memPtr, U32 value)
{
    ZSTD_memcpy(memPtr, &value, sizeof(value));
}

MEM_STATIC void MEM_write64(void* memPtr, U64 value)
{
    ZSTD_memcpy(memPtr, &value, sizeof(value));
}

#endif /* MEM_FORCE_MEMORY_ACCESS */
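
/* Editorial note (not part of zstd): all three methods above are observably
 * equivalent; e.g. reading a 32-bit little-endian value from any offset:
 *
 *     BYTE buf[5] = { 0xAA, 0x78, 0x56, 0x34, 0x12 };
 *     U32 v = MEM_readLE32(buf + 1);   // 0x12345678, even on big-endian hosts
 *
 * Modern compilers typically lower the memcpy() variant to a single
 * unaligned load where the target allows it, so method 0 usually costs
 * nothing at runtime. */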

MEM_STATIC U32 MEM_swap32_fallback(U32 in)
{
    return  ((in << 24) & 0xff000000 ) |
            ((in <<  8) & 0x00ff0000 ) |
            ((in >>  8) & 0x0000ff00 ) |
            ((in >> 24) & 0x000000ff );
}

MEM_STATIC U32 MEM_swap32(U32 in)
{
#if defined(_MSC_VER)     /* Visual Studio */
    return _byteswap_ulong(in);
#elif (defined (__GNUC__) && (__GNUC__ * 100 + __GNUC_MINOR__ >= 403)) \
  || (defined(__clang__) && __has_builtin(__builtin_bswap32))
    return __builtin_bswap32(in);
#else
    return MEM_swap32_fallback(in);
#endif
}
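
/* Editorial example (not part of zstd): both paths agree, e.g.
 *     MEM_swap32(0x11223344) == 0x44332211
 * and MEM_swap32_fallback() produces the same value on every compiler;
 * the intrinsic/builtin branches merely reduce it to one bswap instruction. */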

MEM_STATIC U64 MEM_swap64_fallback(U64 in)
{
    return  ((in << 56) & 0xff00000000000000ULL) |
            ((in << 40) & 0x00ff000000000000ULL) |
            ((in << 24) & 0x0000ff0000000000ULL) |
            ((in << 8)  & 0x000000ff00000000ULL) |
            ((in >> 8)  & 0x00000000ff000000ULL) |
            ((in >> 24) & 0x0000000000ff0000ULL) |
            ((in >> 40) & 0x000000000000ff00ULL) |
            ((in >> 56) & 0x00000000000000ffULL);
}

MEM_STATIC U64 MEM_swap64(U64 in)
{
#if defined(_MSC_VER)     /* Visual Studio */
    return _byteswap_uint64(in);
#elif (defined (__GNUC__) && (__GNUC__ * 100 + __GNUC_MINOR__ >= 403)) \
  || (defined(__clang__) && __has_builtin(__builtin_bswap64))
    return __builtin_bswap64(in);
#else
    return MEM_swap64_fallback(in);
#endif
}

MEM_STATIC size_t MEM_swapST(size_t in)
{
    if (MEM_32bits())
        return (size_t)MEM_swap32((U32)in);
    else
        return (size_t)MEM_swap64((U64)in);
}

/*=== Little endian r/w ===*/

MEM_STATIC U16 MEM_readLE16(const void* memPtr)
{
    if (MEM_isLittleEndian())
        return MEM_read16(memPtr);
    else {
        const BYTE* p = (const BYTE*)memPtr;
        return (U16)(p[0] + (p[1]<<8));
    }
}

MEM_STATIC void MEM_writeLE16(void* memPtr, U16 val)
{
    if (MEM_isLittleEndian()) {
        MEM_write16(memPtr, val);
    } else {
        BYTE* p = (BYTE*)memPtr;
        p[0] = (BYTE)val;
        p[1] = (BYTE)(val>>8);
    }
}

MEM_STATIC U32 MEM_readLE24(const void* memPtr)
{
    return (U32)MEM_readLE16(memPtr) + ((U32)(((const BYTE*)memPtr)[2]) << 16);
}

MEM_STATIC void MEM_writeLE24(void* memPtr, U32 val)
{
    MEM_writeLE16(memPtr, (U16)val);
    ((BYTE*)memPtr)[2] = (BYTE)(val>>16);
}

MEM_STATIC U32 MEM_readLE32(const void* memPtr)
{
    if (MEM_isLittleEndian())
        return MEM_read32(memPtr);
    else
        return MEM_swap32(MEM_read32(memPtr));
}

MEM_STATIC void MEM_writeLE32(void* memPtr, U32 val32)
{
    if (MEM_isLittleEndian())
        MEM_write32(memPtr, val32);
    else
        MEM_write32(memPtr, MEM_swap32(val32));
}

MEM_STATIC U64 MEM_readLE64(const void* memPtr)
{
    if (MEM_isLittleEndian())
        return MEM_read64(memPtr);
    else
        return MEM_swap64(MEM_read64(memPtr));
}

MEM_STATIC void MEM_writeLE64(void* memPtr, U64 val64)
{
    if (MEM_isLittleEndian())
        MEM_write64(memPtr, val64);
    else
        MEM_write64(memPtr, MEM_swap64(val64));
}

MEM_STATIC size_t MEM_readLEST(const void* memPtr)
{
    if (MEM_32bits())
        return (size_t)MEM_readLE32(memPtr);
    else
        return (size_t)MEM_readLE64(memPtr);
}

MEM_STATIC void MEM_writeLEST(void* memPtr, size_t val)
{
    if (MEM_32bits())
        MEM_writeLE32(memPtr, (U32)val);
    else
        MEM_writeLE64(memPtr, (U64)val);
}

/*=== Big endian r/w ===*/

MEM_STATIC U32 MEM_readBE32(const void* memPtr)
{
    if (MEM_isLittleEndian())
        return MEM_swap32(MEM_read32(memPtr));
    else
        return MEM_read32(memPtr);
}

MEM_STATIC void MEM_writeBE32(void* memPtr, U32 val32)
{
    if (MEM_isLittleEndian())
        MEM_write32(memPtr, MEM_swap32(val32));
    else
        MEM_write32(memPtr, val32);
}

MEM_STATIC U64 MEM_readBE64(const void* memPtr)
{
    if (MEM_isLittleEndian())
        return MEM_swap64(MEM_read64(memPtr));
    else
        return MEM_read64(memPtr);
}

MEM_STATIC void MEM_writeBE64(void* memPtr, U64 val64)
{
    if (MEM_isLittleEndian())
        MEM_write64(memPtr, MEM_swap64(val64));
    else
        MEM_write64(memPtr, val64);
}

MEM_STATIC size_t MEM_readBEST(const void* memPtr)
{
    if (MEM_32bits())
        return (size_t)MEM_readBE32(memPtr);
    else
        return (size_t)MEM_readBE64(memPtr);
}

MEM_STATIC void MEM_writeBEST(void* memPtr, size_t val)
{
    if (MEM_32bits())
        MEM_writeBE32(memPtr, (U32)val);
    else
        MEM_writeBE64(memPtr, (U64)val);
}

/* code only tested on 32 and 64 bits systems */
MEM_STATIC void MEM_check(void) { DEBUG_STATIC_ASSERT((sizeof(size_t)==4) || (sizeof(size_t)==8)); }


#if defined (__cplusplus)
}
#endif

#endif /* MEM_H_MODULE */
371
ext/zstd/lib/common/pool.c
vendored
Normal file
@@ -0,0 +1,371 @@
/*
 * Copyright (c) Meta Platforms, Inc. and affiliates.
 * All rights reserved.
 *
 * This source code is licensed under both the BSD-style license (found in the
 * LICENSE file in the root directory of this source tree) and the GPLv2 (found
 * in the COPYING file in the root directory of this source tree).
 * You may select, at your option, one of the above-listed licenses.
 */


/* ======   Dependencies   ======= */
#include "../common/allocations.h"  /* ZSTD_customCalloc, ZSTD_customFree */
#include "zstd_deps.h" /* size_t */
#include "debug.h"     /* assert */
#include "pool.h"

/* ======   Compiler specifics   ====== */
#if defined(_MSC_VER)
#  pragma warning(disable : 4204)   /* disable: C4204: non-constant aggregate initializer */
#endif


#ifdef ZSTD_MULTITHREAD

#include "threading.h"   /* pthread adaptation */

/* A job is a function and an opaque argument */
typedef struct POOL_job_s {
    POOL_function function;
    void *opaque;
} POOL_job;

struct POOL_ctx_s {
    ZSTD_customMem customMem;
    /* Keep track of the threads */
    ZSTD_pthread_t* threads;
    size_t threadCapacity;
    size_t threadLimit;

    /* The queue is a circular buffer */
    POOL_job *queue;
    size_t queueHead;
    size_t queueTail;
    size_t queueSize;

    /* The number of threads working on jobs */
    size_t numThreadsBusy;
    /* Indicates if the queue is empty */
    int queueEmpty;

    /* The mutex protects the queue */
    ZSTD_pthread_mutex_t queueMutex;
    /* Condition variable for pushers to wait on when the queue is full */
    ZSTD_pthread_cond_t queuePushCond;
    /* Condition variables for poppers to wait on when the queue is empty */
    ZSTD_pthread_cond_t queuePopCond;
    /* Indicates if the queue is shutting down */
    int shutdown;
};

/* POOL_thread() :
 * Work thread for the thread pool.
 * Waits for jobs and executes them.
 * @returns : NULL on failure else non-null.
 */
static void* POOL_thread(void* opaque) {
    POOL_ctx* const ctx = (POOL_ctx*)opaque;
    if (!ctx) { return NULL; }
    for (;;) {
        /* Lock the mutex and wait for a non-empty queue or until shutdown */
        ZSTD_pthread_mutex_lock(&ctx->queueMutex);

        while ( ctx->queueEmpty
            || (ctx->numThreadsBusy >= ctx->threadLimit) ) {
            if (ctx->shutdown) {
                /* even if !queueEmpty, (possible if numThreadsBusy >= threadLimit),
                 * a few threads will be shutdown while !queueEmpty,
                 * but enough threads will remain active to finish the queue */
                ZSTD_pthread_mutex_unlock(&ctx->queueMutex);
                return opaque;
            }
            ZSTD_pthread_cond_wait(&ctx->queuePopCond, &ctx->queueMutex);
        }
        /* Pop a job off the queue */
        {   POOL_job const job = ctx->queue[ctx->queueHead];
            ctx->queueHead = (ctx->queueHead + 1) % ctx->queueSize;
            ctx->numThreadsBusy++;
            ctx->queueEmpty = (ctx->queueHead == ctx->queueTail);
            /* Unlock the mutex, signal a pusher, and run the job */
            ZSTD_pthread_cond_signal(&ctx->queuePushCond);
            ZSTD_pthread_mutex_unlock(&ctx->queueMutex);

            job.function(job.opaque);

            /* If the intended queue size was 0, signal after finishing job */
            ZSTD_pthread_mutex_lock(&ctx->queueMutex);
            ctx->numThreadsBusy--;
            ZSTD_pthread_cond_signal(&ctx->queuePushCond);
            ZSTD_pthread_mutex_unlock(&ctx->queueMutex);
        }
    }  /* for (;;) */
    assert(0);  /* Unreachable */
}

/* ZSTD_createThreadPool() : public access point */
POOL_ctx* ZSTD_createThreadPool(size_t numThreads) {
    return POOL_create (numThreads, 0);
}

POOL_ctx* POOL_create(size_t numThreads, size_t queueSize) {
    return POOL_create_advanced(numThreads, queueSize, ZSTD_defaultCMem);
}

POOL_ctx* POOL_create_advanced(size_t numThreads, size_t queueSize,
                               ZSTD_customMem customMem)
{
    POOL_ctx* ctx;
    /* Check parameters */
    if (!numThreads) { return NULL; }
    /* Allocate the context and zero initialize */
    ctx = (POOL_ctx*)ZSTD_customCalloc(sizeof(POOL_ctx), customMem);
    if (!ctx) { return NULL; }
    /* Initialize the job queue.
     * It needs one extra space since one space is wasted to differentiate
     * empty and full queues.
     */
    ctx->queueSize = queueSize + 1;
    ctx->queue = (POOL_job*)ZSTD_customCalloc(ctx->queueSize * sizeof(POOL_job), customMem);
    ctx->queueHead = 0;
    ctx->queueTail = 0;
    ctx->numThreadsBusy = 0;
    ctx->queueEmpty = 1;
    {
        int error = 0;
        error |= ZSTD_pthread_mutex_init(&ctx->queueMutex, NULL);
        error |= ZSTD_pthread_cond_init(&ctx->queuePushCond, NULL);
        error |= ZSTD_pthread_cond_init(&ctx->queuePopCond, NULL);
        if (error) { POOL_free(ctx); return NULL; }
    }
    ctx->shutdown = 0;
    /* Allocate space for the thread handles */
    ctx->threads = (ZSTD_pthread_t*)ZSTD_customCalloc(numThreads * sizeof(ZSTD_pthread_t), customMem);
    ctx->threadCapacity = 0;
    ctx->customMem = customMem;
    /* Check for errors */
    if (!ctx->threads || !ctx->queue) { POOL_free(ctx); return NULL; }
    /* Initialize the threads */
    {   size_t i;
        for (i = 0; i < numThreads; ++i) {
            if (ZSTD_pthread_create(&ctx->threads[i], NULL, &POOL_thread, ctx)) {
                ctx->threadCapacity = i;
                POOL_free(ctx);
                return NULL;
        }   }
        ctx->threadCapacity = numThreads;
        ctx->threadLimit = numThreads;
    }
    return ctx;
}
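
/* Editorial note (not part of zstd): with queueSize+1 slots, "empty" is
 * head == tail and "full" is head == (tail+1) % queueSize, so the two
 * states never look the same.  E.g. a requested queueSize of 2 gives 3
 * slots: after two pushes tail==2 and head==0, and (2+1)%3 == 0 == head,
 * so the queue reports full while one slot stays intentionally unused. */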

/*! POOL_join() :
    Shutdown the queue, wake any sleeping threads, and join all of the threads.
*/
static void POOL_join(POOL_ctx* ctx) {
    /* Shut down the queue */
    ZSTD_pthread_mutex_lock(&ctx->queueMutex);
    ctx->shutdown = 1;
    ZSTD_pthread_mutex_unlock(&ctx->queueMutex);
    /* Wake up sleeping threads */
    ZSTD_pthread_cond_broadcast(&ctx->queuePushCond);
    ZSTD_pthread_cond_broadcast(&ctx->queuePopCond);
    /* Join all of the threads */
    {   size_t i;
        for (i = 0; i < ctx->threadCapacity; ++i) {
            ZSTD_pthread_join(ctx->threads[i]);  /* note : could fail */
    }   }
}

void POOL_free(POOL_ctx *ctx) {
    if (!ctx) { return; }
    POOL_join(ctx);
    ZSTD_pthread_mutex_destroy(&ctx->queueMutex);
    ZSTD_pthread_cond_destroy(&ctx->queuePushCond);
    ZSTD_pthread_cond_destroy(&ctx->queuePopCond);
    ZSTD_customFree(ctx->queue, ctx->customMem);
    ZSTD_customFree(ctx->threads, ctx->customMem);
    ZSTD_customFree(ctx, ctx->customMem);
}

/*! POOL_joinJobs() :
 *  Waits for all queued jobs to finish executing.
 */
void POOL_joinJobs(POOL_ctx* ctx) {
    ZSTD_pthread_mutex_lock(&ctx->queueMutex);
    while(!ctx->queueEmpty || ctx->numThreadsBusy > 0) {
        ZSTD_pthread_cond_wait(&ctx->queuePushCond, &ctx->queueMutex);
    }
    ZSTD_pthread_mutex_unlock(&ctx->queueMutex);
}

void ZSTD_freeThreadPool (ZSTD_threadPool* pool) {
    POOL_free (pool);
}

size_t POOL_sizeof(const POOL_ctx* ctx) {
    if (ctx==NULL) return 0;  /* supports sizeof NULL */
    return sizeof(*ctx)
        + ctx->queueSize * sizeof(POOL_job)
        + ctx->threadCapacity * sizeof(ZSTD_pthread_t);
}


/* @return : 0 on success, 1 on error */
static int POOL_resize_internal(POOL_ctx* ctx, size_t numThreads)
{
    if (numThreads <= ctx->threadCapacity) {
        if (!numThreads) return 1;
        ctx->threadLimit = numThreads;
        return 0;
    }
    /* numThreads > threadCapacity */
    {   ZSTD_pthread_t* const threadPool = (ZSTD_pthread_t*)ZSTD_customCalloc(numThreads * sizeof(ZSTD_pthread_t), ctx->customMem);
        if (!threadPool) return 1;
        /* replace existing thread pool */
        ZSTD_memcpy(threadPool, ctx->threads, ctx->threadCapacity * sizeof(*threadPool));
        ZSTD_customFree(ctx->threads, ctx->customMem);
        ctx->threads = threadPool;
        /* Initialize additional threads */
        {   size_t threadId;
            for (threadId = ctx->threadCapacity; threadId < numThreads; ++threadId) {
                if (ZSTD_pthread_create(&threadPool[threadId], NULL, &POOL_thread, ctx)) {
                    ctx->threadCapacity = threadId;
                    return 1;
            }   }
    }   }
    /* successfully expanded */
    ctx->threadCapacity = numThreads;
    ctx->threadLimit = numThreads;
    return 0;
}

/* @return : 0 on success, 1 on error */
int POOL_resize(POOL_ctx* ctx, size_t numThreads)
{
    int result;
    if (ctx==NULL) return 1;
    ZSTD_pthread_mutex_lock(&ctx->queueMutex);
    result = POOL_resize_internal(ctx, numThreads);
    ZSTD_pthread_cond_broadcast(&ctx->queuePopCond);
    ZSTD_pthread_mutex_unlock(&ctx->queueMutex);
    return result;
}

/**
 * Returns 1 if the queue is full and 0 otherwise.
 *
 * When queueSize is 1 (pool was created with an intended queueSize of 0),
 * then a queue is empty if there is a thread free _and_ no job is waiting.
 */
static int isQueueFull(POOL_ctx const* ctx) {
    if (ctx->queueSize > 1) {
        return ctx->queueHead == ((ctx->queueTail + 1) % ctx->queueSize);
    } else {
        return (ctx->numThreadsBusy == ctx->threadLimit) ||
               !ctx->queueEmpty;
    }
}


static void
POOL_add_internal(POOL_ctx* ctx, POOL_function function, void *opaque)
{
    POOL_job job;
    job.function = function;
    job.opaque = opaque;
    assert(ctx != NULL);
    if (ctx->shutdown) return;

    ctx->queueEmpty = 0;
    ctx->queue[ctx->queueTail] = job;
    ctx->queueTail = (ctx->queueTail + 1) % ctx->queueSize;
    ZSTD_pthread_cond_signal(&ctx->queuePopCond);
}

void POOL_add(POOL_ctx* ctx, POOL_function function, void* opaque)
{
    assert(ctx != NULL);
    ZSTD_pthread_mutex_lock(&ctx->queueMutex);
    /* Wait until there is space in the queue for the new job */
    while (isQueueFull(ctx) && (!ctx->shutdown)) {
        ZSTD_pthread_cond_wait(&ctx->queuePushCond, &ctx->queueMutex);
    }
    POOL_add_internal(ctx, function, opaque);
    ZSTD_pthread_mutex_unlock(&ctx->queueMutex);
}


int POOL_tryAdd(POOL_ctx* ctx, POOL_function function, void* opaque)
{
    assert(ctx != NULL);
    ZSTD_pthread_mutex_lock(&ctx->queueMutex);
    if (isQueueFull(ctx)) {
        ZSTD_pthread_mutex_unlock(&ctx->queueMutex);
        return 0;
    }
    POOL_add_internal(ctx, function, opaque);
    ZSTD_pthread_mutex_unlock(&ctx->queueMutex);
    return 1;
}


#else  /* ZSTD_MULTITHREAD  not defined */

/* ========================== */
/* No multi-threading support */
/* ========================== */


/* We don't need any data, but if it is empty, malloc() might return NULL. */
struct POOL_ctx_s {
    int dummy;
};
static POOL_ctx g_poolCtx;

POOL_ctx* POOL_create(size_t numThreads, size_t queueSize) {
    return POOL_create_advanced(numThreads, queueSize, ZSTD_defaultCMem);
}

POOL_ctx*
POOL_create_advanced(size_t numThreads, size_t queueSize, ZSTD_customMem customMem)
{
    (void)numThreads;
    (void)queueSize;
    (void)customMem;
    return &g_poolCtx;
}

void POOL_free(POOL_ctx* ctx) {
    assert(!ctx || ctx == &g_poolCtx);
    (void)ctx;
}

void POOL_joinJobs(POOL_ctx* ctx){
    assert(!ctx || ctx == &g_poolCtx);
    (void)ctx;
}

int POOL_resize(POOL_ctx* ctx, size_t numThreads) {
    (void)ctx; (void)numThreads;
    return 0;
}

void POOL_add(POOL_ctx* ctx, POOL_function function, void* opaque) {
    (void)ctx;
    function(opaque);
}

int POOL_tryAdd(POOL_ctx* ctx, POOL_function function, void* opaque) {
    (void)ctx;
    function(opaque);
    return 1;
}

size_t POOL_sizeof(const POOL_ctx* ctx) {
    if (ctx==NULL) return 0;  /* supports sizeof NULL */
    assert(ctx == &g_poolCtx);
    return sizeof(*ctx);
}

#endif  /* ZSTD_MULTITHREAD */
90
ext/zstd/lib/common/pool.h
Normal file
@@ -0,0 +1,90 @@
/*
 * Copyright (c) Meta Platforms, Inc. and affiliates.
 * All rights reserved.
 *
 * This source code is licensed under both the BSD-style license (found in the
 * LICENSE file in the root directory of this source tree) and the GPLv2 (found
 * in the COPYING file in the root directory of this source tree).
 * You may select, at your option, one of the above-listed licenses.
 */

#ifndef POOL_H
#define POOL_H

#if defined (__cplusplus)
extern "C" {
#endif


#include "zstd_deps.h"
#define ZSTD_STATIC_LINKING_ONLY   /* ZSTD_customMem */
#include "../zstd.h"

typedef struct POOL_ctx_s POOL_ctx;

/*! POOL_create() :
 *  Create a thread pool with at most `numThreads` threads.
 * `numThreads` must be at least 1.
 *  The maximum number of queued jobs before blocking is `queueSize`.
 * @return : POOL_ctx pointer on success, else NULL.
*/
POOL_ctx* POOL_create(size_t numThreads, size_t queueSize);

POOL_ctx* POOL_create_advanced(size_t numThreads, size_t queueSize,
                               ZSTD_customMem customMem);

/*! POOL_free() :
 *  Free a thread pool returned by POOL_create().
 */
void POOL_free(POOL_ctx* ctx);


/*! POOL_joinJobs() :
 *  Waits for all queued jobs to finish executing.
 */
void POOL_joinJobs(POOL_ctx* ctx);

/*! POOL_resize() :
 *  Expands or shrinks pool's number of threads.
 *  This is more efficient than releasing + creating a new context,
 *  since it tries to preserve and re-use existing threads.
 * `numThreads` must be at least 1.
 * @return : 0 when resize was successful,
 *           !0 (typically 1) if there is an error.
 *    note : only numThreads can be resized, queueSize remains unchanged.
 */
int POOL_resize(POOL_ctx* ctx, size_t numThreads);

/*! POOL_sizeof() :
 * @return threadpool memory usage
 *  note : compatible with NULL (returns 0 in this case)
 */
size_t POOL_sizeof(const POOL_ctx* ctx);

/*! POOL_function :
 *  The function type that can be added to a thread pool.
 */
typedef void (*POOL_function)(void*);

/*! POOL_add() :
 *  Add the job `function(opaque)` to the thread pool. `ctx` must be valid.
 *  Possibly blocks until there is room in the queue.
 *  Note : The function may be executed asynchronously,
 *         therefore, `opaque` must live until function has been completed.
 */
void POOL_add(POOL_ctx* ctx, POOL_function function, void* opaque);


/*! POOL_tryAdd() :
 *  Add the job `function(opaque)` to thread pool _if_ a queue slot is available.
 *  Returns immediately even if not (does not block).
 * @return : 1 if successful, 0 if not.
 */
int POOL_tryAdd(POOL_ctx* ctx, POOL_function function, void* opaque);
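
/* Editorial sketch (not part of zstd): a typical lifecycle of this API,
 * assuming ZSTD_MULTITHREAD; the job payload is an illustrative assumption.
 *
 *     static void double_it(void* opaque) { *(int*)opaque *= 2; }
 *
 *     POOL_ctx* pool = POOL_create(4, 16);   // 4 threads, up to 16 queued jobs
 *     int n = 21;
 *     POOL_add(pool, double_it, &n);         // may block while the queue is full
 *     POOL_joinJobs(pool);                   // wait until all queued work is done
 *     POOL_free(pool);                       // shut down and join the threads
 *
 * Without ZSTD_MULTITHREAD, the same calls run each job synchronously. */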


#if defined (__cplusplus)
}
#endif

#endif
156
ext/zstd/lib/common/portability_macros.h
Normal file
@@ -0,0 +1,156 @@
/*
 * Copyright (c) Meta Platforms, Inc. and affiliates.
 * All rights reserved.
 *
 * This source code is licensed under both the BSD-style license (found in the
 * LICENSE file in the root directory of this source tree) and the GPLv2 (found
 * in the COPYING file in the root directory of this source tree).
 * You may select, at your option, one of the above-listed licenses.
 */

#ifndef ZSTD_PORTABILITY_MACROS_H
#define ZSTD_PORTABILITY_MACROS_H

/**
 * This header file contains macro definitions to support portability.
 * This header is shared between C and ASM code, so it MUST only
 * contain macro definitions. It MUST not contain any C code.
 *
 * This header ONLY defines macros to detect platforms/feature support.
 *
 */


/* compat. with non-clang compilers */
#ifndef __has_attribute
  #define __has_attribute(x) 0
#endif

/* compat. with non-clang compilers */
#ifndef __has_builtin
#  define __has_builtin(x) 0
#endif

/* compat. with non-clang compilers */
#ifndef __has_feature
#  define __has_feature(x) 0
#endif
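
/* Editorial example (not part of zstd): stubbing the probes to 0 keeps
 * feature tests well-formed on compilers that lack them; the LIKELY macro
 * below is a hypothetical illustration, not part of this header:
 *
 *     #if __has_builtin(__builtin_expect)   // simply 0 elsewhere, not an error
 *     #  define LIKELY(x) __builtin_expect(!!(x), 1)
 *     #else
 *     #  define LIKELY(x) (x)
 *     #endif
 */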
|
||||
|
||||
/* detects whether we are being compiled under msan */
|
||||
#ifndef ZSTD_MEMORY_SANITIZER
|
||||
# if __has_feature(memory_sanitizer)
|
||||
# define ZSTD_MEMORY_SANITIZER 1
|
||||
# else
|
||||
# define ZSTD_MEMORY_SANITIZER 0
|
||||
# endif
|
||||
#endif
|
||||
|
||||
/* detects whether we are being compiled under asan */
|
||||
#ifndef ZSTD_ADDRESS_SANITIZER
|
||||
# if __has_feature(address_sanitizer)
|
||||
# define ZSTD_ADDRESS_SANITIZER 1
|
||||
# elif defined(__SANITIZE_ADDRESS__)
|
||||
# define ZSTD_ADDRESS_SANITIZER 1
|
||||
# else
|
||||
# define ZSTD_ADDRESS_SANITIZER 0
|
||||
# endif
|
||||
#endif
|
||||
|
||||
/* detects whether we are being compiled under dfsan */
|
||||
#ifndef ZSTD_DATAFLOW_SANITIZER
|
||||
# if __has_feature(dataflow_sanitizer)
|
||||
# define ZSTD_DATAFLOW_SANITIZER 1
|
||||
# else
|
||||
# define ZSTD_DATAFLOW_SANITIZER 0
|
||||
# endif
|
||||
#endif
|
||||
|
||||
/* Mark the internal assembly functions as hidden */
|
||||
#ifdef __ELF__
|
||||
# define ZSTD_HIDE_ASM_FUNCTION(func) .hidden func
|
||||
#else
|
||||
# define ZSTD_HIDE_ASM_FUNCTION(func)
|
||||
#endif
|
||||
|
||||
/* Enable runtime BMI2 dispatch based on the CPU.
|
||||
* Enabled for clang & gcc >=4.8 on x86 when BMI2 isn't enabled by default.
|
||||
*/
|
||||
#ifndef DYNAMIC_BMI2
|
||||
#if ((defined(__clang__) && __has_attribute(__target__)) \
|
||||
|| (defined(__GNUC__) \
|
||||
&& (__GNUC__ >= 5 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 8)))) \
|
||||
&& (defined(__x86_64__) || defined(_M_X64)) \
|
||||
&& !defined(__BMI2__)
|
||||
# define DYNAMIC_BMI2 1
|
||||
#else
|
||||
# define DYNAMIC_BMI2 0
|
||||
#endif
|
||||
#endif
|
||||
|
||||
/**
|
||||
* Only enable assembly for GNUC compatible compilers,
|
||||
* because other platforms may not support GAS assembly syntax.
|
||||
*
|
||||
* Only enable assembly for Linux / MacOS, other platforms may
|
||||
* work, but they haven't been tested. This could likely be
|
||||
* extended to BSD systems.
|
||||
*
|
||||
* Disable assembly when MSAN is enabled, because MSAN requires
|
||||
* 100% of code to be instrumented to work.
|
||||
*/
|
||||
#if defined(__GNUC__)
|
||||
# if defined(__linux__) || defined(__linux) || defined(__APPLE__)
|
||||
# if ZSTD_MEMORY_SANITIZER
|
||||
# define ZSTD_ASM_SUPPORTED 0
|
||||
# elif ZSTD_DATAFLOW_SANITIZER
|
||||
# define ZSTD_ASM_SUPPORTED 0
|
||||
# else
|
||||
# define ZSTD_ASM_SUPPORTED 1
|
||||
# endif
|
||||
# else
|
||||
# define ZSTD_ASM_SUPPORTED 0
|
||||
# endif
|
||||
#else
|
||||
# define ZSTD_ASM_SUPPORTED 0
|
||||
#endif
|
||||
|
||||
/**
|
||||
* Determines whether we should enable assembly for x86-64
|
||||
* with BMI2.
|
||||
*
|
||||
* Enable if all of the following conditions hold:
|
||||
* - ASM hasn't been explicitly disabled by defining ZSTD_DISABLE_ASM
|
||||
* - Assembly is supported
|
||||
* - We are compiling for x86-64 and either:
|
||||
* - DYNAMIC_BMI2 is enabled
|
||||
* - BMI2 is supported at compile time
|
||||
*/
|
||||
#if !defined(ZSTD_DISABLE_ASM) && \
|
||||
ZSTD_ASM_SUPPORTED && \
|
||||
defined(__x86_64__) && \
|
||||
(DYNAMIC_BMI2 || defined(__BMI2__))
|
||||
# define ZSTD_ENABLE_ASM_X86_64_BMI2 1
|
||||
#else
|
||||
# define ZSTD_ENABLE_ASM_X86_64_BMI2 0
|
||||
#endif
|
||||
|
||||
/*
|
||||
* For x86 ELF targets, add .note.gnu.property section for Intel CET in
|
||||
* assembly sources when CET is enabled.
|
||||
*
|
||||
* Additionally, any function that may be called indirectly must begin
|
||||
* with ZSTD_CET_ENDBRANCH.
|
||||
*/
|
||||
#if defined(__ELF__) && (defined(__x86_64__) || defined(__i386__)) \
|
||||
&& defined(__has_include)
|
||||
# if __has_include(<cet.h>)
|
||||
# include <cet.h>
|
||||
# define ZSTD_CET_ENDBRANCH _CET_ENDBR
|
||||
# endif
|
||||
#endif
|
||||
|
||||
#ifndef ZSTD_CET_ENDBRANCH
|
||||
# define ZSTD_CET_ENDBRANCH
|
||||
#endif
|
||||
|
||||
#endif /* ZSTD_PORTABILITY_MACROS_H */
|
||||
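A side note on the `DYNAMIC_BMI2` macro above: zstd uses it to compile a second, BMI2-targeted copy of hot functions and pick one at runtime. The sketch below is not part of the diff; it is a minimal illustration of that pattern, assuming the vendored `portability_macros.h` is on the include path, with hypothetical function names.

```c
/* Minimal sketch of the DYNAMIC_BMI2 pattern (names are hypothetical,
 * not zstd's): build a BMI2 variant with a target attribute and select
 * it at runtime based on a CPUID check done elsewhere. */
#include <stdint.h>
#include "portability_macros.h"   /* assumed include path */

static uint32_t decode_generic(const uint8_t* p)
{
    return (uint32_t)p[0] | ((uint32_t)p[1] << 8);
}

#if DYNAMIC_BMI2
__attribute__((target("bmi2")))   /* compiler may emit BMI2 instructions here */
static uint32_t decode_bmi2(const uint8_t* p)
{
    return (uint32_t)p[0] | ((uint32_t)p[1] << 8);
}
#endif

uint32_t decode(const uint8_t* p, int cpu_has_bmi2)
{
#if DYNAMIC_BMI2
    if (cpu_has_bmi2) return decode_bmi2(p);   /* one branch per call site */
#endif
    (void)cpu_has_bmi2;
    return decode_generic(p);
}
```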
176
ext/zstd/lib/common/threading.c
vendored
Normal file
@@ -0,0 +1,176 @@
/**
 * Copyright (c) 2016 Tino Reichardt
 * All rights reserved.
 *
 * You can contact the author at:
 * - zstdmt source repository: https://github.com/mcmilk/zstdmt
 *
 * This source code is licensed under both the BSD-style license (found in the
 * LICENSE file in the root directory of this source tree) and the GPLv2 (found
 * in the COPYING file in the root directory of this source tree).
 * You may select, at your option, one of the above-listed licenses.
 */

/**
 * This file will hold wrapper for systems, which do not support pthreads
 */

#include "threading.h"

/* create fake symbol to avoid empty translation unit warning */
int g_ZSTD_threading_useless_symbol;

#if defined(ZSTD_MULTITHREAD) && defined(_WIN32)

/**
 * Windows minimalist Pthread Wrapper
 */


/* ===  Dependencies  === */
#include <process.h>
#include <errno.h>


/* ===  Implementation  === */

typedef struct {
    void* (*start_routine)(void*);
    void* arg;
    int initialized;
    ZSTD_pthread_cond_t initialized_cond;
    ZSTD_pthread_mutex_t initialized_mutex;
} ZSTD_thread_params_t;

static unsigned __stdcall worker(void *arg)
{
    void* (*start_routine)(void*);
    void* thread_arg;

    /* Initialized thread_arg and start_routine and signal main thread that we don't need it
     * to wait any longer.
     */
    {
        ZSTD_thread_params_t* thread_param = (ZSTD_thread_params_t*)arg;
        thread_arg = thread_param->arg;
        start_routine = thread_param->start_routine;

        /* Signal main thread that we are running and do not depend on its memory anymore */
        ZSTD_pthread_mutex_lock(&thread_param->initialized_mutex);
        thread_param->initialized = 1;
        ZSTD_pthread_cond_signal(&thread_param->initialized_cond);
        ZSTD_pthread_mutex_unlock(&thread_param->initialized_mutex);
    }

    start_routine(thread_arg);

    return 0;
}

int ZSTD_pthread_create(ZSTD_pthread_t* thread, const void* unused,
            void* (*start_routine) (void*), void* arg)
{
    ZSTD_thread_params_t thread_param;
    (void)unused;

    thread_param.start_routine = start_routine;
    thread_param.arg = arg;
    thread_param.initialized = 0;
    *thread = NULL;

    /* Setup thread initialization synchronization */
    if(ZSTD_pthread_cond_init(&thread_param.initialized_cond, NULL)) {
        /* Should never happen on Windows */
        return -1;
    }
    if(ZSTD_pthread_mutex_init(&thread_param.initialized_mutex, NULL)) {
        /* Should never happen on Windows */
        ZSTD_pthread_cond_destroy(&thread_param.initialized_cond);
        return -1;
    }

    /* Spawn thread */
    *thread = (HANDLE)_beginthreadex(NULL, 0, worker, &thread_param, 0, NULL);
    if (!thread) {
        ZSTD_pthread_mutex_destroy(&thread_param.initialized_mutex);
        ZSTD_pthread_cond_destroy(&thread_param.initialized_cond);
        return errno;
    }

    /* Wait for thread to be initialized */
    ZSTD_pthread_mutex_lock(&thread_param.initialized_mutex);
    while(!thread_param.initialized) {
        ZSTD_pthread_cond_wait(&thread_param.initialized_cond, &thread_param.initialized_mutex);
    }
    ZSTD_pthread_mutex_unlock(&thread_param.initialized_mutex);
    ZSTD_pthread_mutex_destroy(&thread_param.initialized_mutex);
    ZSTD_pthread_cond_destroy(&thread_param.initialized_cond);

    return 0;
}

int ZSTD_pthread_join(ZSTD_pthread_t thread)
{
    DWORD result;

    if (!thread) return 0;

    result = WaitForSingleObject(thread, INFINITE);
    CloseHandle(thread);

    switch (result) {
    case WAIT_OBJECT_0:
        return 0;
    case WAIT_ABANDONED:
        return EINVAL;
    default:
        return GetLastError();
    }
}

#endif   /* ZSTD_MULTITHREAD */

#if defined(ZSTD_MULTITHREAD) && DEBUGLEVEL >= 1 && !defined(_WIN32)

#define ZSTD_DEPS_NEED_MALLOC
#include "zstd_deps.h"

int ZSTD_pthread_mutex_init(ZSTD_pthread_mutex_t* mutex, pthread_mutexattr_t const* attr)
{
    *mutex = (pthread_mutex_t*)ZSTD_malloc(sizeof(pthread_mutex_t));
    if (!*mutex)
        return 1;
    return pthread_mutex_init(*mutex, attr);
}

int ZSTD_pthread_mutex_destroy(ZSTD_pthread_mutex_t* mutex)
{
    if (!*mutex)
        return 0;
    {
        int const ret = pthread_mutex_destroy(*mutex);
        ZSTD_free(*mutex);
        return ret;
    }
}

int ZSTD_pthread_cond_init(ZSTD_pthread_cond_t* cond, pthread_condattr_t const* attr)
{
    *cond = (pthread_cond_t*)ZSTD_malloc(sizeof(pthread_cond_t));
    if (!*cond)
        return 1;
    return pthread_cond_init(*cond, attr);
}

int ZSTD_pthread_cond_destroy(ZSTD_pthread_cond_t* cond)
{
    if (!*cond)
        return 0;
    {
        int const ret = pthread_cond_destroy(*cond);
        ZSTD_free(*cond);
        return ret;
    }
}

#endif
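For orientation, the point of this wrapper is that calling code is written once against the `ZSTD_pthread_*` names and compiles against Win32 threads, POSIX threads, or the single-threaded stubs. A minimal usage sketch (not from the diff; assumes a build with `ZSTD_MULTITHREAD` defined, since the no-thread fallback deliberately omits `ZSTD_pthread_t`):

```c
/* Sketch: create and join one worker through the wrapper API above. */
#include <stdio.h>
#include "threading.h"   /* assumed include path to the vendored header */

static void* say_hello(void* arg)
{
    printf("hello from worker: %s\n", (const char*)arg);
    return NULL;
}

int run_one_worker(void)
{
    ZSTD_pthread_t t;
    if (ZSTD_pthread_create(&t, NULL, say_hello, (void*)"tmx2gba") != 0)
        return -1;                  /* creation failed */
    return ZSTD_pthread_join(t);    /* reaps the thread on both backends */
}
```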
150
ext/zstd/lib/common/threading.h
Normal file
@@ -0,0 +1,150 @@
/**
 * Copyright (c) 2016 Tino Reichardt
 * All rights reserved.
 *
 * You can contact the author at:
 * - zstdmt source repository: https://github.com/mcmilk/zstdmt
 *
 * This source code is licensed under both the BSD-style license (found in the
 * LICENSE file in the root directory of this source tree) and the GPLv2 (found
 * in the COPYING file in the root directory of this source tree).
 * You may select, at your option, one of the above-listed licenses.
 */

#ifndef THREADING_H_938743
#define THREADING_H_938743

#include "debug.h"

#if defined (__cplusplus)
extern "C" {
#endif

#if defined(ZSTD_MULTITHREAD) && defined(_WIN32)

/**
 * Windows minimalist Pthread Wrapper
 */
#ifdef WINVER
#  undef WINVER
#endif
#define WINVER       0x0600

#ifdef _WIN32_WINNT
#  undef _WIN32_WINNT
#endif
#define _WIN32_WINNT 0x0600

#ifndef WIN32_LEAN_AND_MEAN
#  define WIN32_LEAN_AND_MEAN
#endif

#undef ERROR   /* reported already defined on VS 2015 (Rich Geldreich) */
#include <windows.h>
#undef ERROR
#define ERROR(name) ZSTD_ERROR(name)


/* mutex */
#define ZSTD_pthread_mutex_t           CRITICAL_SECTION
#define ZSTD_pthread_mutex_init(a, b)  ((void)(b), InitializeCriticalSection((a)), 0)
#define ZSTD_pthread_mutex_destroy(a)  DeleteCriticalSection((a))
#define ZSTD_pthread_mutex_lock(a)     EnterCriticalSection((a))
#define ZSTD_pthread_mutex_unlock(a)   LeaveCriticalSection((a))

/* condition variable */
#define ZSTD_pthread_cond_t             CONDITION_VARIABLE
#define ZSTD_pthread_cond_init(a, b)    ((void)(b), InitializeConditionVariable((a)), 0)
#define ZSTD_pthread_cond_destroy(a)    ((void)(a))
#define ZSTD_pthread_cond_wait(a, b)    SleepConditionVariableCS((a), (b), INFINITE)
#define ZSTD_pthread_cond_signal(a)     WakeConditionVariable((a))
#define ZSTD_pthread_cond_broadcast(a)  WakeAllConditionVariable((a))

/* ZSTD_pthread_create() and ZSTD_pthread_join() */
typedef HANDLE ZSTD_pthread_t;

int ZSTD_pthread_create(ZSTD_pthread_t* thread, const void* unused,
                   void* (*start_routine) (void*), void* arg);

int ZSTD_pthread_join(ZSTD_pthread_t thread);

/**
 * add here more wrappers as required
 */


#elif defined(ZSTD_MULTITHREAD)    /* posix assumed ; need a better detection method */
/* ===   POSIX Systems   === */
#  include <pthread.h>

#if DEBUGLEVEL < 1

#define ZSTD_pthread_mutex_t            pthread_mutex_t
#define ZSTD_pthread_mutex_init(a, b)   pthread_mutex_init((a), (b))
#define ZSTD_pthread_mutex_destroy(a)   pthread_mutex_destroy((a))
#define ZSTD_pthread_mutex_lock(a)      pthread_mutex_lock((a))
#define ZSTD_pthread_mutex_unlock(a)    pthread_mutex_unlock((a))

#define ZSTD_pthread_cond_t             pthread_cond_t
#define ZSTD_pthread_cond_init(a, b)    pthread_cond_init((a), (b))
#define ZSTD_pthread_cond_destroy(a)    pthread_cond_destroy((a))
#define ZSTD_pthread_cond_wait(a, b)    pthread_cond_wait((a), (b))
#define ZSTD_pthread_cond_signal(a)     pthread_cond_signal((a))
#define ZSTD_pthread_cond_broadcast(a)  pthread_cond_broadcast((a))

#define ZSTD_pthread_t                  pthread_t
#define ZSTD_pthread_create(a, b, c, d) pthread_create((a), (b), (c), (d))
#define ZSTD_pthread_join(a)            pthread_join((a),NULL)

#else /* DEBUGLEVEL >= 1 */

/* Debug implementation of threading.
 * In this implementation we use pointers for mutexes and condition variables.
 * This way, if we forget to init/destroy them the program will crash or ASAN
 * will report leaks.
 */

#define ZSTD_pthread_mutex_t            pthread_mutex_t*
int ZSTD_pthread_mutex_init(ZSTD_pthread_mutex_t* mutex, pthread_mutexattr_t const* attr);
int ZSTD_pthread_mutex_destroy(ZSTD_pthread_mutex_t* mutex);
#define ZSTD_pthread_mutex_lock(a)      pthread_mutex_lock(*(a))
#define ZSTD_pthread_mutex_unlock(a)    pthread_mutex_unlock(*(a))

#define ZSTD_pthread_cond_t             pthread_cond_t*
int ZSTD_pthread_cond_init(ZSTD_pthread_cond_t* cond, pthread_condattr_t const* attr);
int ZSTD_pthread_cond_destroy(ZSTD_pthread_cond_t* cond);
#define ZSTD_pthread_cond_wait(a, b)    pthread_cond_wait(*(a), *(b))
#define ZSTD_pthread_cond_signal(a)     pthread_cond_signal(*(a))
#define ZSTD_pthread_cond_broadcast(a)  pthread_cond_broadcast(*(a))

#define ZSTD_pthread_t                  pthread_t
#define ZSTD_pthread_create(a, b, c, d) pthread_create((a), (b), (c), (d))
#define ZSTD_pthread_join(a)            pthread_join((a),NULL)

#endif

#else  /* ZSTD_MULTITHREAD not defined */
/* No multithreading support */

typedef int ZSTD_pthread_mutex_t;
#define ZSTD_pthread_mutex_init(a, b)   ((void)(a), (void)(b), 0)
#define ZSTD_pthread_mutex_destroy(a)   ((void)(a))
#define ZSTD_pthread_mutex_lock(a)      ((void)(a))
#define ZSTD_pthread_mutex_unlock(a)    ((void)(a))

typedef int ZSTD_pthread_cond_t;
#define ZSTD_pthread_cond_init(a, b)    ((void)(a), (void)(b), 0)
#define ZSTD_pthread_cond_destroy(a)    ((void)(a))
#define ZSTD_pthread_cond_wait(a, b)    ((void)(a), (void)(b))
#define ZSTD_pthread_cond_signal(a)     ((void)(a))
#define ZSTD_pthread_cond_broadcast(a)  ((void)(a))

/* do not use ZSTD_pthread_t */

#endif /* ZSTD_MULTITHREAD */

#if defined (__cplusplus)
}
#endif

#endif /* THREADING_H_938743 */
24
ext/zstd/lib/common/xxhash.c
vendored
Normal file
@@ -0,0 +1,24 @@
/*
 * xxHash - Fast Hash algorithm
 * Copyright (c) Meta Platforms, Inc. and affiliates.
 *
 * You can contact the author at :
 * - xxHash homepage: https://cyan4973.github.io/xxHash/
 * - xxHash source repository : https://github.com/Cyan4973/xxHash
 *
 * This source code is licensed under both the BSD-style license (found in the
 * LICENSE file in the root directory of this source tree) and the GPLv2 (found
 * in the COPYING file in the root directory of this source tree).
 * You may select, at your option, one of the above-listed licenses.
 */



/*
 * xxhash.c instantiates functions defined in xxhash.h
 */

#define XXH_STATIC_LINKING_ONLY   /* access advanced declarations */
#define XXH_IMPLEMENTATION   /* access definitions */

#include "xxhash.h"
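The two defines above are the whole trick: `XXH_IMPLEMENTATION` is set in exactly one translation unit, so the header-only xxHash library is compiled once and every other file just includes the header. A usage sketch (not from the diff; the `hash_buffer` wrapper is hypothetical, the `XXH64` one-shot API is xxHash's own):

```c
/* Sketch: one-shot XXH64 hashing from any other translation unit. */
#include <stddef.h>
#define XXH_STATIC_LINKING_ONLY
#include "xxhash.h"   /* assumed include path to the vendored header */

unsigned long long hash_buffer(const void* data, size_t size)
{
    return XXH64(data, size, 0);   /* seed 0, as zstd uses for frame checksums */
}
```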
5686
ext/zstd/lib/common/xxhash.h
Normal file
File diff suppressed because it is too large
48
ext/zstd/lib/common/zstd_common.c
vendored
Normal file
@@ -0,0 +1,48 @@
/*
 * Copyright (c) Meta Platforms, Inc. and affiliates.
 * All rights reserved.
 *
 * This source code is licensed under both the BSD-style license (found in the
 * LICENSE file in the root directory of this source tree) and the GPLv2 (found
 * in the COPYING file in the root directory of this source tree).
 * You may select, at your option, one of the above-listed licenses.
 */



/*-*************************************
*  Dependencies
***************************************/
#define ZSTD_DEPS_NEED_MALLOC
#include "error_private.h"
#include "zstd_internal.h"


/*-****************************************
*  Version
******************************************/
unsigned ZSTD_versionNumber(void) { return ZSTD_VERSION_NUMBER; }

const char* ZSTD_versionString(void) { return ZSTD_VERSION_STRING; }


/*-****************************************
*  ZSTD Error Management
******************************************/
#undef ZSTD_isError   /* defined within zstd_internal.h */
/*! ZSTD_isError() :
 *  tells if a return value is an error code
 *  symbol is required for external callers */
unsigned ZSTD_isError(size_t code) { return ERR_isError(code); }

/*! ZSTD_getErrorName() :
 *  provides error code string from function result (useful for debugging) */
const char* ZSTD_getErrorName(size_t code) { return ERR_getErrorName(code); }

/*! ZSTD_getError() :
 *  convert a `size_t` function result into a proper ZSTD_errorCode enum */
ZSTD_ErrorCode ZSTD_getErrorCode(size_t code) { return ERR_getErrorCode(code); }

/*! ZSTD_getErrorString() :
 *  provides error code string from enum */
const char* ZSTD_getErrorString(ZSTD_ErrorCode code) { return ERR_getErrorString(code); }
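These four helpers exist because zstd functions return a `size_t` that doubles as an error code. The intended call pattern, sketched with the public API (not from the diff; `checked_compress` is a hypothetical wrapper):

```c
/* Sketch: check a zstd size_t result before trusting it. */
#include <stdio.h>
#include "zstd.h"   /* public API: ZSTD_compress, ZSTD_isError, ZSTD_getErrorName */

size_t checked_compress(void* dst, size_t dstCap, const void* src, size_t srcSize)
{
    size_t const r = ZSTD_compress(dst, dstCap, src, srcSize, 3 /* level */);
    if (ZSTD_isError(r))
        fprintf(stderr, "zstd: %s\n", ZSTD_getErrorName(r));
    return r;   /* still the error code on failure; caller must re-check */
}
```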
111
ext/zstd/lib/common/zstd_deps.h
Normal file
@@ -0,0 +1,111 @@
/*
 * Copyright (c) Meta Platforms, Inc. and affiliates.
 * All rights reserved.
 *
 * This source code is licensed under both the BSD-style license (found in the
 * LICENSE file in the root directory of this source tree) and the GPLv2 (found
 * in the COPYING file in the root directory of this source tree).
 * You may select, at your option, one of the above-listed licenses.
 */

/* This file provides common libc dependencies that zstd requires.
 * The purpose is to allow replacing this file with a custom implementation
 * to compile zstd without libc support.
 */

/* Need:
 * NULL
 * INT_MAX
 * UINT_MAX
 * ZSTD_memcpy()
 * ZSTD_memset()
 * ZSTD_memmove()
 */
#ifndef ZSTD_DEPS_COMMON
#define ZSTD_DEPS_COMMON

#include <limits.h>
#include <stddef.h>
#include <string.h>

#if defined(__GNUC__) && __GNUC__ >= 4
# define ZSTD_memcpy(d,s,l) __builtin_memcpy((d),(s),(l))
# define ZSTD_memmove(d,s,l) __builtin_memmove((d),(s),(l))
# define ZSTD_memset(p,v,l) __builtin_memset((p),(v),(l))
#else
# define ZSTD_memcpy(d,s,l) memcpy((d),(s),(l))
# define ZSTD_memmove(d,s,l) memmove((d),(s),(l))
# define ZSTD_memset(p,v,l) memset((p),(v),(l))
#endif

#endif /* ZSTD_DEPS_COMMON */

/* Need:
 * ZSTD_malloc()
 * ZSTD_free()
 * ZSTD_calloc()
 */
#ifdef ZSTD_DEPS_NEED_MALLOC
#ifndef ZSTD_DEPS_MALLOC
#define ZSTD_DEPS_MALLOC

#include <stdlib.h>

#define ZSTD_malloc(s) malloc(s)
#define ZSTD_calloc(n,s) calloc((n), (s))
#define ZSTD_free(p) free((p))

#endif /* ZSTD_DEPS_MALLOC */
#endif /* ZSTD_DEPS_NEED_MALLOC */

/*
 * Provides 64-bit math support.
 * Need:
 * U64 ZSTD_div64(U64 dividend, U32 divisor)
 */
#ifdef ZSTD_DEPS_NEED_MATH64
#ifndef ZSTD_DEPS_MATH64
#define ZSTD_DEPS_MATH64

#define ZSTD_div64(dividend, divisor) ((dividend) / (divisor))

#endif /* ZSTD_DEPS_MATH64 */
#endif /* ZSTD_DEPS_NEED_MATH64 */

/* Need:
 * assert()
 */
#ifdef ZSTD_DEPS_NEED_ASSERT
#ifndef ZSTD_DEPS_ASSERT
#define ZSTD_DEPS_ASSERT

#include <assert.h>

#endif /* ZSTD_DEPS_ASSERT */
#endif /* ZSTD_DEPS_NEED_ASSERT */

/* Need:
 * ZSTD_DEBUG_PRINT()
 */
#ifdef ZSTD_DEPS_NEED_IO
#ifndef ZSTD_DEPS_IO
#define ZSTD_DEPS_IO

#include <stdio.h>
#define ZSTD_DEBUG_PRINT(...) fprintf(stderr, __VA_ARGS__)

#endif /* ZSTD_DEPS_IO */
#endif /* ZSTD_DEPS_NEED_IO */

/* Only requested when <stdint.h> is known to be present.
 * Need:
 * intptr_t
 */
#ifdef ZSTD_DEPS_NEED_STDINT
#ifndef ZSTD_DEPS_STDINT
#define ZSTD_DEPS_STDINT

#include <stdint.h>

#endif /* ZSTD_DEPS_STDINT */
#endif /* ZSTD_DEPS_NEED_STDINT */
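As the header's own comment says, the file is designed to be swapped out wholesale for libc-free builds. A sketch of what the malloc section of such a replacement could look like, keeping the same guard structure; the `my_arena_*` functions are hypothetical stand-ins, not anything zstd provides:

```c
/* Hypothetical replacement for the ZSTD_DEPS_NEED_MALLOC section,
 * routing zstd's allocations into a custom arena allocator. */
#ifdef ZSTD_DEPS_NEED_MALLOC
#ifndef ZSTD_DEPS_MALLOC
#define ZSTD_DEPS_MALLOC

#include <stddef.h>
void* my_arena_alloc(size_t size);               /* hypothetical */
void* my_arena_calloc(size_t n, size_t size);    /* hypothetical, zero-filled */
void  my_arena_free(void* ptr);                  /* hypothetical */

#define ZSTD_malloc(s)   my_arena_alloc(s)
#define ZSTD_calloc(n,s) my_arena_calloc((n),(s))
#define ZSTD_free(p)     my_arena_free(p)

#endif /* ZSTD_DEPS_MALLOC */
#endif /* ZSTD_DEPS_NEED_MALLOC */
```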
392
ext/zstd/lib/common/zstd_internal.h
Normal file
@@ -0,0 +1,392 @@
/*
 * Copyright (c) Meta Platforms, Inc. and affiliates.
 * All rights reserved.
 *
 * This source code is licensed under both the BSD-style license (found in the
 * LICENSE file in the root directory of this source tree) and the GPLv2 (found
 * in the COPYING file in the root directory of this source tree).
 * You may select, at your option, one of the above-listed licenses.
 */

#ifndef ZSTD_CCOMMON_H_MODULE
#define ZSTD_CCOMMON_H_MODULE

/* this module contains definitions which must be identical
 * across compression, decompression and dictBuilder.
 * It also contains a few functions useful to at least 2 of them
 * and which benefit from being inlined */

/*-*************************************
*  Dependencies
***************************************/
#include "compiler.h"
#include "cpu.h"
#include "mem.h"
#include "debug.h"                 /* assert, DEBUGLOG, RAWLOG, g_debuglevel */
#include "error_private.h"
#define ZSTD_STATIC_LINKING_ONLY
#include "../zstd.h"
#define FSE_STATIC_LINKING_ONLY
#include "fse.h"
#include "huf.h"
#ifndef XXH_STATIC_LINKING_ONLY
#  define XXH_STATIC_LINKING_ONLY  /* XXH64_state_t */
#endif
#include "xxhash.h"                /* XXH_reset, update, digest */
#ifndef ZSTD_NO_TRACE
#  include "zstd_trace.h"
#else
#  define ZSTD_TRACE 0
#endif

#if defined (__cplusplus)
extern "C" {
#endif

/* ---- static assert (debug) --- */
#define ZSTD_STATIC_ASSERT(c) DEBUG_STATIC_ASSERT(c)
#define ZSTD_isError ERR_isError   /* for inlining */
#define FSE_isError  ERR_isError
#define HUF_isError  ERR_isError


/*-*************************************
*  shared macros
***************************************/
#undef MIN
#undef MAX
#define MIN(a,b) ((a)<(b) ? (a) : (b))
#define MAX(a,b) ((a)>(b) ? (a) : (b))
#define BOUNDED(min,val,max) (MAX(min,MIN(val,max)))


/*-*************************************
*  Common constants
***************************************/
#define ZSTD_OPT_NUM    (1<<12)

#define ZSTD_REP_NUM      3                 /* number of repcodes */
static UNUSED_ATTR const U32 repStartValue[ZSTD_REP_NUM] = { 1, 4, 8 };

#define KB *(1 <<10)
#define MB *(1 <<20)
#define GB *(1U<<30)

#define BIT7 128
#define BIT6  64
#define BIT5  32
#define BIT4  16
#define BIT1   2
#define BIT0   1

#define ZSTD_WINDOWLOG_ABSOLUTEMIN 10
static UNUSED_ATTR const size_t ZSTD_fcs_fieldSize[4] = { 0, 2, 4, 8 };
static UNUSED_ATTR const size_t ZSTD_did_fieldSize[4] = { 0, 1, 2, 4 };

#define ZSTD_FRAMEIDSIZE 4   /* magic number size */

#define ZSTD_BLOCKHEADERSIZE 3   /* C standard doesn't allow `static const` variable to be init using another `static const` variable */
static UNUSED_ATTR const size_t ZSTD_blockHeaderSize = ZSTD_BLOCKHEADERSIZE;
typedef enum { bt_raw, bt_rle, bt_compressed, bt_reserved } blockType_e;

#define ZSTD_FRAMECHECKSUMSIZE 4

#define MIN_SEQUENCES_SIZE 1 /* nbSeq==0 */
#define MIN_CBLOCK_SIZE (1 /*litCSize*/ + 1 /* RLE or RAW */)   /* for a non-null block */
#define MIN_LITERALS_FOR_4_STREAMS 6

typedef enum { set_basic, set_rle, set_compressed, set_repeat } symbolEncodingType_e;

#define LONGNBSEQ 0x7F00

#define MINMATCH 3

#define Litbits  8
#define LitHufLog 11
#define MaxLit ((1<<Litbits) - 1)
#define MaxML   52
#define MaxLL   35
#define DefaultMaxOff 28
#define MaxOff  31
#define MaxSeq MAX(MaxLL, MaxML)   /* Assumption : MaxOff < MaxLL,MaxML */
#define MLFSELog    9
#define LLFSELog    9
#define OffFSELog   8
#define MaxFSELog  MAX(MAX(MLFSELog, LLFSELog), OffFSELog)
#define MaxMLBits 16
#define MaxLLBits 16

#define ZSTD_MAX_HUF_HEADER_SIZE 128 /* header + <= 127 byte tree description */
/* Each table cannot take more than #symbols * FSELog bits */
#define ZSTD_MAX_FSE_HEADERS_SIZE (((MaxML + 1) * MLFSELog + (MaxLL + 1) * LLFSELog + (MaxOff + 1) * OffFSELog + 7) / 8)

static UNUSED_ATTR const U8 LL_bits[MaxLL+1] = {
     0, 0, 0, 0, 0, 0, 0, 0,
     0, 0, 0, 0, 0, 0, 0, 0,
     1, 1, 1, 1, 2, 2, 3, 3,
     4, 6, 7, 8, 9,10,11,12,
    13,14,15,16
};
static UNUSED_ATTR const S16 LL_defaultNorm[MaxLL+1] = {
     4, 3, 2, 2, 2, 2, 2, 2,
     2, 2, 2, 2, 2, 1, 1, 1,
     2, 2, 2, 2, 2, 2, 2, 2,
     2, 3, 2, 1, 1, 1, 1, 1,
    -1,-1,-1,-1
};
#define LL_DEFAULTNORMLOG 6  /* for static allocation */
static UNUSED_ATTR const U32 LL_defaultNormLog = LL_DEFAULTNORMLOG;

static UNUSED_ATTR const U8 ML_bits[MaxML+1] = {
     0, 0, 0, 0, 0, 0, 0, 0,
     0, 0, 0, 0, 0, 0, 0, 0,
     0, 0, 0, 0, 0, 0, 0, 0,
     0, 0, 0, 0, 0, 0, 0, 0,
     1, 1, 1, 1, 2, 2, 3, 3,
     4, 4, 5, 7, 8, 9,10,11,
    12,13,14,15,16
};
static UNUSED_ATTR const S16 ML_defaultNorm[MaxML+1] = {
     1, 4, 3, 2, 2, 2, 2, 2,
     2, 1, 1, 1, 1, 1, 1, 1,
     1, 1, 1, 1, 1, 1, 1, 1,
     1, 1, 1, 1, 1, 1, 1, 1,
     1, 1, 1, 1, 1, 1, 1, 1,
     1, 1, 1, 1, 1, 1,-1,-1,
    -1,-1,-1,-1,-1
};
#define ML_DEFAULTNORMLOG 6  /* for static allocation */
static UNUSED_ATTR const U32 ML_defaultNormLog = ML_DEFAULTNORMLOG;

static UNUSED_ATTR const S16 OF_defaultNorm[DefaultMaxOff+1] = {
     1, 1, 1, 1, 1, 1, 2, 2,
     2, 1, 1, 1, 1, 1, 1, 1,
     1, 1, 1, 1, 1, 1, 1, 1,
    -1,-1,-1,-1,-1
};
#define OF_DEFAULTNORMLOG 5  /* for static allocation */
static UNUSED_ATTR const U32 OF_defaultNormLog = OF_DEFAULTNORMLOG;


/*-*******************************************
*  Shared functions to include for inlining
*********************************************/
static void ZSTD_copy8(void* dst, const void* src) {
#if defined(ZSTD_ARCH_ARM_NEON)
    vst1_u8((uint8_t*)dst, vld1_u8((const uint8_t*)src));
#else
    ZSTD_memcpy(dst, src, 8);
#endif
}
#define COPY8(d,s) { ZSTD_copy8(d,s); d+=8; s+=8; }

/* Need to use memmove here since the literal buffer can now be located within
   the dst buffer. In circumstances where the op "catches up" to where the
   literal buffer is, there can be partial overlaps in this call on the final
   copy if the literal is being shifted by less than 16 bytes. */
static void ZSTD_copy16(void* dst, const void* src) {
#if defined(ZSTD_ARCH_ARM_NEON)
    vst1q_u8((uint8_t*)dst, vld1q_u8((const uint8_t*)src));
#elif defined(ZSTD_ARCH_X86_SSE2)
    _mm_storeu_si128((__m128i*)dst, _mm_loadu_si128((const __m128i*)src));
#elif defined(__clang__)
    ZSTD_memmove(dst, src, 16);
#else
    /* ZSTD_memmove is not inlined properly by gcc */
    BYTE copy16_buf[16];
    ZSTD_memcpy(copy16_buf, src, 16);
    ZSTD_memcpy(dst, copy16_buf, 16);
#endif
}
#define COPY16(d,s) { ZSTD_copy16(d,s); d+=16; s+=16; }

#define WILDCOPY_OVERLENGTH 32
#define WILDCOPY_VECLEN 16

typedef enum {
    ZSTD_no_overlap,
    ZSTD_overlap_src_before_dst
    /*  ZSTD_overlap_dst_before_src, */
} ZSTD_overlap_e;

/*! ZSTD_wildcopy() :
 *  Custom version of ZSTD_memcpy(), can over read/write up to WILDCOPY_OVERLENGTH bytes (if length==0)
 *  @param ovtype controls the overlap detection
 *         - ZSTD_no_overlap: The source and destination are guaranteed to be at least WILDCOPY_VECLEN bytes apart.
 *         - ZSTD_overlap_src_before_dst: The src and dst may overlap, but they MUST be at least 8 bytes apart.
 *           The src buffer must be before the dst buffer.
 */
MEM_STATIC FORCE_INLINE_ATTR
void ZSTD_wildcopy(void* dst, const void* src, ptrdiff_t length, ZSTD_overlap_e const ovtype)
{
    ptrdiff_t diff = (BYTE*)dst - (const BYTE*)src;
    const BYTE* ip = (const BYTE*)src;
    BYTE* op = (BYTE*)dst;
    BYTE* const oend = op + length;

    if (ovtype == ZSTD_overlap_src_before_dst && diff < WILDCOPY_VECLEN) {
        /* Handle short offset copies. */
        do {
            COPY8(op, ip)
        } while (op < oend);
    } else {
        assert(diff >= WILDCOPY_VECLEN || diff <= -WILDCOPY_VECLEN);
        /* Separate out the first COPY16() call because the copy length is
         * almost certain to be short, so the branches have different
         * probabilities. Since it is almost certain to be short, only do
         * one COPY16() in the first call. Then, do two calls per loop since
         * at that point it is more likely to have a high trip count.
         */
        ZSTD_copy16(op, ip);
        if (16 >= length) return;
        op += 16;
        ip += 16;
        do {
            COPY16(op, ip);
            COPY16(op, ip);
        }
        while (op < oend);
    }
}

MEM_STATIC size_t ZSTD_limitCopy(void* dst, size_t dstCapacity, const void* src, size_t srcSize)
{
    size_t const length = MIN(dstCapacity, srcSize);
    if (length > 0) {
        ZSTD_memcpy(dst, src, length);
    }
    return length;
}

/* define "workspace is too large" as this number of times larger than needed */
#define ZSTD_WORKSPACETOOLARGE_FACTOR 3

/* when workspace is continuously too large
 * during at least this number of times,
 * context's memory usage is considered wasteful,
 * because it's sized to handle a worst case scenario which rarely happens.
 * In which case, resize it down to free some memory */
#define ZSTD_WORKSPACETOOLARGE_MAXDURATION 128

/* Controls whether the input/output buffer is buffered or stable. */
typedef enum {
    ZSTD_bm_buffered = 0,  /* Buffer the input/output */
    ZSTD_bm_stable = 1     /* ZSTD_inBuffer/ZSTD_outBuffer is stable */
} ZSTD_bufferMode_e;


/*-*******************************************
*  Private declarations
*********************************************/
typedef struct seqDef_s {
    U32 offBase;   /* offBase == Offset + ZSTD_REP_NUM, or repcode 1,2,3 */
    U16 litLength;
    U16 mlBase;    /* mlBase == matchLength - MINMATCH */
} seqDef;

/* Controls whether seqStore has a single "long" litLength or matchLength. See seqStore_t. */
typedef enum {
    ZSTD_llt_none = 0,             /* no longLengthType */
    ZSTD_llt_literalLength = 1,    /* represents a long literal */
    ZSTD_llt_matchLength = 2       /* represents a long match */
} ZSTD_longLengthType_e;

typedef struct {
    seqDef* sequencesStart;
    seqDef* sequences;      /* ptr to end of sequences */
    BYTE*  litStart;
    BYTE*  lit;             /* ptr to end of literals */
    BYTE*  llCode;
    BYTE*  mlCode;
    BYTE*  ofCode;
    size_t maxNbSeq;
    size_t maxNbLit;

    /* longLengthPos and longLengthType to allow us to represent either a single litLength or matchLength
     * in the seqStore that has a value larger than U16 (if it exists). To do so, we increment
     * the existing value of the litLength or matchLength by 0x10000.
     */
    ZSTD_longLengthType_e longLengthType;
    U32                   longLengthPos;  /* Index of the sequence to apply long length modification to */
} seqStore_t;

typedef struct {
    U32 litLength;
    U32 matchLength;
} ZSTD_sequenceLength;

/**
 * Returns the ZSTD_sequenceLength for the given sequences. It handles the decoding of long sequences
 * indicated by longLengthPos and longLengthType, and adds MINMATCH back to matchLength.
 */
MEM_STATIC ZSTD_sequenceLength ZSTD_getSequenceLength(seqStore_t const* seqStore, seqDef const* seq)
{
    ZSTD_sequenceLength seqLen;
    seqLen.litLength = seq->litLength;
    seqLen.matchLength = seq->mlBase + MINMATCH;
    if (seqStore->longLengthPos == (U32)(seq - seqStore->sequencesStart)) {
        if (seqStore->longLengthType == ZSTD_llt_literalLength) {
            seqLen.litLength += 0x10000;
        }
        if (seqStore->longLengthType == ZSTD_llt_matchLength) {
            seqLen.matchLength += 0x10000;
        }
    }
    return seqLen;
}

/**
 * Contains the compressed frame size and an upper-bound for the decompressed frame size.
 * Note: before using `compressedSize`, check for errors using ZSTD_isError().
 *       similarly, before using `decompressedBound`, check for errors using:
 *          `decompressedBound != ZSTD_CONTENTSIZE_ERROR`
 */
typedef struct {
    size_t nbBlocks;
    size_t compressedSize;
    unsigned long long decompressedBound;
} ZSTD_frameSizeInfo;   /* decompress & legacy */

const seqStore_t* ZSTD_getSeqStore(const ZSTD_CCtx* ctx);   /* compress & dictBuilder */
int ZSTD_seqToCodes(const seqStore_t* seqStorePtr);   /* compress, dictBuilder, decodeCorpus (shouldn't get its definition from here) */

/* ZSTD_invalidateRepCodes() :
 * ensures next compression will not use repcodes from previous block.
 * Note : only works with regular variant;
 *        do not use with extDict variant ! */
void ZSTD_invalidateRepCodes(ZSTD_CCtx* cctx);   /* zstdmt, adaptive_compression (shouldn't get this definition from here) */

typedef struct {
    blockType_e blockType;
    U32 lastBlock;
    U32 origSize;
} blockProperties_t;   /* declared here for decompress and fullbench */

/*! ZSTD_getcBlockSize() :
 *  Provides the size of compressed block from block header `src` */
/*  Used by: decompress, fullbench (does not get its definition from here) */
size_t ZSTD_getcBlockSize(const void* src, size_t srcSize,
                          blockProperties_t* bpPtr);

/*! ZSTD_decodeSeqHeaders() :
 *  decode sequence header from src */
/*  Used by: decompress, fullbench (does not get its definition from here) */
size_t ZSTD_decodeSeqHeaders(ZSTD_DCtx* dctx, int* nbSeqPtr,
                       const void* src, size_t srcSize);

/**
 * @returns true iff the CPU supports dynamic BMI2 dispatch.
 */
MEM_STATIC int ZSTD_cpuSupportsBmi2(void)
{
    ZSTD_cpuid_t cpuid = ZSTD_cpuid();
    return ZSTD_cpuid_bmi1(cpuid) && ZSTD_cpuid_bmi2(cpuid);
}

#if defined (__cplusplus)
}
#endif

#endif   /* ZSTD_CCOMMON_H_MODULE */
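The `ZSTD_wildcopy()` contract above is worth spelling out: because copies run in fixed 16-byte chunks, up to `WILDCOPY_OVERLENGTH` bytes past `length` may be read or written, so callers must reserve that much slack at the end of the buffers. A minimal standalone sketch of the idea (not zstd's implementation, just the over-copy pattern):

```c
/* Sketch of the wildcopy idea: copy in fixed-size chunks and allow the
 * last chunk to overshoot `length`. The destination must have at least
 * VECLEN bytes of slack beyond `length`; with length==0 one chunk is
 * still written, matching the contract documented above. */
#include <string.h>

#define VECLEN 16

static void wildcopy_sketch(unsigned char* dst, const unsigned char* src, size_t length)
{
    size_t i = 0;
    do {
        memcpy(dst + i, src + i, VECLEN);   /* may pass `length`, never the slack */
        i += VECLEN;
    } while (i < length);
}
```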
163
ext/zstd/lib/common/zstd_trace.h
Normal file
@@ -0,0 +1,163 @@
/*
 * Copyright (c) Meta Platforms, Inc. and affiliates.
 * All rights reserved.
 *
 * This source code is licensed under both the BSD-style license (found in the
 * LICENSE file in the root directory of this source tree) and the GPLv2 (found
 * in the COPYING file in the root directory of this source tree).
 * You may select, at your option, one of the above-listed licenses.
 */

#ifndef ZSTD_TRACE_H
#define ZSTD_TRACE_H

#if defined (__cplusplus)
extern "C" {
#endif

#include <stddef.h>

/* weak symbol support
 * For now, enable conservatively:
 * - Only GNUC
 * - Only ELF
 * - Only x86-64, i386 and aarch64
 * Also, explicitly disable on platforms known not to work so they aren't
 * forgotten in the future.
 */
#if !defined(ZSTD_HAVE_WEAK_SYMBOLS) && \
    defined(__GNUC__) && defined(__ELF__) && \
    (defined(__x86_64__) || defined(_M_X64) || defined(__i386__) || defined(_M_IX86) || defined(__aarch64__)) && \
    !defined(__APPLE__) && !defined(_WIN32) && !defined(__MINGW32__) && \
    !defined(__CYGWIN__) && !defined(_AIX)
#  define ZSTD_HAVE_WEAK_SYMBOLS 1
#else
#  define ZSTD_HAVE_WEAK_SYMBOLS 0
#endif
#if ZSTD_HAVE_WEAK_SYMBOLS
#  define ZSTD_WEAK_ATTR __attribute__((__weak__))
#else
#  define ZSTD_WEAK_ATTR
#endif

/* Only enable tracing when weak symbols are available. */
#ifndef ZSTD_TRACE
#  define ZSTD_TRACE ZSTD_HAVE_WEAK_SYMBOLS
#endif

#if ZSTD_TRACE

struct ZSTD_CCtx_s;
struct ZSTD_DCtx_s;
struct ZSTD_CCtx_params_s;

typedef struct {
    /**
     * ZSTD_VERSION_NUMBER
     *
     * This is guaranteed to be the first member of ZSTD_trace.
     * Otherwise, this struct is not stable between versions. If
     * the version number does not match your expectation, you
     * should not interpret the rest of the struct.
     */
    unsigned version;
    /**
     * Non-zero if streaming (de)compression is used.
     */
    unsigned streaming;
    /**
     * The dictionary ID.
     */
    unsigned dictionaryID;
    /**
     * Is the dictionary cold?
     * Only set on decompression.
     */
    unsigned dictionaryIsCold;
    /**
     * The dictionary size or zero if no dictionary.
     */
    size_t dictionarySize;
    /**
     * The uncompressed size of the data.
     */
    size_t uncompressedSize;
    /**
     * The compressed size of the data.
     */
    size_t compressedSize;
    /**
     * The fully resolved CCtx parameters (NULL on decompression).
     */
    struct ZSTD_CCtx_params_s const* params;
    /**
     * The ZSTD_CCtx pointer (NULL on decompression).
     */
    struct ZSTD_CCtx_s const* cctx;
    /**
     * The ZSTD_DCtx pointer (NULL on compression).
     */
    struct ZSTD_DCtx_s const* dctx;
} ZSTD_Trace;

/**
 * A tracing context. It must be 0 when tracing is disabled.
 * Otherwise, any non-zero value returned by a tracing begin()
 * function is presented to any subsequent calls to end().
 *
 * Any non-zero value is treated as tracing is enabled and not
 * interpreted by the library.
 *
 * Two possible uses are:
 * * A timestamp for when the begin() function was called.
 * * A unique key identifying the (de)compression, like the
 *   address of the [dc]ctx pointer if you need to track
 *   more information than just a timestamp.
 */
typedef unsigned long long ZSTD_TraceCtx;

/**
 * Trace the beginning of a compression call.
 * @param cctx The dctx pointer for the compression.
 *             It can be used as a key to map begin() to end().
 * @returns Non-zero if tracing is enabled. The return value is
 *          passed to ZSTD_trace_compress_end().
 */
ZSTD_WEAK_ATTR ZSTD_TraceCtx ZSTD_trace_compress_begin(
    struct ZSTD_CCtx_s const* cctx);

/**
 * Trace the end of a compression call.
 * @param ctx The return value of ZSTD_trace_compress_begin().
 * @param trace The zstd tracing info.
 */
ZSTD_WEAK_ATTR void ZSTD_trace_compress_end(
    ZSTD_TraceCtx ctx,
    ZSTD_Trace const* trace);

/**
 * Trace the beginning of a decompression call.
 * @param dctx The dctx pointer for the decompression.
 *             It can be used as a key to map begin() to end().
 * @returns Non-zero if tracing is enabled. The return value is
 *          passed to ZSTD_trace_compress_end().
 */
ZSTD_WEAK_ATTR ZSTD_TraceCtx ZSTD_trace_decompress_begin(
    struct ZSTD_DCtx_s const* dctx);

/**
 * Trace the end of a decompression call.
 * @param ctx The return value of ZSTD_trace_decompress_begin().
 * @param trace The zstd tracing info.
 */
ZSTD_WEAK_ATTR void ZSTD_trace_decompress_end(
    ZSTD_TraceCtx ctx,
    ZSTD_Trace const* trace);

#endif /* ZSTD_TRACE */

#if defined (__cplusplus)
}
#endif

#endif /* ZSTD_TRACE_H */
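Because the hooks above are declared weak, an application can supply strong definitions to observe every (de)compression without patching the library. A sketch of one way to use the begin/end pair, carrying a start timestamp in the trace context; it assumes an ELF/GNUC build where `ZSTD_TRACE` is 1, and glosses over the theoretical case of `clock()` returning 0 (which the library would read as "tracing disabled"):

```c
/* Sketch: strong definitions that override the weak trace hooks to time
 * compression calls. */
#include <stdio.h>
#include <time.h>
#include "zstd_trace.h"   /* assumed include path to the vendored header */

ZSTD_TraceCtx ZSTD_trace_compress_begin(struct ZSTD_CCtx_s const* cctx)
{
    (void)cctx;
    return (ZSTD_TraceCtx)clock();   /* non-zero value enables tracing */
}

void ZSTD_trace_compress_end(ZSTD_TraceCtx ctx, ZSTD_Trace const* trace)
{
    double const secs = (double)(clock() - (clock_t)ctx) / CLOCKS_PER_SEC;
    fprintf(stderr, "compressed %zu -> %zu bytes in %.3fs\n",
            trace->uncompressedSize, trace->compressedSize, secs);
}
```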
134
ext/zstd/lib/compress/clevels.h
Normal file
@@ -0,0 +1,134 @@
/*
 * Copyright (c) Meta Platforms, Inc. and affiliates.
 * All rights reserved.
 *
 * This source code is licensed under both the BSD-style license (found in the
 * LICENSE file in the root directory of this source tree) and the GPLv2 (found
 * in the COPYING file in the root directory of this source tree).
 * You may select, at your option, one of the above-listed licenses.
 */

#ifndef ZSTD_CLEVELS_H
#define ZSTD_CLEVELS_H

#define ZSTD_STATIC_LINKING_ONLY  /* ZSTD_compressionParameters  */
#include "../zstd.h"

/*-=====  Pre-defined compression levels  =====-*/

#define ZSTD_MAX_CLEVEL     22

#ifdef __GNUC__
__attribute__((__unused__))
#endif

static const ZSTD_compressionParameters ZSTD_defaultCParameters[4][ZSTD_MAX_CLEVEL+1] = {
{   /* "default" - for any srcSize > 256 KB */
    /* W,  C,  H,  S,  L, TL, strat */
    { 19, 12, 13,  1,  6,  1, ZSTD_fast    },  /* base for negative levels */
    { 19, 13, 14,  1,  7,  0, ZSTD_fast    },  /* level  1 */
    { 20, 15, 16,  1,  6,  0, ZSTD_fast    },  /* level  2 */
    { 21, 16, 17,  1,  5,  0, ZSTD_dfast   },  /* level  3 */
    { 21, 18, 18,  1,  5,  0, ZSTD_dfast   },  /* level  4 */
    { 21, 18, 19,  3,  5,  2, ZSTD_greedy  },  /* level  5 */
    { 21, 18, 19,  3,  5,  4, ZSTD_lazy    },  /* level  6 */
    { 21, 19, 20,  4,  5,  8, ZSTD_lazy    },  /* level  7 */
    { 21, 19, 20,  4,  5, 16, ZSTD_lazy2   },  /* level  8 */
    { 22, 20, 21,  4,  5, 16, ZSTD_lazy2   },  /* level  9 */
    { 22, 21, 22,  5,  5, 16, ZSTD_lazy2   },  /* level 10 */
    { 22, 21, 22,  6,  5, 16, ZSTD_lazy2   },  /* level 11 */
    { 22, 22, 23,  6,  5, 32, ZSTD_lazy2   },  /* level 12 */
    { 22, 22, 22,  4,  5, 32, ZSTD_btlazy2 },  /* level 13 */
    { 22, 22, 23,  5,  5, 32, ZSTD_btlazy2 },  /* level 14 */
    { 22, 23, 23,  6,  5, 32, ZSTD_btlazy2 },  /* level 15 */
    { 22, 22, 22,  5,  5, 48, ZSTD_btopt   },  /* level 16 */
    { 23, 23, 22,  5,  4, 64, ZSTD_btopt   },  /* level 17 */
    { 23, 23, 22,  6,  3, 64, ZSTD_btultra },  /* level 18 */
    { 23, 24, 22,  7,  3,256, ZSTD_btultra2},  /* level 19 */
    { 25, 25, 23,  7,  3,256, ZSTD_btultra2},  /* level 20 */
    { 26, 26, 24,  7,  3,512, ZSTD_btultra2},  /* level 21 */
    { 27, 27, 25,  9,  3,999, ZSTD_btultra2},  /* level 22 */
},
{   /* for srcSize <= 256 KB */
    /* W,  C,  H,  S,  L,  T, strat */
    { 18, 12, 13,  1,  5,  1, ZSTD_fast    },  /* base for negative levels */
    { 18, 13, 14,  1,  6,  0, ZSTD_fast    },  /* level  1 */
    { 18, 14, 14,  1,  5,  0, ZSTD_dfast   },  /* level  2 */
    { 18, 16, 16,  1,  4,  0, ZSTD_dfast   },  /* level  3 */
    { 18, 16, 17,  3,  5,  2, ZSTD_greedy  },  /* level  4.*/
    { 18, 17, 18,  5,  5,  2, ZSTD_greedy  },  /* level  5.*/
    { 18, 18, 19,  3,  5,  4, ZSTD_lazy    },  /* level  6.*/
    { 18, 18, 19,  4,  4,  4, ZSTD_lazy    },  /* level  7 */
    { 18, 18, 19,  4,  4,  8, ZSTD_lazy2   },  /* level  8 */
    { 18, 18, 19,  5,  4,  8, ZSTD_lazy2   },  /* level  9 */
    { 18, 18, 19,  6,  4,  8, ZSTD_lazy2   },  /* level 10 */
    { 18, 18, 19,  5,  4, 12, ZSTD_btlazy2 },  /* level 11.*/
    { 18, 19, 19,  7,  4, 12, ZSTD_btlazy2 },  /* level 12.*/
    { 18, 18, 19,  4,  4, 16, ZSTD_btopt   },  /* level 13 */
    { 18, 18, 19,  4,  3, 32, ZSTD_btopt   },  /* level 14.*/
    { 18, 18, 19,  6,  3,128, ZSTD_btopt   },  /* level 15.*/
    { 18, 19, 19,  6,  3,128, ZSTD_btultra },  /* level 16.*/
    { 18, 19, 19,  8,  3,256, ZSTD_btultra },  /* level 17.*/
    { 18, 19, 19,  6,  3,128, ZSTD_btultra2},  /* level 18.*/
    { 18, 19, 19,  8,  3,256, ZSTD_btultra2},  /* level 19.*/
    { 18, 19, 19, 10,  3,512, ZSTD_btultra2},  /* level 20.*/
    { 18, 19, 19, 12,  3,512, ZSTD_btultra2},  /* level 21.*/
    { 18, 19, 19, 13,  3,999, ZSTD_btultra2},  /* level 22.*/
},
{   /* for srcSize <= 128 KB */
    /* W,  C,  H,  S,  L,  T, strat */
    { 17, 12, 12,  1,  5,  1, ZSTD_fast    },  /* base for negative levels */
    { 17, 12, 13,  1,  6,  0, ZSTD_fast    },  /* level  1 */
    { 17, 13, 15,  1,  5,  0, ZSTD_fast    },  /* level  2 */
    { 17, 15, 16,  2,  5,  0, ZSTD_dfast   },  /* level  3 */
    { 17, 17, 17,  2,  4,  0, ZSTD_dfast   },  /* level  4 */
    { 17, 16, 17,  3,  4,  2, ZSTD_greedy  },  /* level  5 */
    { 17, 16, 17,  3,  4,  4, ZSTD_lazy    },  /* level  6 */
    { 17, 16, 17,  3,  4,  8, ZSTD_lazy2   },  /* level  7 */
    { 17, 16, 17,  4,  4,  8, ZSTD_lazy2   },  /* level  8 */
    { 17, 16, 17,  5,  4,  8, ZSTD_lazy2   },  /* level  9 */
    { 17, 16, 17,  6,  4,  8, ZSTD_lazy2   },  /* level 10 */
    { 17, 17, 17,  5,  4,  8, ZSTD_btlazy2 },  /* level 11 */
    { 17, 18, 17,  7,  4, 12, ZSTD_btlazy2 },  /* level 12 */
    { 17, 18, 17,  3,  4, 12, ZSTD_btopt   },  /* level 13.*/
    { 17, 18, 17,  4,  3, 32, ZSTD_btopt   },  /* level 14.*/
    { 17, 18, 17,  6,  3,256, ZSTD_btopt   },  /* level 15.*/
    { 17, 18, 17,  6,  3,128, ZSTD_btultra },  /* level 16.*/
    { 17, 18, 17,  8,  3,256, ZSTD_btultra },  /* level 17.*/
    { 17, 18, 17, 10,  3,512, ZSTD_btultra },  /* level 18.*/
    { 17, 18, 17,  5,  3,256, ZSTD_btultra2},  /* level 19.*/
    { 17, 18, 17,  7,  3,512, ZSTD_btultra2},  /* level 20.*/
    { 17, 18, 17,  9,  3,512, ZSTD_btultra2},  /* level 21.*/
    { 17, 18, 17, 11,  3,999, ZSTD_btultra2},  /* level 22.*/
},
{   /* for srcSize <= 16 KB */
    /* W,  C,  H,  S,  L,  T, strat */
    { 14, 12, 13,  1,  5,  1, ZSTD_fast    },  /* base for negative levels */
    { 14, 14, 15,  1,  5,  0, ZSTD_fast    },  /* level  1 */
    { 14, 14, 15,  1,  4,  0, ZSTD_fast    },  /* level  2 */
    { 14, 14, 15,  2,  4,  0, ZSTD_dfast   },  /* level  3 */
    { 14, 14, 14,  4,  4,  2, ZSTD_greedy  },  /* level  4 */
    { 14, 14, 14,  3,  4,  4, ZSTD_lazy    },  /* level  5.*/
    { 14, 14, 14,  4,  4,  8, ZSTD_lazy2   },  /* level  6 */
    { 14, 14, 14,  6,  4,  8, ZSTD_lazy2   },  /* level  7 */
    { 14, 14, 14,  8,  4,  8, ZSTD_lazy2   },  /* level  8.*/
    { 14, 15, 14,  5,  4,  8, ZSTD_btlazy2 },  /* level  9.*/
    { 14, 15, 14,  9,  4,  8, ZSTD_btlazy2 },  /* level 10.*/
    { 14, 15, 14,  3,  4, 12, ZSTD_btopt   },  /* level 11.*/
    { 14, 15, 14,  4,  3, 24, ZSTD_btopt   },  /* level 12.*/
    { 14, 15, 14,  5,  3, 32, ZSTD_btultra },  /* level 13.*/
    { 14, 15, 15,  6,  3, 64, ZSTD_btultra },  /* level 14.*/
    { 14, 15, 15,  7,  3,256, ZSTD_btultra },  /* level 15.*/
    { 14, 15, 15,  5,  3, 48, ZSTD_btultra2},  /* level 16.*/
    { 14, 15, 15,  6,  3,128, ZSTD_btultra2},  /* level 17.*/
    { 14, 15, 15,  7,  3,256, ZSTD_btultra2},  /* level 18.*/
    { 14, 15, 15,  8,  3,256, ZSTD_btultra2},  /* level 19.*/
    { 14, 15, 15,  8,  3,512, ZSTD_btultra2},  /* level 20.*/
    { 14, 15, 15,  9,  3,512, ZSTD_btultra2},  /* level 21.*/
    { 14, 15, 15, 10,  3,999, ZSTD_btultra2},  /* level 22.*/
},
};



#endif  /* ZSTD_CLEVELS_H */
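The four rows of `ZSTD_defaultCParameters` are selected by source-size bucket (>256 KB, then <=256 KB, <=128 KB, <=16 KB) and then indexed by compression level. A sketch of the lookup, paraphrasing (not copying) the selection logic zstd itself applies; the clamping of level 0 and negative levels is handled differently in the real library:

```c
/* Sketch: pick a cparams row by srcSize bucket, then by clamped level. */
#define ZSTD_STATIC_LINKING_ONLY
#include "clevels.h"   /* assumed include path to the vendored header */

static ZSTD_compressionParameters
pick_cparams(unsigned long long srcSize, int level)
{
    /* 0..3: each satisfied bound moves to a smaller-size row */
    int const tableID = (srcSize <= 256 * 1024)
                      + (srcSize <= 128 * 1024)
                      + (srcSize <=  16 * 1024);
    if (level < 1) level = 1;                      /* simplification: see note above */
    if (level > ZSTD_MAX_CLEVEL) level = ZSTD_MAX_CLEVEL;
    return ZSTD_defaultCParameters[tableID][level];
}
```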
624
ext/zstd/lib/compress/fse_compress.c
vendored
Normal file
@@ -0,0 +1,624 @@
|
||||
/* ******************************************************************
|
||||
* FSE : Finite State Entropy encoder
|
||||
* Copyright (c) Meta Platforms, Inc. and affiliates.
|
||||
*
|
||||
* You can contact the author at :
|
||||
* - FSE source repository : https://github.com/Cyan4973/FiniteStateEntropy
|
||||
* - Public forum : https://groups.google.com/forum/#!forum/lz4c
|
||||
*
|
||||
* This source code is licensed under both the BSD-style license (found in the
|
||||
* LICENSE file in the root directory of this source tree) and the GPLv2 (found
|
||||
* in the COPYING file in the root directory of this source tree).
|
||||
* You may select, at your option, one of the above-listed licenses.
|
||||
****************************************************************** */
|
||||
|
||||
/* **************************************************************
|
||||
* Includes
|
||||
****************************************************************/
|
||||
#include "../common/compiler.h"
|
||||
#include "../common/mem.h" /* U32, U16, etc. */
|
||||
#include "../common/debug.h" /* assert, DEBUGLOG */
|
||||
#include "hist.h" /* HIST_count_wksp */
|
||||
#include "../common/bitstream.h"
|
||||
#define FSE_STATIC_LINKING_ONLY
|
||||
#include "../common/fse.h"
|
||||
#include "../common/error_private.h"
|
||||
#define ZSTD_DEPS_NEED_MALLOC
|
||||
#define ZSTD_DEPS_NEED_MATH64
|
||||
#include "../common/zstd_deps.h" /* ZSTD_malloc, ZSTD_free, ZSTD_memcpy, ZSTD_memset */
|
||||
#include "../common/bits.h" /* ZSTD_highbit32 */
|
||||
|
||||
|
||||
/* **************************************************************
|
||||
* Error Management
|
||||
****************************************************************/
|
||||
#define FSE_isError ERR_isError
|
||||
|
||||
|
||||
/* **************************************************************
|
||||
* Templates
|
||||
****************************************************************/
|
||||
/*
|
||||
designed to be included
|
||||
for type-specific functions (template emulation in C)
|
||||
Objective is to write these functions only once, for improved maintenance
|
||||
*/
|
||||
|
||||
/* safety checks */
|
||||
#ifndef FSE_FUNCTION_EXTENSION
|
||||
# error "FSE_FUNCTION_EXTENSION must be defined"
|
||||
#endif
|
||||
#ifndef FSE_FUNCTION_TYPE
|
||||
# error "FSE_FUNCTION_TYPE must be defined"
|
||||
#endif
|
||||
|
||||
/* Function names */
|
||||
#define FSE_CAT(X,Y) X##Y
|
||||
#define FSE_FUNCTION_NAME(X,Y) FSE_CAT(X,Y)
|
||||
#define FSE_TYPE_NAME(X,Y) FSE_CAT(X,Y)
|
||||
|
||||
|
||||
/* Function templates */
|
||||
|
||||
/* FSE_buildCTable_wksp() :
|
||||
* Same as FSE_buildCTable(), but using an externally allocated scratch buffer (`workSpace`).
|
||||
* wkspSize should be sized to handle worst case situation, which is `1<<max_tableLog * sizeof(FSE_FUNCTION_TYPE)`
|
||||
* workSpace must also be properly aligned with FSE_FUNCTION_TYPE requirements
|
||||
*/
|
||||
size_t FSE_buildCTable_wksp(FSE_CTable* ct,
|
||||
const short* normalizedCounter, unsigned maxSymbolValue, unsigned tableLog,
|
||||
void* workSpace, size_t wkspSize)
|
||||
{
|
||||
U32 const tableSize = 1 << tableLog;
|
||||
U32 const tableMask = tableSize - 1;
|
||||
void* const ptr = ct;
|
||||
U16* const tableU16 = ( (U16*) ptr) + 2;
|
||||
void* const FSCT = ((U32*)ptr) + 1 /* header */ + (tableLog ? tableSize>>1 : 1) ;
|
||||
FSE_symbolCompressionTransform* const symbolTT = (FSE_symbolCompressionTransform*) (FSCT);
|
||||
U32 const step = FSE_TABLESTEP(tableSize);
|
||||
U32 const maxSV1 = maxSymbolValue+1;
|
||||
|
||||
U16* cumul = (U16*)workSpace; /* size = maxSV1 */
|
||||
FSE_FUNCTION_TYPE* const tableSymbol = (FSE_FUNCTION_TYPE*)(cumul + (maxSV1+1)); /* size = tableSize */
|
||||
|
||||
U32 highThreshold = tableSize-1;
|
||||
|
||||
assert(((size_t)workSpace & 1) == 0); /* Must be 2 bytes-aligned */
|
||||
if (FSE_BUILD_CTABLE_WORKSPACE_SIZE(maxSymbolValue, tableLog) > wkspSize) return ERROR(tableLog_tooLarge);
|
||||
/* CTable header */
|
||||
tableU16[-2] = (U16) tableLog;
|
||||
tableU16[-1] = (U16) maxSymbolValue;
|
||||
assert(tableLog < 16); /* required for threshold strategy to work */
|
||||
|
||||
/* For explanations on how to distribute symbol values over the table :
|
||||
* https://fastcompression.blogspot.fr/2014/02/fse-distributing-symbol-values.html */
|
||||
|
||||
#ifdef __clang_analyzer__
|
||||
ZSTD_memset(tableSymbol, 0, sizeof(*tableSymbol) * tableSize); /* useless initialization, just to keep scan-build happy */
|
||||
#endif
|
||||
|
||||
/* symbol start positions */
|
||||
{ U32 u;
|
||||
cumul[0] = 0;
|
||||
for (u=1; u <= maxSV1; u++) {
|
||||
if (normalizedCounter[u-1]==-1) { /* Low proba symbol */
|
||||
cumul[u] = cumul[u-1] + 1;
|
||||
tableSymbol[highThreshold--] = (FSE_FUNCTION_TYPE)(u-1);
|
||||
} else {
|
||||
assert(normalizedCounter[u-1] >= 0);
            cumul[u] = cumul[u-1] + (U16)normalizedCounter[u-1];
            assert(cumul[u] >= cumul[u-1]);  /* no overflow */
    }   }
    cumul[maxSV1] = (U16)(tableSize+1);
    }

    /* Spread symbols */
    if (highThreshold == tableSize - 1) {
        /* Case for no low prob count symbols. Lay down 8 bytes at a time
         * to reduce branch misses since we are operating on a small block
         */
        BYTE* const spread = tableSymbol + tableSize; /* size = tableSize + 8 (may write beyond tableSize) */
        {   U64 const add = 0x0101010101010101ull;
            size_t pos = 0;
            U64 sv = 0;
            U32 s;
            for (s=0; s<maxSV1; ++s, sv += add) {
                int i;
                int const n = normalizedCounter[s];
                MEM_write64(spread + pos, sv);
                for (i = 8; i < n; i += 8) {
                    MEM_write64(spread + pos + i, sv);
                }
                assert(n>=0);
                pos += (size_t)n;
            }
        }
        /* Spread symbols across the table. Lack of lowprob symbols means that
         * we don't need variable sized inner loop, so we can unroll the loop and
         * reduce branch misses.
         */
        {   size_t position = 0;
            size_t s;
            size_t const unroll = 2; /* Experimentally determined optimal unroll */
            assert(tableSize % unroll == 0); /* FSE_MIN_TABLELOG is 5 */
            for (s = 0; s < (size_t)tableSize; s += unroll) {
                size_t u;
                for (u = 0; u < unroll; ++u) {
                    size_t const uPosition = (position + (u * step)) & tableMask;
                    tableSymbol[uPosition] = spread[s + u];
                }
                position = (position + (unroll * step)) & tableMask;
            }
            assert(position == 0);   /* Must have initialized all positions */
        }
    } else {
        U32 position = 0;
        U32 symbol;
        for (symbol=0; symbol<maxSV1; symbol++) {
            int nbOccurrences;
            int const freq = normalizedCounter[symbol];
            for (nbOccurrences=0; nbOccurrences<freq; nbOccurrences++) {
                tableSymbol[position] = (FSE_FUNCTION_TYPE)symbol;
                position = (position + step) & tableMask;
                while (position > highThreshold)
                    position = (position + step) & tableMask;   /* Low proba area */
        }   }
        assert(position==0);  /* Must have initialized all positions */
    }

    /* Build table */
    {   U32 u; for (u=0; u<tableSize; u++) {
        FSE_FUNCTION_TYPE s = tableSymbol[u];   /* note : static analyzer may not understand tableSymbol is properly initialized */
        tableU16[cumul[s]++] = (U16) (tableSize+u);   /* TableU16 : sorted by symbol order; gives next state value */
    }   }

    /* Build Symbol Transformation Table */
    {   unsigned total = 0;
        unsigned s;
        for (s=0; s<=maxSymbolValue; s++) {
            switch (normalizedCounter[s])
            {
            case  0:
                /* filling nonetheless, for compatibility with FSE_getMaxNbBits() */
                symbolTT[s].deltaNbBits = ((tableLog+1) << 16) - (1<<tableLog);
                break;

            case -1:
            case  1:
                symbolTT[s].deltaNbBits = (tableLog << 16) - (1<<tableLog);
                assert(total <= INT_MAX);
                symbolTT[s].deltaFindState = (int)(total - 1);
                total ++;
                break;
            default :
                assert(normalizedCounter[s] > 1);
                {   U32 const maxBitsOut = tableLog - ZSTD_highbit32 ((U32)normalizedCounter[s]-1);
                    U32 const minStatePlus = (U32)normalizedCounter[s] << maxBitsOut;
                    symbolTT[s].deltaNbBits = (maxBitsOut << 16) - minStatePlus;
                    symbolTT[s].deltaFindState = (int)(total - (unsigned)normalizedCounter[s]);
                    total += (unsigned)normalizedCounter[s];
    }   }   }   }

#if 0  /* debug : symbol costs */
    DEBUGLOG(5, "\n --- table statistics : ");
    {   U32 symbol;
        for (symbol=0; symbol<=maxSymbolValue; symbol++) {
            DEBUGLOG(5, "%3u: w=%3i, maxBits=%u, fracBits=%.2f",
                    symbol, normalizedCounter[symbol],
                    FSE_getMaxNbBits(symbolTT, symbol),
                    (double)FSE_bitCost(symbolTT, tableLog, symbol, 8) / 256);
    }   }
#endif

    return 0;
}
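
A note on how the table just built is consumed (a sketch based on FSE_encodeSymbol() in the vendored fse.h, which is not part of this diff): storing deltaNbBits as (maxBitsOut << 16) - minStatePlus means the per-symbol bit count falls out of one add and one shift.

/* For a symbol with normalized frequency f, states at or above f<<maxBitsOut
 * emit maxBitsOut bits, lower states emit one bit fewer; the 16.16
 * fixed-point deltaNbBits folds that comparison away: */
static unsigned nbBitsForState(U32 stateValue, U32 deltaNbBits)
{
    return (unsigned)((stateValue + deltaNbBits) >> 16);
}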



#ifndef FSE_COMMONDEFS_ONLY

/*-**************************************************************
 *  FSE NCount encoding
 ****************************************************************/
size_t FSE_NCountWriteBound(unsigned maxSymbolValue, unsigned tableLog)
{
    size_t const maxHeaderSize = (((maxSymbolValue+1) * tableLog
                                   + 4 /* bitCount initialized at 4 */
                                   + 2 /* first two symbols may use one additional bit each */) / 8)
                                   + 1 /* round up to whole nb bytes */
                                   + 2 /* additional two bytes for bitstream flush */;
    return maxSymbolValue ? maxHeaderSize : FSE_NCOUNTBOUND;  /* maxSymbolValue==0 ? use default */
}
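
A quick sanity check of the bound above (plain arithmetic on the formula, for the common case maxSymbolValue = 255, tableLog = 11):

/* ((256*11 + 4 + 2) / 8) + 1 + 2 == 352 + 3 == 355 bytes,
 * comfortably below the FSE_NCOUNTBOUND (512) default used when
 * maxSymbolValue is unknown. */
assert(FSE_NCountWriteBound(255, 11) == 355);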

static size_t
FSE_writeNCount_generic (void* header, size_t headerBufferSize,
                   const short* normalizedCounter, unsigned maxSymbolValue, unsigned tableLog,
                   unsigned writeIsSafe)
{
    BYTE* const ostart = (BYTE*) header;
    BYTE* out = ostart;
    BYTE* const oend = ostart + headerBufferSize;
    int nbBits;
    const int tableSize = 1 << tableLog;
    int remaining;
    int threshold;
    U32 bitStream = 0;
    int bitCount = 0;
    unsigned symbol = 0;
    unsigned const alphabetSize = maxSymbolValue + 1;
    int previousIs0 = 0;

    /* Table Size */
    bitStream += (tableLog-FSE_MIN_TABLELOG) << bitCount;
    bitCount  += 4;

    /* Init */
    remaining = tableSize+1;   /* +1 for extra accuracy */
    threshold = tableSize;
    nbBits = tableLog+1;

    while ((symbol < alphabetSize) && (remaining>1)) {  /* stops at 1 */
        if (previousIs0) {
            unsigned start = symbol;
            while ((symbol < alphabetSize) && !normalizedCounter[symbol]) symbol++;
            if (symbol == alphabetSize) break;   /* incorrect distribution */
            while (symbol >= start+24) {
                start+=24;
                bitStream += 0xFFFFU << bitCount;
                if ((!writeIsSafe) && (out > oend-2))
                    return ERROR(dstSize_tooSmall);   /* Buffer overflow */
                out[0] = (BYTE) bitStream;
                out[1] = (BYTE)(bitStream>>8);
                out+=2;
                bitStream>>=16;
            }
            while (symbol >= start+3) {
                start+=3;
                bitStream += 3 << bitCount;
                bitCount += 2;
            }
            bitStream += (symbol-start) << bitCount;
            bitCount += 2;
            if (bitCount>16) {
                if ((!writeIsSafe) && (out > oend - 2))
                    return ERROR(dstSize_tooSmall);   /* Buffer overflow */
                out[0] = (BYTE)bitStream;
                out[1] = (BYTE)(bitStream>>8);
                out += 2;
                bitStream >>= 16;
                bitCount -= 16;
        }   }
        {   int count = normalizedCounter[symbol++];
            int const max = (2*threshold-1) - remaining;
            remaining -= count < 0 ? -count : count;
            count++;   /* +1 for extra accuracy */
            if (count>=threshold)
                count += max;   /* [0..max[ [max..threshold[ (...) [threshold+max 2*threshold[ */
            bitStream += count << bitCount;
            bitCount  += nbBits;
            bitCount  -= (count<max);
            previousIs0  = (count==1);
            if (remaining<1) return ERROR(GENERIC);
            while (remaining<threshold) { nbBits--; threshold>>=1; }
        }
        if (bitCount>16) {
            if ((!writeIsSafe) && (out > oend - 2))
                return ERROR(dstSize_tooSmall);   /* Buffer overflow */
            out[0] = (BYTE)bitStream;
            out[1] = (BYTE)(bitStream>>8);
            out += 2;
            bitStream >>= 16;
            bitCount -= 16;
    }   }

    if (remaining != 1)
        return ERROR(GENERIC);  /* incorrect normalized distribution */
    assert(symbol <= alphabetSize);

    /* flush remaining bitStream */
    if ((!writeIsSafe) && (out > oend - 2))
        return ERROR(dstSize_tooSmall);   /* Buffer overflow */
    out[0] = (BYTE)bitStream;
    out[1] = (BYTE)(bitStream>>8);
    out+= (bitCount+7) /8;

    return (out-ostart);
}


size_t FSE_writeNCount (void* buffer, size_t bufferSize,
                  const short* normalizedCounter, unsigned maxSymbolValue, unsigned tableLog)
{
    if (tableLog > FSE_MAX_TABLELOG) return ERROR(tableLog_tooLarge);   /* Unsupported */
    if (tableLog < FSE_MIN_TABLELOG) return ERROR(GENERIC);   /* Unsupported */

    if (bufferSize < FSE_NCountWriteBound(maxSymbolValue, tableLog))
        return FSE_writeNCount_generic(buffer, bufferSize, normalizedCounter, maxSymbolValue, tableLog, 0);

    return FSE_writeNCount_generic(buffer, bufferSize, normalizedCounter, maxSymbolValue, tableLog, 1 /* write in buffer is safe */);
}
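
Typical call pattern for the pair above (a minimal sketch, error handling abbreviated; FSE_NCOUNTBOUND and FSE_isError() come from the vendored fse.h and error headers):

{   BYTE hdr[FSE_NCOUNTBOUND];
    size_t const hSize = FSE_writeNCount(hdr, sizeof(hdr), normalizedCounter, maxSymbolValue, tableLog);
    if (FSE_isError(hSize)) { /* distribution or tableLog unsupported */ }
    /* hdr[0..hSize) now holds the serialized normalized counts */
}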


/*-**************************************************************
 *  FSE Compression Code
 ****************************************************************/

/* provides the minimum logSize to safely represent a distribution */
static unsigned FSE_minTableLog(size_t srcSize, unsigned maxSymbolValue)
{
    U32 minBitsSrc = ZSTD_highbit32((U32)(srcSize)) + 1;
    U32 minBitsSymbols = ZSTD_highbit32(maxSymbolValue) + 2;
    U32 minBits = minBitsSrc < minBitsSymbols ? minBitsSrc : minBitsSymbols;
    assert(srcSize > 1); /* Not supported, RLE should be used instead */
    return minBits;
}

unsigned FSE_optimalTableLog_internal(unsigned maxTableLog, size_t srcSize, unsigned maxSymbolValue, unsigned minus)
{
    U32 maxBitsSrc = ZSTD_highbit32((U32)(srcSize - 1)) - minus;
    U32 tableLog = maxTableLog;
    U32 minBits = FSE_minTableLog(srcSize, maxSymbolValue);
    assert(srcSize > 1); /* Not supported, RLE should be used instead */
    if (tableLog==0) tableLog = FSE_DEFAULT_TABLELOG;
    if (maxBitsSrc < tableLog) tableLog = maxBitsSrc;   /* Accuracy can be reduced */
    if (minBits > tableLog) tableLog = minBits;   /* Need a minimum to safely represent all symbol values */
    if (tableLog < FSE_MIN_TABLELOG) tableLog = FSE_MIN_TABLELOG;
    if (tableLog > FSE_MAX_TABLELOG) tableLog = FSE_MAX_TABLELOG;
    return tableLog;
}

unsigned FSE_optimalTableLog(unsigned maxTableLog, size_t srcSize, unsigned maxSymbolValue)
{
    return FSE_optimalTableLog_internal(maxTableLog, srcSize, maxSymbolValue, 2);
}
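
Worked example for the heuristic above (assuming FSE_DEFAULT_TABLELOG == 11 and FSE_MAX_TABLELOG == 12, their values in the vendored fse.h):

/* srcSize = 4096, maxSymbolValue = 255, maxTableLog = 0 (use default):
 *   maxBitsSrc = highbit32(4095) - 2 = 11 - 2 = 9   -> accuracy capped at 9
 *   minBits    = min(highbit32(4096)+1, highbit32(255)+2) = min(13, 9) = 9
 *   the default 11 is reduced to 9, already within [FSE_MIN_TABLELOG, 12],
 *   so FSE_optimalTableLog(0, 4096, 255) == 9 (a 512-state table). */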

/* Secondary normalization method.
   To be used when primary method fails. */

static size_t FSE_normalizeM2(short* norm, U32 tableLog, const unsigned* count, size_t total, U32 maxSymbolValue, short lowProbCount)
{
    short const NOT_YET_ASSIGNED = -2;
    U32 s;
    U32 distributed = 0;
    U32 ToDistribute;

    /* Init */
    U32 const lowThreshold = (U32)(total >> tableLog);
    U32 lowOne = (U32)((total * 3) >> (tableLog + 1));

    for (s=0; s<=maxSymbolValue; s++) {
        if (count[s] == 0) {
            norm[s]=0;
            continue;
        }
        if (count[s] <= lowThreshold) {
            norm[s] = lowProbCount;
            distributed++;
            total -= count[s];
            continue;
        }
        if (count[s] <= lowOne) {
            norm[s] = 1;
            distributed++;
            total -= count[s];
            continue;
        }

        norm[s]=NOT_YET_ASSIGNED;
    }
    ToDistribute = (1 << tableLog) - distributed;

    if (ToDistribute == 0)
        return 0;

    if ((total / ToDistribute) > lowOne) {
        /* risk of rounding to zero */
        lowOne = (U32)((total * 3) / (ToDistribute * 2));
        for (s=0; s<=maxSymbolValue; s++) {
            if ((norm[s] == NOT_YET_ASSIGNED) && (count[s] <= lowOne)) {
                norm[s] = 1;
                distributed++;
                total -= count[s];
                continue;
        }   }
        ToDistribute = (1 << tableLog) - distributed;
    }

    if (distributed == maxSymbolValue+1) {
        /* all values are pretty poor;
           probably incompressible data (should have already been detected);
           find max, then give all remaining points to max */
        U32 maxV = 0, maxC = 0;
        for (s=0; s<=maxSymbolValue; s++)
            if (count[s] > maxC) { maxV=s; maxC=count[s]; }
        norm[maxV] += (short)ToDistribute;
        return 0;
    }

    if (total == 0) {
        /* all of the symbols were low enough for the lowOne or lowThreshold */
        for (s=0; ToDistribute > 0; s = (s+1)%(maxSymbolValue+1))
            if (norm[s] > 0) { ToDistribute--; norm[s]++; }
        return 0;
    }

    {   U64 const vStepLog = 62 - tableLog;
        U64 const mid = (1ULL << (vStepLog-1)) - 1;
        U64 const rStep = ZSTD_div64((((U64)1<<vStepLog) * ToDistribute) + mid, (U32)total);   /* scale on remaining */
        U64 tmpTotal = mid;
        for (s=0; s<=maxSymbolValue; s++) {
            if (norm[s]==NOT_YET_ASSIGNED) {
                U64 const end = tmpTotal + (count[s] * rStep);
                U32 const sStart = (U32)(tmpTotal >> vStepLog);
                U32 const sEnd = (U32)(end >> vStepLog);
                U32 const weight = sEnd - sStart;
                if (weight < 1)
                    return ERROR(GENERIC);
                norm[s] = (short)weight;
                tmpTotal = end;
    }   }   }

    return 0;
}

size_t FSE_normalizeCount (short* normalizedCounter, unsigned tableLog,
                           const unsigned* count, size_t total,
                           unsigned maxSymbolValue, unsigned useLowProbCount)
{
    /* Sanity checks */
    if (tableLog==0) tableLog = FSE_DEFAULT_TABLELOG;
    if (tableLog < FSE_MIN_TABLELOG) return ERROR(GENERIC);   /* Unsupported size */
    if (tableLog > FSE_MAX_TABLELOG) return ERROR(tableLog_tooLarge);   /* Unsupported size */
    if (tableLog < FSE_minTableLog(total, maxSymbolValue)) return ERROR(GENERIC);   /* Too small tableLog, compression potentially impossible */

    {   static U32 const rtbTable[] = { 0, 473195, 504333, 520860, 550000, 700000, 750000, 830000 };
        short const lowProbCount = useLowProbCount ? -1 : 1;
        U64 const scale = 62 - tableLog;
        U64 const step = ZSTD_div64((U64)1<<62, (U32)total);   /* <== here, one division ! */
        U64 const vStep = 1ULL<<(scale-20);
        int stillToDistribute = 1<<tableLog;
        unsigned s;
        unsigned largest=0;
        short largestP=0;
        U32 lowThreshold = (U32)(total >> tableLog);

        for (s=0; s<=maxSymbolValue; s++) {
            if (count[s] == total) return 0;   /* rle special case */
            if (count[s] == 0) { normalizedCounter[s]=0; continue; }
            if (count[s] <= lowThreshold) {
                normalizedCounter[s] = lowProbCount;
                stillToDistribute--;
            } else {
                short proba = (short)((count[s]*step) >> scale);
                if (proba<8) {
                    U64 restToBeat = vStep * rtbTable[proba];
                    proba += (count[s]*step) - ((U64)proba<<scale) > restToBeat;
                }
                if (proba > largestP) { largestP=proba; largest=s; }
                normalizedCounter[s] = proba;
                stillToDistribute -= proba;
        }   }
        if (-stillToDistribute >= (normalizedCounter[largest] >> 1)) {
            /* corner case, need another normalization method */
            size_t const errorCode = FSE_normalizeM2(normalizedCounter, tableLog, count, total, maxSymbolValue, lowProbCount);
            if (FSE_isError(errorCode)) return errorCode;
        }
        else normalizedCounter[largest] += (short)stillToDistribute;
    }

#if 0
    {   /* Print Table (debug) */
        U32 s;
        U32 nTotal = 0;
        for (s=0; s<=maxSymbolValue; s++)
            RAWLOG(2, "%3i: %4i \n", s, normalizedCounter[s]);
        for (s=0; s<=maxSymbolValue; s++)
            nTotal += abs(normalizedCounter[s]);
        if (nTotal != (1U<<tableLog))
            RAWLOG(2, "Warning !!! Total == %u != %u !!!", nTotal, 1U<<tableLog);
        getchar();
    }
#endif

    return tableLog;
}
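
A minimal usage sketch tying the entry points above together (names local to the sketch; the 2048 cut-off mirrors the ZSTD_useLowProbCount() heuristic that appears later in this diff):

{   short norm[256];   /* needs maxSymbolValue+1 cells */
    unsigned const tableLog = FSE_optimalTableLog(0, srcSize, maxSymbolValue);
    size_t const logUsed = FSE_normalizeCount(norm, tableLog, count, srcSize,
                                              maxSymbolValue, srcSize >= 2048 /* useLowProbCount */);
    if (FSE_isError(logUsed)) { /* fall back to a raw or RLE block */ }
    /* on success the cells sum to 1<<logUsed, with -1 entries counting as 1 */
}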

/* fake FSE_CTable, for rle input (always same symbol) */
size_t FSE_buildCTable_rle (FSE_CTable* ct, BYTE symbolValue)
{
    void* ptr = ct;
    U16* tableU16 = ( (U16*) ptr) + 2;
    void* FSCTptr = (U32*)ptr + 2;
    FSE_symbolCompressionTransform* symbolTT = (FSE_symbolCompressionTransform*) FSCTptr;

    /* header */
    tableU16[-2] = (U16) 0;
    tableU16[-1] = (U16) symbolValue;

    /* Build table */
    tableU16[0] = 0;
    tableU16[1] = 0;   /* just in case */

    /* Build Symbol Transformation Table */
    symbolTT[symbolValue].deltaNbBits = 0;
    symbolTT[symbolValue].deltaFindState = 0;

    return 0;
}


static size_t FSE_compress_usingCTable_generic (void* dst, size_t dstSize,
                           const void* src, size_t srcSize,
                           const FSE_CTable* ct, const unsigned fast)
{
    const BYTE* const istart = (const BYTE*) src;
    const BYTE* const iend = istart + srcSize;
    const BYTE* ip=iend;

    BIT_CStream_t bitC;
    FSE_CState_t CState1, CState2;

    /* init */
    if (srcSize <= 2) return 0;
    {   size_t const initError = BIT_initCStream(&bitC, dst, dstSize);
        if (FSE_isError(initError)) return 0; /* not enough space available to write a bitstream */ }

#define FSE_FLUSHBITS(s)  (fast ? BIT_flushBitsFast(s) : BIT_flushBits(s))

    if (srcSize & 1) {
        FSE_initCState2(&CState1, ct, *--ip);
        FSE_initCState2(&CState2, ct, *--ip);
        FSE_encodeSymbol(&bitC, &CState1, *--ip);
        FSE_FLUSHBITS(&bitC);
    } else {
        FSE_initCState2(&CState2, ct, *--ip);
        FSE_initCState2(&CState1, ct, *--ip);
    }

    /* join to mod 4 */
    srcSize -= 2;
    if ((sizeof(bitC.bitContainer)*8 > FSE_MAX_TABLELOG*4+7 ) && (srcSize & 2)) {  /* test bit 2 */
        FSE_encodeSymbol(&bitC, &CState2, *--ip);
        FSE_encodeSymbol(&bitC, &CState1, *--ip);
        FSE_FLUSHBITS(&bitC);
    }

    /* 2 or 4 encoding per loop */
    while ( ip>istart ) {

        FSE_encodeSymbol(&bitC, &CState2, *--ip);

        if (sizeof(bitC.bitContainer)*8 < FSE_MAX_TABLELOG*2+7 )   /* this test must be static */
            FSE_FLUSHBITS(&bitC);

        FSE_encodeSymbol(&bitC, &CState1, *--ip);

        if (sizeof(bitC.bitContainer)*8 > FSE_MAX_TABLELOG*4+7 ) {  /* this test must be static */
            FSE_encodeSymbol(&bitC, &CState2, *--ip);
            FSE_encodeSymbol(&bitC, &CState1, *--ip);
        }

        FSE_FLUSHBITS(&bitC);
    }

    FSE_flushCState(&bitC, &CState2);
    FSE_flushCState(&bitC, &CState1);
    return BIT_closeCStream(&bitC);
}

size_t FSE_compress_usingCTable (void* dst, size_t dstSize,
                           const void* src, size_t srcSize,
                           const FSE_CTable* ct)
{
    unsigned const fast = (dstSize >= FSE_BLOCKBOUND(srcSize));

    if (fast)
        return FSE_compress_usingCTable_generic(dst, dstSize, src, srcSize, ct, 1);
    else
        return FSE_compress_usingCTable_generic(dst, dstSize, src, srcSize, ct, 0);
}


size_t FSE_compressBound(size_t size) { return FSE_COMPRESSBOUND(size); }

#endif   /* FSE_COMMONDEFS_ONLY */
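
Putting the pieces of this file together, end to end (an illustrative sketch only, error handling elided; FSE_buildCTable_wksp(), FSE_CTABLE_SIZE_U32 and FSE_BUILD_CTABLE_WORKSPACE_SIZE_U32 are declared in the vendored fse.h, and HIST_count_simple() in hist.c below):

static size_t fseCompressBlockSketch(void* dst, size_t dstCap,
                                     const void* src, size_t srcSize)
{
    unsigned count[256];
    short norm[256];
    unsigned maxSym = 255;
    FSE_CTable ct[FSE_CTABLE_SIZE_U32(FSE_MAX_TABLELOG, 255)];
    U32 wksp[FSE_BUILD_CTABLE_WORKSPACE_SIZE_U32(255, FSE_MAX_TABLELOG)];
    HIST_count_simple(count, &maxSym, src, srcSize);                       /* 1. histogram */
    {   unsigned const tableLog = FSE_optimalTableLog(0, srcSize, maxSym);
        FSE_normalizeCount(norm, tableLog, count, srcSize, maxSym, 0);     /* 2. normalize */
        FSE_buildCTable_wksp(ct, norm, maxSym, tableLog,
                             wksp, sizeof(wksp));                          /* 3. build table */
        /* a real caller would also FSE_writeNCount() the header first */
        return FSE_compress_usingCTable(dst, dstCap, src, srcSize, ct);    /* 4. encode */
    }
}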
181
ext/zstd/lib/compress/hist.c
vendored
Normal file
@@ -0,0 +1,181 @@
/* ******************************************************************
 * hist : Histogram functions
 * part of Finite State Entropy project
 * Copyright (c) Meta Platforms, Inc. and affiliates.
 *
 *  You can contact the author at :
 *  - FSE source repository : https://github.com/Cyan4973/FiniteStateEntropy
 *  - Public forum : https://groups.google.com/forum/#!forum/lz4c
 *
 * This source code is licensed under both the BSD-style license (found in the
 * LICENSE file in the root directory of this source tree) and the GPLv2 (found
 * in the COPYING file in the root directory of this source tree).
 * You may select, at your option, one of the above-listed licenses.
 ****************************************************************** */

/* --- dependencies --- */
#include "../common/mem.h"             /* U32, BYTE, etc. */
#include "../common/debug.h"           /* assert, DEBUGLOG */
#include "../common/error_private.h"   /* ERROR */
#include "hist.h"


/* --- Error management --- */
unsigned HIST_isError(size_t code) { return ERR_isError(code); }

/*-**************************************************************
 *  Histogram functions
 ****************************************************************/
unsigned HIST_count_simple(unsigned* count, unsigned* maxSymbolValuePtr,
                           const void* src, size_t srcSize)
{
    const BYTE* ip = (const BYTE*)src;
    const BYTE* const end = ip + srcSize;
    unsigned maxSymbolValue = *maxSymbolValuePtr;
    unsigned largestCount=0;

    ZSTD_memset(count, 0, (maxSymbolValue+1) * sizeof(*count));
    if (srcSize==0) { *maxSymbolValuePtr = 0; return 0; }

    while (ip<end) {
        assert(*ip <= maxSymbolValue);
        count[*ip++]++;
    }

    while (!count[maxSymbolValue]) maxSymbolValue--;
    *maxSymbolValuePtr = maxSymbolValue;

    {   U32 s;
        for (s=0; s<=maxSymbolValue; s++)
            if (count[s] > largestCount) largestCount = count[s];
    }

    return largestCount;
}
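
Usage sketch for the simple counter (names local to the sketch; per the hist.h notes later in this diff, the caller guarantees no byte exceeds maxSym):

{   unsigned count[256];
    unsigned maxSym = 255;   /* in: table capacity minus 1; out: largest byte seen */
    unsigned const mostFreq = HIST_count_simple(count, &maxSym, buf, bufSize);
    if (mostFreq == bufSize) { /* single repeated byte: emit RLE, skip entropy coding */ }
}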

typedef enum { trustInput, checkMaxSymbolValue } HIST_checkInput_e;

/* HIST_count_parallel_wksp() :
 * store histogram into 4 intermediate tables, recombined at the end.
 * this design makes better use of OoO cpus,
 * and is noticeably faster when some values are heavily repeated.
 * But it needs some additional workspace for intermediate tables.
 * `workSpace` must be a U32 table of size >= HIST_WKSP_SIZE_U32.
 * @return : largest histogram frequency,
 *           or an error code (notably when histogram's alphabet is larger than *maxSymbolValuePtr) */
static size_t HIST_count_parallel_wksp(
                                unsigned* count, unsigned* maxSymbolValuePtr,
                                const void* source, size_t sourceSize,
                                HIST_checkInput_e check,
                                U32* const workSpace)
{
    const BYTE* ip = (const BYTE*)source;
    const BYTE* const iend = ip+sourceSize;
    size_t const countSize = (*maxSymbolValuePtr + 1) * sizeof(*count);
    unsigned max=0;
    U32* const Counting1 = workSpace;
    U32* const Counting2 = Counting1 + 256;
    U32* const Counting3 = Counting2 + 256;
    U32* const Counting4 = Counting3 + 256;

    /* safety checks */
    assert(*maxSymbolValuePtr <= 255);
    if (!sourceSize) {
        ZSTD_memset(count, 0, countSize);
        *maxSymbolValuePtr = 0;
        return 0;
    }
    ZSTD_memset(workSpace, 0, 4*256*sizeof(unsigned));

    /* by stripes of 16 bytes */
    {   U32 cached = MEM_read32(ip); ip += 4;
        while (ip < iend-15) {
            U32 c = cached; cached = MEM_read32(ip); ip += 4;
            Counting1[(BYTE) c     ]++;
            Counting2[(BYTE)(c>>8) ]++;
            Counting3[(BYTE)(c>>16)]++;
            Counting4[       c>>24 ]++;
            c = cached; cached = MEM_read32(ip); ip += 4;
            Counting1[(BYTE) c     ]++;
            Counting2[(BYTE)(c>>8) ]++;
            Counting3[(BYTE)(c>>16)]++;
            Counting4[       c>>24 ]++;
            c = cached; cached = MEM_read32(ip); ip += 4;
            Counting1[(BYTE) c     ]++;
            Counting2[(BYTE)(c>>8) ]++;
            Counting3[(BYTE)(c>>16)]++;
            Counting4[       c>>24 ]++;
            c = cached; cached = MEM_read32(ip); ip += 4;
            Counting1[(BYTE) c     ]++;
            Counting2[(BYTE)(c>>8) ]++;
            Counting3[(BYTE)(c>>16)]++;
            Counting4[       c>>24 ]++;
        }
        ip-=4;
    }

    /* finish last symbols */
    while (ip<iend) Counting1[*ip++]++;

    {   U32 s;
        for (s=0; s<256; s++) {
            Counting1[s] += Counting2[s] + Counting3[s] + Counting4[s];
            if (Counting1[s] > max) max = Counting1[s];
    }   }

    {   unsigned maxSymbolValue = 255;
        while (!Counting1[maxSymbolValue]) maxSymbolValue--;
        if (check && maxSymbolValue > *maxSymbolValuePtr) return ERROR(maxSymbolValue_tooSmall);
        *maxSymbolValuePtr = maxSymbolValue;
        ZSTD_memmove(count, Counting1, countSize);   /* in case count & Counting1 are overlapping */
    }
    return (size_t)max;
}

/* HIST_countFast_wksp() :
 * Same as HIST_countFast(), but using an externally provided scratch buffer.
 * `workSpace` is a writable buffer which must be 4-bytes aligned,
 * `workSpaceSize` must be >= HIST_WKSP_SIZE
 */
size_t HIST_countFast_wksp(unsigned* count, unsigned* maxSymbolValuePtr,
                          const void* source, size_t sourceSize,
                          void* workSpace, size_t workSpaceSize)
{
    if (sourceSize < 1500) /* heuristic threshold */
        return HIST_count_simple(count, maxSymbolValuePtr, source, sourceSize);
    if ((size_t)workSpace & 3) return ERROR(GENERIC);  /* must be aligned on 4-bytes boundaries */
    if (workSpaceSize < HIST_WKSP_SIZE) return ERROR(workSpace_tooSmall);
    return HIST_count_parallel_wksp(count, maxSymbolValuePtr, source, sourceSize, trustInput, (U32*)workSpace);
}

/* HIST_count_wksp() :
 * Same as HIST_count(), but using an externally provided scratch buffer.
 * `workSpace` size must be table of >= HIST_WKSP_SIZE_U32 unsigned */
size_t HIST_count_wksp(unsigned* count, unsigned* maxSymbolValuePtr,
                       const void* source, size_t sourceSize,
                       void* workSpace, size_t workSpaceSize)
{
    if ((size_t)workSpace & 3) return ERROR(GENERIC);  /* must be aligned on 4-bytes boundaries */
    if (workSpaceSize < HIST_WKSP_SIZE) return ERROR(workSpace_tooSmall);
    if (*maxSymbolValuePtr < 255)
        return HIST_count_parallel_wksp(count, maxSymbolValuePtr, source, sourceSize, checkMaxSymbolValue, (U32*)workSpace);
    *maxSymbolValuePtr = 255;
    return HIST_countFast_wksp(count, maxSymbolValuePtr, source, sourceSize, workSpace, workSpaceSize);
}
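
Usage sketch for the workspace variant (the U32 array type satisfies the 4-byte alignment check; HIST_WKSP_SIZE_U32 == 1024, i.e. 4 KiB of scratch):

{   U32 wksp[HIST_WKSP_SIZE_U32];
    unsigned count[256];
    unsigned maxSym = 255;
    size_t const r = HIST_count_wksp(count, &maxSym, src, srcSize, wksp, sizeof(wksp));
    if (HIST_isError(r)) { /* alphabet exceeded maxSym, or workspace too small */ }
}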

#ifndef ZSTD_NO_UNUSED_FUNCTIONS
/* fast variant (unsafe : won't check if src contains values beyond count[] limit) */
size_t HIST_countFast(unsigned* count, unsigned* maxSymbolValuePtr,
                     const void* source, size_t sourceSize)
{
    unsigned tmpCounters[HIST_WKSP_SIZE_U32];
    return HIST_countFast_wksp(count, maxSymbolValuePtr, source, sourceSize, tmpCounters, sizeof(tmpCounters));
}

size_t HIST_count(unsigned* count, unsigned* maxSymbolValuePtr,
                  const void* src, size_t srcSize)
{
    unsigned tmpCounters[HIST_WKSP_SIZE_U32];
    return HIST_count_wksp(count, maxSymbolValuePtr, src, srcSize, tmpCounters, sizeof(tmpCounters));
}
#endif
75
ext/zstd/lib/compress/hist.h
Normal file
@@ -0,0 +1,75 @@
/* ******************************************************************
 * hist : Histogram functions
 * part of Finite State Entropy project
 * Copyright (c) Meta Platforms, Inc. and affiliates.
 *
 *  You can contact the author at :
 *  - FSE source repository : https://github.com/Cyan4973/FiniteStateEntropy
 *  - Public forum : https://groups.google.com/forum/#!forum/lz4c
 *
 * This source code is licensed under both the BSD-style license (found in the
 * LICENSE file in the root directory of this source tree) and the GPLv2 (found
 * in the COPYING file in the root directory of this source tree).
 * You may select, at your option, one of the above-listed licenses.
 ****************************************************************** */

/* --- dependencies --- */
#include "../common/zstd_deps.h"   /* size_t */


/* --- simple histogram functions --- */

/*! HIST_count():
 *  Provides the precise count of each byte within a table 'count'.
 *  'count' is a table of unsigned int, of minimum size (*maxSymbolValuePtr+1).
 *  Updates *maxSymbolValuePtr with actual largest symbol value detected.
 * @return : count of the most frequent symbol (which isn't identified).
 *           or an error code, which can be tested using HIST_isError().
 *           note : if return == srcSize, there is only one symbol.
 */
size_t HIST_count(unsigned* count, unsigned* maxSymbolValuePtr,
                  const void* src, size_t srcSize);

unsigned HIST_isError(size_t code);  /**< tells if a return value is an error code */


/* --- advanced histogram functions --- */

#define HIST_WKSP_SIZE_U32 1024
#define HIST_WKSP_SIZE    (HIST_WKSP_SIZE_U32 * sizeof(unsigned))
/** HIST_count_wksp() :
 *  Same as HIST_count(), but using an externally provided scratch buffer.
 *  Benefit is this function will use very little stack space.
 * `workSpace` is a writable buffer which must be 4-bytes aligned,
 * `workSpaceSize` must be >= HIST_WKSP_SIZE
 */
size_t HIST_count_wksp(unsigned* count, unsigned* maxSymbolValuePtr,
                       const void* src, size_t srcSize,
                       void* workSpace, size_t workSpaceSize);

/** HIST_countFast() :
 *  same as HIST_count(), but blindly trusts that all byte values within src are <= *maxSymbolValuePtr.
 *  This function is unsafe, and will segfault if any value within `src` is `> *maxSymbolValuePtr`
 */
size_t HIST_countFast(unsigned* count, unsigned* maxSymbolValuePtr,
                      const void* src, size_t srcSize);

/** HIST_countFast_wksp() :
 *  Same as HIST_countFast(), but using an externally provided scratch buffer.
 * `workSpace` is a writable buffer which must be 4-bytes aligned,
 * `workSpaceSize` must be >= HIST_WKSP_SIZE
 */
size_t HIST_countFast_wksp(unsigned* count, unsigned* maxSymbolValuePtr,
                           const void* src, size_t srcSize,
                           void* workSpace, size_t workSpaceSize);

/*! HIST_count_simple() :
 *  Same as HIST_countFast(), this function is unsafe,
 *  and will segfault if any value within `src` is `> *maxSymbolValuePtr`.
 *  It is also a bit slower for large inputs.
 *  However, it does not need any additional memory (not even on stack).
 * @return : count of the most frequent symbol.
 *  Note this function doesn't produce any error (i.e. it must succeed).
 */
unsigned HIST_count_simple(unsigned* count, unsigned* maxSymbolValuePtr,
                           const void* src, size_t srcSize);
1435
ext/zstd/lib/compress/huf_compress.c
vendored
Normal file
File diff suppressed because it is too large
7032
ext/zstd/lib/compress/zstd_compress.c
vendored
Normal file
File diff suppressed because it is too large
1532
ext/zstd/lib/compress/zstd_compress_internal.h
Normal file
File diff suppressed because it is too large
235
ext/zstd/lib/compress/zstd_compress_literals.c
vendored
Normal file
@@ -0,0 +1,235 @@
/*
 * Copyright (c) Meta Platforms, Inc. and affiliates.
 * All rights reserved.
 *
 * This source code is licensed under both the BSD-style license (found in the
 * LICENSE file in the root directory of this source tree) and the GPLv2 (found
 * in the COPYING file in the root directory of this source tree).
 * You may select, at your option, one of the above-listed licenses.
 */

/*-*************************************
 *  Dependencies
 ***************************************/
#include "zstd_compress_literals.h"


/* **************************************************************
 *  Debug Traces
 ****************************************************************/
#if DEBUGLEVEL >= 2

static size_t showHexa(const void* src, size_t srcSize)
{
    const BYTE* const ip = (const BYTE*)src;
    size_t u;
    for (u=0; u<srcSize; u++) {
        RAWLOG(5, " %02X", ip[u]); (void)ip;
    }
    RAWLOG(5, " \n");
    return srcSize;
}

#endif


/* **************************************************************
 *  Literals compression - special cases
 ****************************************************************/
size_t ZSTD_noCompressLiterals (void* dst, size_t dstCapacity, const void* src, size_t srcSize)
{
    BYTE* const ostart = (BYTE*)dst;
    U32 const flSize = 1 + (srcSize>31) + (srcSize>4095);

    DEBUGLOG(5, "ZSTD_noCompressLiterals: srcSize=%zu, dstCapacity=%zu", srcSize, dstCapacity);

    RETURN_ERROR_IF(srcSize + flSize > dstCapacity, dstSize_tooSmall, "");

    switch(flSize)
    {
        case 1: /* 2 - 1 - 5 */
            ostart[0] = (BYTE)((U32)set_basic + (srcSize<<3));
            break;
        case 2: /* 2 - 2 - 12 */
            MEM_writeLE16(ostart, (U16)((U32)set_basic + (1<<2) + (srcSize<<4)));
            break;
        case 3: /* 2 - 2 - 20 */
            MEM_writeLE32(ostart, (U32)((U32)set_basic + (3<<2) + (srcSize<<4)));
            break;
        default:   /* not necessary : flSize is {1,2,3} */
            assert(0);
    }

    ZSTD_memcpy(ostart + flSize, src, srcSize);
    DEBUGLOG(5, "Raw (uncompressed) literals: %u -> %u", (U32)srcSize, (U32)(srcSize + flSize));
    return srcSize + flSize;
}

static int allBytesIdentical(const void* src, size_t srcSize)
{
    assert(srcSize >= 1);
    assert(src != NULL);
    {   const BYTE b = ((const BYTE*)src)[0];
        size_t p;
        for (p=1; p<srcSize; p++) {
            if (((const BYTE*)src)[p] != b) return 0;
        }
        return 1;
    }
}

size_t ZSTD_compressRleLiteralsBlock (void* dst, size_t dstCapacity, const void* src, size_t srcSize)
{
    BYTE* const ostart = (BYTE*)dst;
    U32 const flSize = 1 + (srcSize>31) + (srcSize>4095);

    assert(dstCapacity >= 4); (void)dstCapacity;
    assert(allBytesIdentical(src, srcSize));

    switch(flSize)
    {
        case 1: /* 2 - 1 - 5 */
            ostart[0] = (BYTE)((U32)set_rle + (srcSize<<3));
            break;
        case 2: /* 2 - 2 - 12 */
            MEM_writeLE16(ostart, (U16)((U32)set_rle + (1<<2) + (srcSize<<4)));
            break;
        case 3: /* 2 - 2 - 20 */
            MEM_writeLE32(ostart, (U32)((U32)set_rle + (3<<2) + (srcSize<<4)));
            break;
        default:   /* not necessary : flSize is {1,2,3} */
            assert(0);
    }

    ostart[flSize] = *(const BYTE*)src;
    DEBUGLOG(5, "RLE : Repeated Literal (%02X: %u times) -> %u bytes encoded", ((const BYTE*)src)[0], (U32)srcSize, (U32)flSize + 1);
    return flSize+1;
}
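
For reference, a decoding-side sketch of the three header layouts written above (illustrative only, not vendored code; MEM_readLE16/MEM_readLE32 come from the vendored mem.h, and the low two bits carry the set_basic/set_rle block type):

static size_t rawLitRegeneratedSizeSketch(const BYTE* h)
{
    switch ((h[0] >> 2) & 3) {   /* size-format bits */
    case 0: case 2: return h[0] >> 3;                      /* "2 - 1 - 5"  : 1-byte header */
    case 1:  return MEM_readLE16(h) >> 4;                  /* "2 - 2 - 12" : 2-byte header */
    default: return (MEM_readLE32(h) >> 4) & 0xFFFFF;      /* "2 - 2 - 20" : 3-byte header */
    }
}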

/* ZSTD_minLiteralsToCompress() :
 * returns minimal amount of literals
 * for literal compression to even be attempted.
 * Minimum is made tighter as compression strategy increases.
 */
static size_t
ZSTD_minLiteralsToCompress(ZSTD_strategy strategy, HUF_repeat huf_repeat)
{
    assert((int)strategy >= 0);
    assert((int)strategy <= 9);
    /* btultra2 : min 8 bytes;
     * then 2x larger for each successive compression strategy
     * max threshold 64 bytes */
    {   int const shift = MIN(9-(int)strategy, 3);
        size_t const mintc = (huf_repeat == HUF_repeat_valid) ? 6 : (size_t)8 << shift;
        DEBUGLOG(7, "minLiteralsToCompress = %zu", mintc);
        return mintc;
    }
}
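
The thresholds this yields (plain evaluation of the formula above, when no still-valid Huffman table is available):

/*   strategy 9 (btultra2) : 8 << 0 =  8 bytes
 *   strategy 8 (btultra)  : 8 << 1 = 16 bytes
 *   strategy 7 (btopt)    : 8 << 2 = 32 bytes
 *   strategy <= 6         : 8 << 3 = 64 bytes
 * with huf_repeat == HUF_repeat_valid the bar drops to 6 bytes,
 * since reusing an existing table costs no table header. */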

size_t ZSTD_compressLiterals (
                  void* dst, size_t dstCapacity,
            const void* src, size_t srcSize,
                  void* entropyWorkspace, size_t entropyWorkspaceSize,
            const ZSTD_hufCTables_t* prevHuf,
                  ZSTD_hufCTables_t* nextHuf,
                  ZSTD_strategy strategy,
                  int disableLiteralCompression,
                  int suspectUncompressible,
                  int bmi2)
{
    size_t const lhSize = 3 + (srcSize >= 1 KB) + (srcSize >= 16 KB);
    BYTE* const ostart = (BYTE*)dst;
    U32 singleStream = srcSize < 256;
    symbolEncodingType_e hType = set_compressed;
    size_t cLitSize;

    DEBUGLOG(5,"ZSTD_compressLiterals (disableLiteralCompression=%i, srcSize=%u, dstCapacity=%zu)",
                disableLiteralCompression, (U32)srcSize, dstCapacity);

    DEBUGLOG(6, "Completed literals listing (%zu bytes)", showHexa(src, srcSize));

    /* Prepare nextEntropy assuming reusing the existing table */
    ZSTD_memcpy(nextHuf, prevHuf, sizeof(*prevHuf));

    if (disableLiteralCompression)
        return ZSTD_noCompressLiterals(dst, dstCapacity, src, srcSize);

    /* if too small, don't even attempt compression (speed opt) */
    if (srcSize < ZSTD_minLiteralsToCompress(strategy, prevHuf->repeatMode))
        return ZSTD_noCompressLiterals(dst, dstCapacity, src, srcSize);

    RETURN_ERROR_IF(dstCapacity < lhSize+1, dstSize_tooSmall, "not enough space for compression");
    {   HUF_repeat repeat = prevHuf->repeatMode;
        int const flags = 0
            | (bmi2 ? HUF_flags_bmi2 : 0)
            | (strategy < ZSTD_lazy && srcSize <= 1024 ? HUF_flags_preferRepeat : 0)
            | (strategy >= HUF_OPTIMAL_DEPTH_THRESHOLD ? HUF_flags_optimalDepth : 0)
            | (suspectUncompressible ? HUF_flags_suspectUncompressible : 0);

        typedef size_t (*huf_compress_f)(void*, size_t, const void*, size_t, unsigned, unsigned, void*, size_t, HUF_CElt*, HUF_repeat*, int);
        huf_compress_f huf_compress;
        if (repeat == HUF_repeat_valid && lhSize == 3) singleStream = 1;
        huf_compress = singleStream ? HUF_compress1X_repeat : HUF_compress4X_repeat;
        cLitSize = huf_compress(ostart+lhSize, dstCapacity-lhSize,
                                src, srcSize,
                                HUF_SYMBOLVALUE_MAX, LitHufLog,
                                entropyWorkspace, entropyWorkspaceSize,
                                (HUF_CElt*)nextHuf->CTable,
                                &repeat, flags);
        DEBUGLOG(5, "%zu literals compressed into %zu bytes (before header)", srcSize, cLitSize);
        if (repeat != HUF_repeat_none) {
            /* reused the existing table */
            DEBUGLOG(5, "reusing statistics from previous huffman block");
            hType = set_repeat;
        }
    }

    {   size_t const minGain = ZSTD_minGain(srcSize, strategy);
        if ((cLitSize==0) || (cLitSize >= srcSize - minGain) || ERR_isError(cLitSize)) {
            ZSTD_memcpy(nextHuf, prevHuf, sizeof(*prevHuf));
            return ZSTD_noCompressLiterals(dst, dstCapacity, src, srcSize);
    }   }
    if (cLitSize==1) {
        /* A return value of 1 signals that the alphabet consists of a single symbol.
         * However, in some rare circumstances, it could be the compressed size (a single byte).
         * For that outcome to have a chance to happen, it's necessary that `srcSize < 8`.
         * (it's also necessary to not generate statistics).
         * Therefore, in such a case, actively check that all bytes are identical. */
        if ((srcSize >= 8) || allBytesIdentical(src, srcSize)) {
            ZSTD_memcpy(nextHuf, prevHuf, sizeof(*prevHuf));
            return ZSTD_compressRleLiteralsBlock(dst, dstCapacity, src, srcSize);
    }   }

    if (hType == set_compressed) {
        /* using a newly constructed table */
        nextHuf->repeatMode = HUF_repeat_check;
    }

    /* Build header */
    switch(lhSize)
    {
    case 3: /* 2 - 2 - 10 - 10 */
        if (!singleStream) assert(srcSize >= MIN_LITERALS_FOR_4_STREAMS);
        {   U32 const lhc = hType + ((U32)(!singleStream) << 2) + ((U32)srcSize<<4) + ((U32)cLitSize<<14);
            MEM_writeLE24(ostart, lhc);
            break;
        }
    case 4: /* 2 - 2 - 14 - 14 */
        assert(srcSize >= MIN_LITERALS_FOR_4_STREAMS);
        {   U32 const lhc = hType + (2 << 2) + ((U32)srcSize<<4) + ((U32)cLitSize<<18);
            MEM_writeLE32(ostart, lhc);
            break;
        }
    case 5: /* 2 - 2 - 18 - 18 */
        assert(srcSize >= MIN_LITERALS_FOR_4_STREAMS);
        {   U32 const lhc = hType + (3 << 2) + ((U32)srcSize<<4) + ((U32)cLitSize<<22);
            MEM_writeLE32(ostart, lhc);
            ostart[4] = (BYTE)(cLitSize >> 10);
            break;
        }
    default:  /* not possible : lhSize is {3,4,5} */
        assert(0);
    }
    DEBUGLOG(5, "Compressed literals: %u -> %u", (U32)srcSize, (U32)(lhSize+cLitSize));
    return lhSize+cLitSize;
}
39
ext/zstd/lib/compress/zstd_compress_literals.h
Normal file
@@ -0,0 +1,39 @@
/*
 * Copyright (c) Meta Platforms, Inc. and affiliates.
 * All rights reserved.
 *
 * This source code is licensed under both the BSD-style license (found in the
 * LICENSE file in the root directory of this source tree) and the GPLv2 (found
 * in the COPYING file in the root directory of this source tree).
 * You may select, at your option, one of the above-listed licenses.
 */

#ifndef ZSTD_COMPRESS_LITERALS_H
#define ZSTD_COMPRESS_LITERALS_H

#include "zstd_compress_internal.h" /* ZSTD_hufCTables_t, ZSTD_minGain() */


size_t ZSTD_noCompressLiterals (void* dst, size_t dstCapacity, const void* src, size_t srcSize);

/* ZSTD_compressRleLiteralsBlock() :
 * Conditions :
 * - All bytes in @src are identical
 * - dstCapacity >= 4 */
size_t ZSTD_compressRleLiteralsBlock (void* dst, size_t dstCapacity, const void* src, size_t srcSize);

/* ZSTD_compressLiterals():
 * @entropyWorkspace: must be aligned on 4-bytes boundaries
 * @entropyWorkspaceSize : must be >= HUF_WORKSPACE_SIZE
 * @suspectUncompressible: sampling checks, to potentially skip huffman coding
 */
size_t ZSTD_compressLiterals (void* dst, size_t dstCapacity,
                        const void* src, size_t srcSize,
                              void* entropyWorkspace, size_t entropyWorkspaceSize,
                        const ZSTD_hufCTables_t* prevHuf,
                              ZSTD_hufCTables_t* nextHuf,
                              ZSTD_strategy strategy, int disableLiteralCompression,
                              int suspectUncompressible,
                              int bmi2);

#endif /* ZSTD_COMPRESS_LITERALS_H */
442
ext/zstd/lib/compress/zstd_compress_sequences.c
vendored
Normal file
@@ -0,0 +1,442 @@
/*
 * Copyright (c) Meta Platforms, Inc. and affiliates.
 * All rights reserved.
 *
 * This source code is licensed under both the BSD-style license (found in the
 * LICENSE file in the root directory of this source tree) and the GPLv2 (found
 * in the COPYING file in the root directory of this source tree).
 * You may select, at your option, one of the above-listed licenses.
 */

/*-*************************************
 *  Dependencies
 ***************************************/
#include "zstd_compress_sequences.h"

/**
 * -log2(x / 256) lookup table for x in [0, 256).
 * If x == 0: Return 0
 * Else: Return floor(-log2(x / 256) * 256)
 */
static unsigned const kInverseProbabilityLog256[256] = {
    0,    2048, 1792, 1642, 1536, 1453, 1386, 1329, 1280, 1236, 1197, 1162,
    1130, 1100, 1073, 1047, 1024, 1001, 980,  960,  941,  923,  906,  889,
    874,  859,  844,  830,  817,  804,  791,  779,  768,  756,  745,  734,
    724,  714,  704,  694,  685,  676,  667,  658,  650,  642,  633,  626,
    618,  610,  603,  595,  588,  581,  574,  567,  561,  554,  548,  542,
    535,  529,  523,  517,  512,  506,  500,  495,  489,  484,  478,  473,
    468,  463,  458,  453,  448,  443,  438,  434,  429,  424,  420,  415,
    411,  407,  402,  398,  394,  390,  386,  382,  377,  373,  370,  366,
    362,  358,  354,  350,  347,  343,  339,  336,  332,  329,  325,  322,
    318,  315,  311,  308,  305,  302,  298,  295,  292,  289,  286,  282,
    279,  276,  273,  270,  267,  264,  261,  258,  256,  253,  250,  247,
    244,  241,  239,  236,  233,  230,  228,  225,  222,  220,  217,  215,
    212,  209,  207,  204,  202,  199,  197,  194,  192,  190,  187,  185,
    182,  180,  178,  175,  173,  171,  168,  166,  164,  162,  159,  157,
    155,  153,  151,  149,  146,  144,  142,  140,  138,  136,  134,  132,
    130,  128,  126,  123,  121,  119,  117,  115,  114,  112,  110,  108,
    106,  104,  102,  100,  98,   96,   94,   93,   91,   89,   87,   85,
    83,   82,   80,   78,   76,   74,   73,   71,   69,   67,   66,   64,
    62,   61,   59,   57,   55,   54,   52,   50,   49,   47,   46,   44,
    42,   41,   39,   37,   36,   34,   33,   31,   30,   28,   26,   25,
    23,   22,   20,   19,   17,   16,   14,   13,   11,   10,   8,    7,
    5,    4,    2,    1,
};
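
The table can be regenerated mechanically; a sketch of the generator (assumes <math.h>; integer truncation acts as floor because the value is non-negative for x in [1, 256)):

#include <math.h>
static unsigned invProbLog256(unsigned x)   /* x in [0, 256) */
{
    if (x == 0) return 0;
    return (unsigned)((8.0 - log2((double)x)) * 256.0);   /* floor(-log2(x/256) * 256) */
}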
|
||||
|
||||
static unsigned ZSTD_getFSEMaxSymbolValue(FSE_CTable const* ctable) {
|
||||
void const* ptr = ctable;
|
||||
U16 const* u16ptr = (U16 const*)ptr;
|
||||
U32 const maxSymbolValue = MEM_read16(u16ptr + 1);
|
||||
return maxSymbolValue;
|
||||
}
|
||||
|
||||
/**
|
||||
* Returns true if we should use ncount=-1 else we should
|
||||
* use ncount=1 for low probability symbols instead.
|
||||
*/
|
||||
static unsigned ZSTD_useLowProbCount(size_t const nbSeq)
|
||||
{
|
||||
/* Heuristic: This should cover most blocks <= 16K and
|
||||
* start to fade out after 16K to about 32K depending on
|
||||
* compressibility.
|
||||
*/
|
||||
return nbSeq >= 2048;
|
||||
}
|
||||
|
||||
/**
|
||||
* Returns the cost in bytes of encoding the normalized count header.
|
||||
* Returns an error if any of the helper functions return an error.
|
||||
*/
|
||||
static size_t ZSTD_NCountCost(unsigned const* count, unsigned const max,
|
||||
size_t const nbSeq, unsigned const FSELog)
|
||||
{
|
||||
BYTE wksp[FSE_NCOUNTBOUND];
|
||||
S16 norm[MaxSeq + 1];
|
||||
const U32 tableLog = FSE_optimalTableLog(FSELog, nbSeq, max);
|
||||
FORWARD_IF_ERROR(FSE_normalizeCount(norm, tableLog, count, nbSeq, max, ZSTD_useLowProbCount(nbSeq)), "");
|
||||
return FSE_writeNCount(wksp, sizeof(wksp), norm, max, tableLog);
|
||||
}
|
||||
|
||||
/**
|
||||
* Returns the cost in bits of encoding the distribution described by count
|
||||
* using the entropy bound.
|
||||
*/
|
||||
static size_t ZSTD_entropyCost(unsigned const* count, unsigned const max, size_t const total)
|
||||
{
|
||||
unsigned cost = 0;
|
||||
unsigned s;
|
||||
|
||||
assert(total > 0);
|
||||
for (s = 0; s <= max; ++s) {
|
||||
unsigned norm = (unsigned)((256 * count[s]) / total);
|
||||
if (count[s] != 0 && norm == 0)
|
||||
norm = 1;
|
||||
assert(count[s] < total);
|
||||
cost += count[s] * kInverseProbabilityLog256[norm];
|
||||
}
|
||||
return cost >> 8;
|
||||
}
|
||||
|
||||
/**
|
||||
* Returns the cost in bits of encoding the distribution in count using ctable.
|
||||
* Returns an error if ctable cannot represent all the symbols in count.
|
||||
*/
|
||||
size_t ZSTD_fseBitCost(
|
||||
FSE_CTable const* ctable,
|
||||
unsigned const* count,
|
||||
unsigned const max)
|
||||
{
|
||||
unsigned const kAccuracyLog = 8;
|
||||
size_t cost = 0;
|
||||
unsigned s;
|
||||
FSE_CState_t cstate;
|
||||
FSE_initCState(&cstate, ctable);
|
||||
if (ZSTD_getFSEMaxSymbolValue(ctable) < max) {
|
||||
DEBUGLOG(5, "Repeat FSE_CTable has maxSymbolValue %u < %u",
|
||||
ZSTD_getFSEMaxSymbolValue(ctable), max);
|
||||
return ERROR(GENERIC);
|
||||
}
|
||||
for (s = 0; s <= max; ++s) {
|
||||
unsigned const tableLog = cstate.stateLog;
|
||||
unsigned const badCost = (tableLog + 1) << kAccuracyLog;
|
||||
unsigned const bitCost = FSE_bitCost(cstate.symbolTT, tableLog, s, kAccuracyLog);
|
||||
if (count[s] == 0)
|
||||
continue;
|
||||
if (bitCost >= badCost) {
|
||||
DEBUGLOG(5, "Repeat FSE_CTable has Prob[%u] == 0", s);
|
||||
return ERROR(GENERIC);
|
||||
}
|
||||
cost += (size_t)count[s] * bitCost;
|
||||
}
|
||||
return cost >> kAccuracyLog;
|
||||
}
|
||||
|
||||
/**
|
||||
* Returns the cost in bits of encoding the distribution in count using the
|
||||
* table described by norm. The max symbol support by norm is assumed >= max.
|
||||
* norm must be valid for every symbol with non-zero probability in count.
|
||||
*/
|
||||
size_t ZSTD_crossEntropyCost(short const* norm, unsigned accuracyLog,
|
||||
unsigned const* count, unsigned const max)
|
||||
{
|
||||
unsigned const shift = 8 - accuracyLog;
|
||||
size_t cost = 0;
|
||||
unsigned s;
|
||||
assert(accuracyLog <= 8);
|
||||
for (s = 0; s <= max; ++s) {
|
||||
unsigned const normAcc = (norm[s] != -1) ? (unsigned)norm[s] : 1;
|
||||
unsigned const norm256 = normAcc << shift;
|
||||
assert(norm256 > 0);
|
||||
assert(norm256 < 256);
|
||||
cost += count[s] * kInverseProbabilityLog256[norm256];
|
||||
}
|
||||
return cost >> 8;
|
||||
}
|
||||
|
||||
symbolEncodingType_e
|
||||
ZSTD_selectEncodingType(
|
||||
FSE_repeat* repeatMode, unsigned const* count, unsigned const max,
|
||||
size_t const mostFrequent, size_t nbSeq, unsigned const FSELog,
|
||||
FSE_CTable const* prevCTable,
|
||||
short const* defaultNorm, U32 defaultNormLog,
|
||||
ZSTD_defaultPolicy_e const isDefaultAllowed,
|
||||
ZSTD_strategy const strategy)
|
||||
{
|
||||
ZSTD_STATIC_ASSERT(ZSTD_defaultDisallowed == 0 && ZSTD_defaultAllowed != 0);
|
||||
if (mostFrequent == nbSeq) {
|
||||
*repeatMode = FSE_repeat_none;
|
||||
if (isDefaultAllowed && nbSeq <= 2) {
|
||||
/* Prefer set_basic over set_rle when there are 2 or fewer symbols,
|
||||
* since RLE uses 1 byte, but set_basic uses 5-6 bits per symbol.
|
||||
* If basic encoding isn't possible, always choose RLE.
|
||||
*/
|
||||
DEBUGLOG(5, "Selected set_basic");
|
||||
return set_basic;
|
||||
}
|
||||
DEBUGLOG(5, "Selected set_rle");
|
||||
return set_rle;
|
||||
}
|
||||
if (strategy < ZSTD_lazy) {
|
||||
if (isDefaultAllowed) {
|
||||
size_t const staticFse_nbSeq_max = 1000;
|
||||
size_t const mult = 10 - strategy;
|
||||
size_t const baseLog = 3;
|
||||
size_t const dynamicFse_nbSeq_min = (((size_t)1 << defaultNormLog) * mult) >> baseLog; /* 28-36 for offset, 56-72 for lengths */
|
||||
assert(defaultNormLog >= 5 && defaultNormLog <= 6); /* xx_DEFAULTNORMLOG */
|
||||
assert(mult <= 9 && mult >= 7);
|
||||
if ( (*repeatMode == FSE_repeat_valid)
|
||||
&& (nbSeq < staticFse_nbSeq_max) ) {
|
||||
DEBUGLOG(5, "Selected set_repeat");
|
||||
return set_repeat;
|
||||
}
|
||||
if ( (nbSeq < dynamicFse_nbSeq_min)
|
||||
|| (mostFrequent < (nbSeq >> (defaultNormLog-1))) ) {
|
||||
DEBUGLOG(5, "Selected set_basic");
|
||||
/* The format allows default tables to be repeated, but it isn't useful.
|
||||
* When using simple heuristics to select encoding type, we don't want
|
||||
* to confuse these tables with dictionaries. When running more careful
|
||||
* analysis, we don't need to waste time checking both repeating tables
|
||||
* and default tables.
|
||||
*/
|
||||
*repeatMode = FSE_repeat_none;
|
||||
return set_basic;
|
||||
}
|
||||
}
|
||||
} else {
|
||||
size_t const basicCost = isDefaultAllowed ? ZSTD_crossEntropyCost(defaultNorm, defaultNormLog, count, max) : ERROR(GENERIC);
|
||||
size_t const repeatCost = *repeatMode != FSE_repeat_none ? ZSTD_fseBitCost(prevCTable, count, max) : ERROR(GENERIC);
|
||||
size_t const NCountCost = ZSTD_NCountCost(count, max, nbSeq, FSELog);
|
||||
size_t const compressedCost = (NCountCost << 3) + ZSTD_entropyCost(count, max, nbSeq);
|
||||
|
||||
if (isDefaultAllowed) {
|
||||
assert(!ZSTD_isError(basicCost));
|
||||
assert(!(*repeatMode == FSE_repeat_valid && ZSTD_isError(repeatCost)));
|
||||
}
|
||||
assert(!ZSTD_isError(NCountCost));
|
||||
assert(compressedCost < ERROR(maxCode));
|
||||
DEBUGLOG(5, "Estimated bit costs: basic=%u\trepeat=%u\tcompressed=%u",
|
||||
(unsigned)basicCost, (unsigned)repeatCost, (unsigned)compressedCost);
|
||||
if (basicCost <= repeatCost && basicCost <= compressedCost) {
|
||||
DEBUGLOG(5, "Selected set_basic");
|
||||
assert(isDefaultAllowed);
|
||||
*repeatMode = FSE_repeat_none;
|
||||
return set_basic;
|
||||
}
|
||||
if (repeatCost <= compressedCost) {
|
||||
DEBUGLOG(5, "Selected set_repeat");
|
||||
assert(!ZSTD_isError(repeatCost));
|
||||
return set_repeat;
|
||||
}
|
||||
assert(compressedCost < basicCost && compressedCost < repeatCost);
|
||||
}
|
||||
DEBUGLOG(5, "Selected set_compressed");
|
||||
*repeatMode = FSE_repeat_check;
|
||||
return set_compressed;
|
||||
}
|
||||
|
||||
typedef struct {
|
||||
S16 norm[MaxSeq + 1];
|
||||
U32 wksp[FSE_BUILD_CTABLE_WORKSPACE_SIZE_U32(MaxSeq, MaxFSELog)];
|
||||
} ZSTD_BuildCTableWksp;
|
||||
|
||||
size_t
|
||||
ZSTD_buildCTable(void* dst, size_t dstCapacity,
|
||||
FSE_CTable* nextCTable, U32 FSELog, symbolEncodingType_e type,
|
||||
unsigned* count, U32 max,
|
||||
const BYTE* codeTable, size_t nbSeq,
|
||||
const S16* defaultNorm, U32 defaultNormLog, U32 defaultMax,
|
||||
const FSE_CTable* prevCTable, size_t prevCTableSize,
|
||||
void* entropyWorkspace, size_t entropyWorkspaceSize)
|
||||
{
|
||||
BYTE* op = (BYTE*)dst;
|
||||
const BYTE* const oend = op + dstCapacity;
|
||||
DEBUGLOG(6, "ZSTD_buildCTable (dstCapacity=%u)", (unsigned)dstCapacity);
|
||||
|
||||
switch (type) {
|
||||
case set_rle:
|
||||
FORWARD_IF_ERROR(FSE_buildCTable_rle(nextCTable, (BYTE)max), "");
|
||||
RETURN_ERROR_IF(dstCapacity==0, dstSize_tooSmall, "not enough space");
|
||||
*op = codeTable[0];
|
||||
return 1;
|
||||
case set_repeat:
|
||||
ZSTD_memcpy(nextCTable, prevCTable, prevCTableSize);
|
||||
return 0;
|
||||
case set_basic:
|
||||
FORWARD_IF_ERROR(FSE_buildCTable_wksp(nextCTable, defaultNorm, defaultMax, defaultNormLog, entropyWorkspace, entropyWorkspaceSize), ""); /* note : could be pre-calculated */
|
||||
return 0;
|
||||
case set_compressed: {
|
||||
ZSTD_BuildCTableWksp* wksp = (ZSTD_BuildCTableWksp*)entropyWorkspace;
|
||||
size_t nbSeq_1 = nbSeq;
|
||||
const U32 tableLog = FSE_optimalTableLog(FSELog, nbSeq, max);
|
||||
if (count[codeTable[nbSeq-1]] > 1) {
|
||||
count[codeTable[nbSeq-1]]--;
|
||||
nbSeq_1--;
|
||||
}
|
||||
assert(nbSeq_1 > 1);
|
||||
assert(entropyWorkspaceSize >= sizeof(ZSTD_BuildCTableWksp));
|
||||
(void)entropyWorkspaceSize;
|
||||
FORWARD_IF_ERROR(FSE_normalizeCount(wksp->norm, tableLog, count, nbSeq_1, max, ZSTD_useLowProbCount(nbSeq_1)), "FSE_normalizeCount failed");
|
||||
assert(oend >= op);
|
||||
{ size_t const NCountSize = FSE_writeNCount(op, (size_t)(oend - op), wksp->norm, max, tableLog); /* overflow protected */
|
||||
FORWARD_IF_ERROR(NCountSize, "FSE_writeNCount failed");
|
||||
FORWARD_IF_ERROR(FSE_buildCTable_wksp(nextCTable, wksp->norm, max, tableLog, wksp->wksp, sizeof(wksp->wksp)), "FSE_buildCTable_wksp failed");
|
||||
return NCountSize;
|
||||
}
|
||||
}
|
||||
default: assert(0); RETURN_ERROR(GENERIC, "impossible to reach");
|
||||
}
|
||||
}
|
||||
|
||||
FORCE_INLINE_TEMPLATE size_t
|
||||
ZSTD_encodeSequences_body(
|
||||
void* dst, size_t dstCapacity,
|
||||
FSE_CTable const* CTable_MatchLength, BYTE const* mlCodeTable,
|
||||
FSE_CTable const* CTable_OffsetBits, BYTE const* ofCodeTable,
|
||||
FSE_CTable const* CTable_LitLength, BYTE const* llCodeTable,
|
||||
seqDef const* sequences, size_t nbSeq, int longOffsets)
|
||||
{
|
||||
BIT_CStream_t blockStream;
|
||||
FSE_CState_t stateMatchLength;
|
||||
FSE_CState_t stateOffsetBits;
|
||||
FSE_CState_t stateLitLength;
|
||||
|
||||
RETURN_ERROR_IF(
|
||||
ERR_isError(BIT_initCStream(&blockStream, dst, dstCapacity)),
|
||||
dstSize_tooSmall, "not enough space remaining");
|
||||
DEBUGLOG(6, "available space for bitstream : %i (dstCapacity=%u)",
|
||||
(int)(blockStream.endPtr - blockStream.startPtr),
|
||||
(unsigned)dstCapacity);
|
||||
|
||||
/* first symbols */
|
||||
FSE_initCState2(&stateMatchLength, CTable_MatchLength, mlCodeTable[nbSeq-1]);
|
||||
FSE_initCState2(&stateOffsetBits, CTable_OffsetBits, ofCodeTable[nbSeq-1]);
|
||||
FSE_initCState2(&stateLitLength, CTable_LitLength, llCodeTable[nbSeq-1]);
|
||||
BIT_addBits(&blockStream, sequences[nbSeq-1].litLength, LL_bits[llCodeTable[nbSeq-1]]);
|
||||
if (MEM_32bits()) BIT_flushBits(&blockStream);
|
||||
BIT_addBits(&blockStream, sequences[nbSeq-1].mlBase, ML_bits[mlCodeTable[nbSeq-1]]);
|
||||
if (MEM_32bits()) BIT_flushBits(&blockStream);
|
||||
if (longOffsets) {
|
||||
U32 const ofBits = ofCodeTable[nbSeq-1];
|
||||
unsigned const extraBits = ofBits - MIN(ofBits, STREAM_ACCUMULATOR_MIN-1);
|
||||
if (extraBits) {
|
||||
BIT_addBits(&blockStream, sequences[nbSeq-1].offBase, extraBits);
|
||||
BIT_flushBits(&blockStream);
|
||||
}
|
||||
        BIT_addBits(&blockStream, sequences[nbSeq-1].offBase >> extraBits,
                    ofBits - extraBits);
    } else {
        BIT_addBits(&blockStream, sequences[nbSeq-1].offBase, ofCodeTable[nbSeq-1]);
    }
    BIT_flushBits(&blockStream);

    {   size_t n;
        for (n=nbSeq-2 ; n<nbSeq ; n--) {      /* intentional underflow */
            BYTE const llCode = llCodeTable[n];
            BYTE const ofCode = ofCodeTable[n];
            BYTE const mlCode = mlCodeTable[n];
            U32  const llBits = LL_bits[llCode];
            U32  const ofBits = ofCode;
            U32  const mlBits = ML_bits[mlCode];
            DEBUGLOG(6, "encoding: litlen:%2u - matchlen:%2u - offCode:%7u",
                        (unsigned)sequences[n].litLength,
                        (unsigned)sequences[n].mlBase + MINMATCH,
                        (unsigned)sequences[n].offBase);
                                                                            /* 32b*/  /* 64b*/
                                                                            /* (7)*/  /* (7)*/
            FSE_encodeSymbol(&blockStream, &stateOffsetBits, ofCode);       /* 15 */  /* 15 */
            FSE_encodeSymbol(&blockStream, &stateMatchLength, mlCode);      /* 24 */  /* 24 */
            if (MEM_32bits()) BIT_flushBits(&blockStream);                  /* (7)*/
            FSE_encodeSymbol(&blockStream, &stateLitLength, llCode);        /* 16 */  /* 33 */
            if (MEM_32bits() || (ofBits+mlBits+llBits >= 64-7-(LLFSELog+MLFSELog+OffFSELog)))
                BIT_flushBits(&blockStream);                                /* (7)*/
            BIT_addBits(&blockStream, sequences[n].litLength, llBits);
            if (MEM_32bits() && ((llBits+mlBits)>24)) BIT_flushBits(&blockStream);
            BIT_addBits(&blockStream, sequences[n].mlBase, mlBits);
            if (MEM_32bits() || (ofBits+mlBits+llBits > 56)) BIT_flushBits(&blockStream);
            if (longOffsets) {
                unsigned const extraBits = ofBits - MIN(ofBits, STREAM_ACCUMULATOR_MIN-1);
                if (extraBits) {
                    BIT_addBits(&blockStream, sequences[n].offBase, extraBits);
                    BIT_flushBits(&blockStream);                            /* (7)*/
                }
                BIT_addBits(&blockStream, sequences[n].offBase >> extraBits,
                            ofBits - extraBits);                            /* 31 */
            } else {
                BIT_addBits(&blockStream, sequences[n].offBase, ofBits);    /* 31 */
            }
            BIT_flushBits(&blockStream);                                    /* (7)*/
            DEBUGLOG(7, "remaining space : %i", (int)(blockStream.endPtr - blockStream.ptr));
    }   }

    DEBUGLOG(6, "ZSTD_encodeSequences: flushing ML state with %u bits", stateMatchLength.stateLog);
    FSE_flushCState(&blockStream, &stateMatchLength);
    DEBUGLOG(6, "ZSTD_encodeSequences: flushing Off state with %u bits", stateOffsetBits.stateLog);
    FSE_flushCState(&blockStream, &stateOffsetBits);
    DEBUGLOG(6, "ZSTD_encodeSequences: flushing LL state with %u bits", stateLitLength.stateLog);
    FSE_flushCState(&blockStream, &stateLitLength);

    {   size_t const streamSize = BIT_closeCStream(&blockStream);
        RETURN_ERROR_IF(streamSize==0, dstSize_tooSmall, "not enough space");
        return streamSize;
    }
}

static size_t
ZSTD_encodeSequences_default(
            void* dst, size_t dstCapacity,
            FSE_CTable const* CTable_MatchLength, BYTE const* mlCodeTable,
            FSE_CTable const* CTable_OffsetBits, BYTE const* ofCodeTable,
            FSE_CTable const* CTable_LitLength, BYTE const* llCodeTable,
            seqDef const* sequences, size_t nbSeq, int longOffsets)
{
    return ZSTD_encodeSequences_body(dst, dstCapacity,
                                     CTable_MatchLength, mlCodeTable,
                                     CTable_OffsetBits, ofCodeTable,
                                     CTable_LitLength, llCodeTable,
                                     sequences, nbSeq, longOffsets);
}


#if DYNAMIC_BMI2

static BMI2_TARGET_ATTRIBUTE size_t
ZSTD_encodeSequences_bmi2(
            void* dst, size_t dstCapacity,
            FSE_CTable const* CTable_MatchLength, BYTE const* mlCodeTable,
            FSE_CTable const* CTable_OffsetBits, BYTE const* ofCodeTable,
            FSE_CTable const* CTable_LitLength, BYTE const* llCodeTable,
            seqDef const* sequences, size_t nbSeq, int longOffsets)
{
    return ZSTD_encodeSequences_body(dst, dstCapacity,
                                     CTable_MatchLength, mlCodeTable,
                                     CTable_OffsetBits, ofCodeTable,
                                     CTable_LitLength, llCodeTable,
                                     sequences, nbSeq, longOffsets);
}

#endif

size_t ZSTD_encodeSequences(
            void* dst, size_t dstCapacity,
            FSE_CTable const* CTable_MatchLength, BYTE const* mlCodeTable,
            FSE_CTable const* CTable_OffsetBits, BYTE const* ofCodeTable,
            FSE_CTable const* CTable_LitLength, BYTE const* llCodeTable,
            seqDef const* sequences, size_t nbSeq, int longOffsets, int bmi2)
{
    DEBUGLOG(5, "ZSTD_encodeSequences: dstCapacity = %u", (unsigned)dstCapacity);
#if DYNAMIC_BMI2
    if (bmi2) {
        return ZSTD_encodeSequences_bmi2(dst, dstCapacity,
                                         CTable_MatchLength, mlCodeTable,
                                         CTable_OffsetBits, ofCodeTable,
                                         CTable_LitLength, llCodeTable,
                                         sequences, nbSeq, longOffsets);
    }
#endif
    (void)bmi2;
    return ZSTD_encodeSequences_default(dst, dstCapacity,
                                        CTable_MatchLength, mlCodeTable,
                                        CTable_OffsetBits, ofCodeTable,
                                        CTable_LitLength, llCodeTable,
                                        sequences, nbSeq, longOffsets);
}
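/* [Editor's note: illustrative arithmetic, not part of the vendored zstd source.]
 * The bit-count comments in ZSTD_encodeSequences_body above can be checked
 * against zstd's default table logs (LLFSELog = 9, MLFSELog = 9, OffFSELog = 8).
 * On a 64-bit target, between two flushes the accumulator holds at most
 *     7 (carry left by BIT_flushBits) + 8 + 9 + 9 (three FSE state updates)
 * plus the raw value bits. The guard flushes right after the state updates
 * whenever ofBits + mlBits + llBits >= 64 - 7 - (9 + 9 + 8) = 31; in the
 * no-flush case the total is at most 7 + 26 + 30 = 63 bits, which fits the
 * 64-bit container. */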
54
ext/zstd/lib/compress/zstd_compress_sequences.h
Normal file
@@ -0,0 +1,54 @@
/*
 * Copyright (c) Meta Platforms, Inc. and affiliates.
 * All rights reserved.
 *
 * This source code is licensed under both the BSD-style license (found in the
 * LICENSE file in the root directory of this source tree) and the GPLv2 (found
 * in the COPYING file in the root directory of this source tree).
 * You may select, at your option, one of the above-listed licenses.
 */

#ifndef ZSTD_COMPRESS_SEQUENCES_H
#define ZSTD_COMPRESS_SEQUENCES_H

#include "../common/fse.h" /* FSE_repeat, FSE_CTable */
#include "../common/zstd_internal.h" /* symbolEncodingType_e, ZSTD_strategy */

typedef enum {
    ZSTD_defaultDisallowed = 0,
    ZSTD_defaultAllowed = 1
} ZSTD_defaultPolicy_e;

symbolEncodingType_e
ZSTD_selectEncodingType(
        FSE_repeat* repeatMode, unsigned const* count, unsigned const max,
        size_t const mostFrequent, size_t nbSeq, unsigned const FSELog,
        FSE_CTable const* prevCTable,
        short const* defaultNorm, U32 defaultNormLog,
        ZSTD_defaultPolicy_e const isDefaultAllowed,
        ZSTD_strategy const strategy);

size_t
ZSTD_buildCTable(void* dst, size_t dstCapacity,
                 FSE_CTable* nextCTable, U32 FSELog, symbolEncodingType_e type,
                 unsigned* count, U32 max,
                 const BYTE* codeTable, size_t nbSeq,
                 const S16* defaultNorm, U32 defaultNormLog, U32 defaultMax,
                 const FSE_CTable* prevCTable, size_t prevCTableSize,
                 void* entropyWorkspace, size_t entropyWorkspaceSize);

size_t ZSTD_encodeSequences(
            void* dst, size_t dstCapacity,
            FSE_CTable const* CTable_MatchLength, BYTE const* mlCodeTable,
            FSE_CTable const* CTable_OffsetBits, BYTE const* ofCodeTable,
            FSE_CTable const* CTable_LitLength, BYTE const* llCodeTable,
            seqDef const* sequences, size_t nbSeq, int longOffsets, int bmi2);

size_t ZSTD_fseBitCost(
    FSE_CTable const* ctable,
    unsigned const* count,
    unsigned const max);

size_t ZSTD_crossEntropyCost(short const* norm, unsigned accuracyLog,
                             unsigned const* count, unsigned const max);
#endif /* ZSTD_COMPRESS_SEQUENCES_H */
577
ext/zstd/lib/compress/zstd_compress_superblock.c
vendored
Normal file
@@ -0,0 +1,577 @@
/*
 * Copyright (c) Meta Platforms, Inc. and affiliates.
 * All rights reserved.
 *
 * This source code is licensed under both the BSD-style license (found in the
 * LICENSE file in the root directory of this source tree) and the GPLv2 (found
 * in the COPYING file in the root directory of this source tree).
 * You may select, at your option, one of the above-listed licenses.
 */

/*-*************************************
*  Dependencies
***************************************/
#include "zstd_compress_superblock.h"

#include "../common/zstd_internal.h"  /* ZSTD_getSequenceLength */
#include "hist.h"                     /* HIST_countFast_wksp */
#include "zstd_compress_internal.h"   /* ZSTD_[huf|fse|entropy]CTablesMetadata_t */
#include "zstd_compress_sequences.h"
#include "zstd_compress_literals.h"

/** ZSTD_compressSubBlock_literal() :
 *  Compresses literals section for a sub-block.
 *  When we have to write the Huffman table we will sometimes choose a header
 *  size larger than necessary. This is because we have to pick the header size
 *  before we know the table size + compressed size, so we have a bound on the
 *  table size. If we guessed incorrectly, we fall back to uncompressed literals.
 *
 *  We write the header when writeEntropy=1 and set entropyWritten=1 when we succeeded
 *  in writing the header, otherwise it is set to 0.
 *
 *  hufMetadata->hType has literals block type info.
 *      If it is set_basic, all sub-blocks literals section will be Raw_Literals_Block.
 *      If it is set_rle, all sub-blocks literals section will be RLE_Literals_Block.
 *      If it is set_compressed, the first sub-block's literals section will be Compressed_Literals_Block
 *      and the following sub-blocks' literals sections will be Treeless_Literals_Block.
 *  @return : compressed size of literals section of a sub-block
 *            Or 0 if unable to compress.
 *            Or error code */
static size_t
ZSTD_compressSubBlock_literal(const HUF_CElt* hufTable,
                              const ZSTD_hufCTablesMetadata_t* hufMetadata,
                              const BYTE* literals, size_t litSize,
                              void* dst, size_t dstSize,
                              const int bmi2, int writeEntropy, int* entropyWritten)
{
    size_t const header = writeEntropy ? 200 : 0;
    size_t const lhSize = 3 + (litSize >= (1 KB - header)) + (litSize >= (16 KB - header));
    BYTE* const ostart = (BYTE*)dst;
    BYTE* const oend = ostart + dstSize;
    BYTE* op = ostart + lhSize;
    U32 const singleStream = lhSize == 3;
    symbolEncodingType_e hType = writeEntropy ? hufMetadata->hType : set_repeat;
    size_t cLitSize = 0;

    DEBUGLOG(5, "ZSTD_compressSubBlock_literal (litSize=%zu, lhSize=%zu, writeEntropy=%d)", litSize, lhSize, writeEntropy);

    *entropyWritten = 0;
    if (litSize == 0 || hufMetadata->hType == set_basic) {
        DEBUGLOG(5, "ZSTD_compressSubBlock_literal using raw literal");
        return ZSTD_noCompressLiterals(dst, dstSize, literals, litSize);
    } else if (hufMetadata->hType == set_rle) {
        DEBUGLOG(5, "ZSTD_compressSubBlock_literal using rle literal");
        return ZSTD_compressRleLiteralsBlock(dst, dstSize, literals, litSize);
    }

    assert(litSize > 0);
    assert(hufMetadata->hType == set_compressed || hufMetadata->hType == set_repeat);

    if (writeEntropy && hufMetadata->hType == set_compressed) {
        ZSTD_memcpy(op, hufMetadata->hufDesBuffer, hufMetadata->hufDesSize);
        op += hufMetadata->hufDesSize;
        cLitSize += hufMetadata->hufDesSize;
        DEBUGLOG(5, "ZSTD_compressSubBlock_literal (hSize=%zu)", hufMetadata->hufDesSize);
    }

    {   int const flags = bmi2 ? HUF_flags_bmi2 : 0;
        const size_t cSize = singleStream ? HUF_compress1X_usingCTable(op, oend-op, literals, litSize, hufTable, flags)
                                          : HUF_compress4X_usingCTable(op, oend-op, literals, litSize, hufTable, flags);
        op += cSize;
        cLitSize += cSize;
        if (cSize == 0 || ERR_isError(cSize)) {
            DEBUGLOG(5, "Failed to write entropy tables %s", ZSTD_getErrorName(cSize));
            return 0;
        }
        /* If we expand and we aren't writing a header then emit uncompressed */
        if (!writeEntropy && cLitSize >= litSize) {
            DEBUGLOG(5, "ZSTD_compressSubBlock_literal using raw literal because uncompressible");
            return ZSTD_noCompressLiterals(dst, dstSize, literals, litSize);
        }
        /* If we are writing headers then allow expansion that doesn't change our header size. */
        if (lhSize < (size_t)(3 + (cLitSize >= 1 KB) + (cLitSize >= 16 KB))) {
            assert(cLitSize > litSize);
            DEBUGLOG(5, "Literals expanded beyond allowed header size");
            return ZSTD_noCompressLiterals(dst, dstSize, literals, litSize);
        }
        DEBUGLOG(5, "ZSTD_compressSubBlock_literal (cSize=%zu)", cSize);
    }

    /* Build header */
    switch(lhSize)
    {
    case 3: /* 2 - 2 - 10 - 10 */
        {   U32 const lhc = hType + ((!singleStream) << 2) + ((U32)litSize<<4) + ((U32)cLitSize<<14);
            MEM_writeLE24(ostart, lhc);
            break;
        }
    case 4: /* 2 - 2 - 14 - 14 */
        {   U32 const lhc = hType + (2 << 2) + ((U32)litSize<<4) + ((U32)cLitSize<<18);
            MEM_writeLE32(ostart, lhc);
            break;
        }
    case 5: /* 2 - 2 - 18 - 18 */
        {   U32 const lhc = hType + (3 << 2) + ((U32)litSize<<4) + ((U32)cLitSize<<22);
            MEM_writeLE32(ostart, lhc);
            ostart[4] = (BYTE)(cLitSize >> 10);
            break;
        }
    default: /* not possible : lhSize is {3,4,5} */
        assert(0);
    }
    *entropyWritten = 1;
    DEBUGLOG(5, "Compressed literals: %u -> %u", (U32)litSize, (U32)(op-ostart));
    return op-ostart;
}
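/* [Editor's note: worked example, not part of the vendored zstd source.]
 * For the common 3-byte header built above (case 3), the packing is
 * 2 bits type | 2 bits size-format | 10 bits litSize | 10 bits cLitSize.
 * E.g. hType = set_compressed (2), single stream, litSize = 1000, cLitSize = 600:
 *     lhc = 2 + (0 << 2) + (1000 << 4) + (600 << 14)
 *         = 2 + 16000 + 9830400 = 9846402 = 0x963E82
 * MEM_writeLE24() then stores the bytes 82 3E 96. */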

static size_t
ZSTD_seqDecompressedSize(seqStore_t const* seqStore,
                         const seqDef* sequences, size_t nbSeq,
                         size_t litSize, int lastSequence)
{
    const seqDef* const sstart = sequences;
    const seqDef* const send = sequences + nbSeq;
    const seqDef* sp = sstart;
    size_t matchLengthSum = 0;
    size_t litLengthSum = 0;
    (void)(litLengthSum); /* suppress unused variable warning on some environments */
    while (send-sp > 0) {
        ZSTD_sequenceLength const seqLen = ZSTD_getSequenceLength(seqStore, sp);
        litLengthSum += seqLen.litLength;
        matchLengthSum += seqLen.matchLength;
        sp++;
    }
    assert(litLengthSum <= litSize);
    if (!lastSequence) {
        assert(litLengthSum == litSize);
    }
    return matchLengthSum + litSize;
}

/** ZSTD_compressSubBlock_sequences() :
 *  Compresses sequences section for a sub-block.
 *  fseMetadata->llType, fseMetadata->ofType, and fseMetadata->mlType have
 *  symbol compression modes for the super-block.
 *  The first successfully compressed block will have these in its header.
 *  We set entropyWritten=1 when we succeed in compressing the sequences.
 *  The following sub-blocks will always have repeat mode.
 *  @return : compressed size of sequences section of a sub-block
 *            Or 0 if it is unable to compress
 *            Or error code. */
static size_t
ZSTD_compressSubBlock_sequences(const ZSTD_fseCTables_t* fseTables,
                                const ZSTD_fseCTablesMetadata_t* fseMetadata,
                                const seqDef* sequences, size_t nbSeq,
                                const BYTE* llCode, const BYTE* mlCode, const BYTE* ofCode,
                                const ZSTD_CCtx_params* cctxParams,
                                void* dst, size_t dstCapacity,
                                const int bmi2, int writeEntropy, int* entropyWritten)
{
    const int longOffsets = cctxParams->cParams.windowLog > STREAM_ACCUMULATOR_MIN;
    BYTE* const ostart = (BYTE*)dst;
    BYTE* const oend = ostart + dstCapacity;
    BYTE* op = ostart;
    BYTE* seqHead;

    DEBUGLOG(5, "ZSTD_compressSubBlock_sequences (nbSeq=%zu, writeEntropy=%d, longOffsets=%d)", nbSeq, writeEntropy, longOffsets);

    *entropyWritten = 0;
    /* Sequences Header */
    RETURN_ERROR_IF((oend-op) < 3 /*max nbSeq Size*/ + 1 /*seqHead*/,
                    dstSize_tooSmall, "");
    if (nbSeq < 0x7F)
        *op++ = (BYTE)nbSeq;
    else if (nbSeq < LONGNBSEQ)
        op[0] = (BYTE)((nbSeq>>8) + 0x80), op[1] = (BYTE)nbSeq, op+=2;
    else
        op[0]=0xFF, MEM_writeLE16(op+1, (U16)(nbSeq - LONGNBSEQ)), op+=3;
    if (nbSeq==0) {
        return op - ostart;
    }

    /* seqHead : flags for FSE encoding type */
    seqHead = op++;

    DEBUGLOG(5, "ZSTD_compressSubBlock_sequences (seqHeadSize=%u)", (unsigned)(op-ostart));

    if (writeEntropy) {
        const U32 LLtype = fseMetadata->llType;
        const U32 Offtype = fseMetadata->ofType;
        const U32 MLtype = fseMetadata->mlType;
        DEBUGLOG(5, "ZSTD_compressSubBlock_sequences (fseTablesSize=%zu)", fseMetadata->fseTablesSize);
        *seqHead = (BYTE)((LLtype<<6) + (Offtype<<4) + (MLtype<<2));
        ZSTD_memcpy(op, fseMetadata->fseTablesBuffer, fseMetadata->fseTablesSize);
        op += fseMetadata->fseTablesSize;
    } else {
        const U32 repeat = set_repeat;
        *seqHead = (BYTE)((repeat<<6) + (repeat<<4) + (repeat<<2));
    }

    {   size_t const bitstreamSize = ZSTD_encodeSequences(
                                        op, oend - op,
                                        fseTables->matchlengthCTable, mlCode,
                                        fseTables->offcodeCTable, ofCode,
                                        fseTables->litlengthCTable, llCode,
                                        sequences, nbSeq,
                                        longOffsets, bmi2);
        FORWARD_IF_ERROR(bitstreamSize, "ZSTD_encodeSequences failed");
        op += bitstreamSize;
        /* zstd versions <= 1.3.4 mistakenly report corruption when
         * FSE_readNCount() receives a buffer < 4 bytes.
         * Fixed by https://github.com/facebook/zstd/pull/1146.
         * This can happen when the last set_compressed table present is 2
         * bytes and the bitstream is only one byte.
         * In this exceedingly rare case, we will simply emit an uncompressed
         * block, since it isn't worth optimizing.
         */
#ifndef FUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION
        if (writeEntropy && fseMetadata->lastCountSize && fseMetadata->lastCountSize + bitstreamSize < 4) {
            /* NCountSize >= 2 && bitstreamSize > 0 ==> lastCountSize == 3 */
            assert(fseMetadata->lastCountSize + bitstreamSize == 3);
            DEBUGLOG(5, "Avoiding bug in zstd decoder in versions <= 1.3.4 by "
                        "emitting an uncompressed block.");
            return 0;
        }
#endif
        DEBUGLOG(5, "ZSTD_compressSubBlock_sequences (bitstreamSize=%zu)", bitstreamSize);
    }

    /* zstd versions <= 1.4.0 mistakenly report error when
     * sequences section body size is less than 3 bytes.
     * Fixed by https://github.com/facebook/zstd/pull/1664.
     * This can happen when the previous sequences section block is compressed
     * with rle mode and the current block's sequences section is compressed
     * with repeat mode where sequences section body size can be 1 byte.
     */
#ifndef FUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION
    if (op-seqHead < 4) {
        DEBUGLOG(5, "Avoiding bug in zstd decoder in versions <= 1.4.0 by emitting "
                    "an uncompressed block when sequences are < 4 bytes");
        return 0;
    }
#endif

    *entropyWritten = 1;
    return op - ostart;
}
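/* [Editor's note: worked example, not part of the vendored zstd source.]
 * The variable-length nbSeq header written above encodes as:
 *   nbSeq = 100   -> 1 byte:  0x64                (nbSeq < 0x7F)
 *   nbSeq = 1000  -> 2 bytes: 0x83 0xE8           ((1000>>8)+0x80, 1000&0xFF)
 *   nbSeq = 40000 -> 3 bytes: 0xFF 0x40 0x1D      (0xFF, then LE16(40000-0x7F00))
 * Note the first branch tests `< 0x7F`, so nbSeq == 127 already takes the
 * two-byte form (0x80 0x7F). */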

/** ZSTD_compressSubBlock() :
 *  Compresses a single sub-block.
 *  @return : compressed size of the sub-block
 *            Or 0 if it failed to compress. */
static size_t ZSTD_compressSubBlock(const ZSTD_entropyCTables_t* entropy,
                                    const ZSTD_entropyCTablesMetadata_t* entropyMetadata,
                                    const seqDef* sequences, size_t nbSeq,
                                    const BYTE* literals, size_t litSize,
                                    const BYTE* llCode, const BYTE* mlCode, const BYTE* ofCode,
                                    const ZSTD_CCtx_params* cctxParams,
                                    void* dst, size_t dstCapacity,
                                    const int bmi2,
                                    int writeLitEntropy, int writeSeqEntropy,
                                    int* litEntropyWritten, int* seqEntropyWritten,
                                    U32 lastBlock)
{
    BYTE* const ostart = (BYTE*)dst;
    BYTE* const oend = ostart + dstCapacity;
    BYTE* op = ostart + ZSTD_blockHeaderSize;
    DEBUGLOG(5, "ZSTD_compressSubBlock (litSize=%zu, nbSeq=%zu, writeLitEntropy=%d, writeSeqEntropy=%d, lastBlock=%d)",
                litSize, nbSeq, writeLitEntropy, writeSeqEntropy, lastBlock);
    {   size_t cLitSize = ZSTD_compressSubBlock_literal((const HUF_CElt*)entropy->huf.CTable,
                                                        &entropyMetadata->hufMetadata, literals, litSize,
                                                        op, oend-op, bmi2, writeLitEntropy, litEntropyWritten);
        FORWARD_IF_ERROR(cLitSize, "ZSTD_compressSubBlock_literal failed");
        if (cLitSize == 0) return 0;
        op += cLitSize;
    }
    {   size_t cSeqSize = ZSTD_compressSubBlock_sequences(&entropy->fse,
                                                          &entropyMetadata->fseMetadata,
                                                          sequences, nbSeq,
                                                          llCode, mlCode, ofCode,
                                                          cctxParams,
                                                          op, oend-op,
                                                          bmi2, writeSeqEntropy, seqEntropyWritten);
        FORWARD_IF_ERROR(cSeqSize, "ZSTD_compressSubBlock_sequences failed");
        if (cSeqSize == 0) return 0;
        op += cSeqSize;
    }
    /* Write block header */
    {   size_t cSize = (op-ostart)-ZSTD_blockHeaderSize;
        U32 const cBlockHeader24 = lastBlock + (((U32)bt_compressed)<<1) + (U32)(cSize << 3);
        MEM_writeLE24(ostart, cBlockHeader24);
    }
    return op-ostart;
}

static size_t ZSTD_estimateSubBlockSize_literal(const BYTE* literals, size_t litSize,
                                                const ZSTD_hufCTables_t* huf,
                                                const ZSTD_hufCTablesMetadata_t* hufMetadata,
                                                void* workspace, size_t wkspSize,
                                                int writeEntropy)
{
    unsigned* const countWksp = (unsigned*)workspace;
    unsigned maxSymbolValue = 255;
    size_t literalSectionHeaderSize = 3; /* Use hard coded size of 3 bytes */

    if (hufMetadata->hType == set_basic) return litSize;
    else if (hufMetadata->hType == set_rle) return 1;
    else if (hufMetadata->hType == set_compressed || hufMetadata->hType == set_repeat) {
        size_t const largest = HIST_count_wksp (countWksp, &maxSymbolValue, (const BYTE*)literals, litSize, workspace, wkspSize);
        if (ZSTD_isError(largest)) return litSize;
        {   size_t cLitSizeEstimate = HUF_estimateCompressedSize((const HUF_CElt*)huf->CTable, countWksp, maxSymbolValue);
            if (writeEntropy) cLitSizeEstimate += hufMetadata->hufDesSize;
            return cLitSizeEstimate + literalSectionHeaderSize;
    }   }
    assert(0); /* impossible */
    return 0;
}

static size_t ZSTD_estimateSubBlockSize_symbolType(symbolEncodingType_e type,
                        const BYTE* codeTable, unsigned maxCode,
                        size_t nbSeq, const FSE_CTable* fseCTable,
                        const U8* additionalBits,
                        short const* defaultNorm, U32 defaultNormLog, U32 defaultMax,
                        void* workspace, size_t wkspSize)
{
    unsigned* const countWksp = (unsigned*)workspace;
    const BYTE* ctp = codeTable;
    const BYTE* const ctStart = ctp;
    const BYTE* const ctEnd = ctStart + nbSeq;
    size_t cSymbolTypeSizeEstimateInBits = 0;
    unsigned max = maxCode;

    HIST_countFast_wksp(countWksp, &max, codeTable, nbSeq, workspace, wkspSize);  /* can't fail */
    if (type == set_basic) {
        /* We selected this encoding type, so it must be valid. */
        assert(max <= defaultMax);
        cSymbolTypeSizeEstimateInBits = max <= defaultMax
                ? ZSTD_crossEntropyCost(defaultNorm, defaultNormLog, countWksp, max)
                : ERROR(GENERIC);
    } else if (type == set_rle) {
        cSymbolTypeSizeEstimateInBits = 0;
    } else if (type == set_compressed || type == set_repeat) {
        cSymbolTypeSizeEstimateInBits = ZSTD_fseBitCost(fseCTable, countWksp, max);
    }
    if (ZSTD_isError(cSymbolTypeSizeEstimateInBits)) return nbSeq * 10;
    while (ctp < ctEnd) {
        if (additionalBits) cSymbolTypeSizeEstimateInBits += additionalBits[*ctp];
        else cSymbolTypeSizeEstimateInBits += *ctp; /* for offset, offset code is also the number of additional bits */
        ctp++;
    }
    return cSymbolTypeSizeEstimateInBits / 8;
}

static size_t ZSTD_estimateSubBlockSize_sequences(const BYTE* ofCodeTable,
                                                  const BYTE* llCodeTable,
                                                  const BYTE* mlCodeTable,
                                                  size_t nbSeq,
                                                  const ZSTD_fseCTables_t* fseTables,
                                                  const ZSTD_fseCTablesMetadata_t* fseMetadata,
                                                  void* workspace, size_t wkspSize,
                                                  int writeEntropy)
{
    size_t const sequencesSectionHeaderSize = 3; /* Use hard coded size of 3 bytes */
    size_t cSeqSizeEstimate = 0;
    if (nbSeq == 0) return sequencesSectionHeaderSize;
    cSeqSizeEstimate += ZSTD_estimateSubBlockSize_symbolType(fseMetadata->ofType, ofCodeTable, MaxOff,
                                         nbSeq, fseTables->offcodeCTable, NULL,
                                         OF_defaultNorm, OF_defaultNormLog, DefaultMaxOff,
                                         workspace, wkspSize);
    cSeqSizeEstimate += ZSTD_estimateSubBlockSize_symbolType(fseMetadata->llType, llCodeTable, MaxLL,
                                         nbSeq, fseTables->litlengthCTable, LL_bits,
                                         LL_defaultNorm, LL_defaultNormLog, MaxLL,
                                         workspace, wkspSize);
    cSeqSizeEstimate += ZSTD_estimateSubBlockSize_symbolType(fseMetadata->mlType, mlCodeTable, MaxML,
                                         nbSeq, fseTables->matchlengthCTable, ML_bits,
                                         ML_defaultNorm, ML_defaultNormLog, MaxML,
                                         workspace, wkspSize);
    if (writeEntropy) cSeqSizeEstimate += fseMetadata->fseTablesSize;
    return cSeqSizeEstimate + sequencesSectionHeaderSize;
}

static size_t ZSTD_estimateSubBlockSize(const BYTE* literals, size_t litSize,
                                        const BYTE* ofCodeTable,
                                        const BYTE* llCodeTable,
                                        const BYTE* mlCodeTable,
                                        size_t nbSeq,
                                        const ZSTD_entropyCTables_t* entropy,
                                        const ZSTD_entropyCTablesMetadata_t* entropyMetadata,
                                        void* workspace, size_t wkspSize,
                                        int writeLitEntropy, int writeSeqEntropy) {
    size_t cSizeEstimate = 0;
    cSizeEstimate += ZSTD_estimateSubBlockSize_literal(literals, litSize,
                                                       &entropy->huf, &entropyMetadata->hufMetadata,
                                                       workspace, wkspSize, writeLitEntropy);
    cSizeEstimate += ZSTD_estimateSubBlockSize_sequences(ofCodeTable, llCodeTable, mlCodeTable,
                                                         nbSeq, &entropy->fse, &entropyMetadata->fseMetadata,
                                                         workspace, wkspSize, writeSeqEntropy);
    return cSizeEstimate + ZSTD_blockHeaderSize;
}

static int ZSTD_needSequenceEntropyTables(ZSTD_fseCTablesMetadata_t const* fseMetadata)
{
    if (fseMetadata->llType == set_compressed || fseMetadata->llType == set_rle)
        return 1;
    if (fseMetadata->mlType == set_compressed || fseMetadata->mlType == set_rle)
        return 1;
    if (fseMetadata->ofType == set_compressed || fseMetadata->ofType == set_rle)
        return 1;
    return 0;
}

/** ZSTD_compressSubBlock_multi() :
 *  Breaks super-block into multiple sub-blocks and compresses them.
 *  Entropy will be written to the first block.
 *  The following blocks will use repeat mode to compress.
 *  All sub-blocks are compressed blocks (no raw or rle blocks).
 *  @return : compressed size of the super block (which is multiple ZSTD blocks)
 *            Or 0 if it failed to compress. */
static size_t ZSTD_compressSubBlock_multi(const seqStore_t* seqStorePtr,
                            const ZSTD_compressedBlockState_t* prevCBlock,
                            ZSTD_compressedBlockState_t* nextCBlock,
                            const ZSTD_entropyCTablesMetadata_t* entropyMetadata,
                            const ZSTD_CCtx_params* cctxParams,
                            void* dst, size_t dstCapacity,
                            const void* src, size_t srcSize,
                            const int bmi2, U32 lastBlock,
                            void* workspace, size_t wkspSize)
{
    const seqDef* const sstart = seqStorePtr->sequencesStart;
    const seqDef* const send = seqStorePtr->sequences;
    const seqDef* sp = sstart;
    const BYTE* const lstart = seqStorePtr->litStart;
    const BYTE* const lend = seqStorePtr->lit;
    const BYTE* lp = lstart;
    BYTE const* ip = (BYTE const*)src;
    BYTE const* const iend = ip + srcSize;
    BYTE* const ostart = (BYTE*)dst;
    BYTE* const oend = ostart + dstCapacity;
    BYTE* op = ostart;
    const BYTE* llCodePtr = seqStorePtr->llCode;
    const BYTE* mlCodePtr = seqStorePtr->mlCode;
    const BYTE* ofCodePtr = seqStorePtr->ofCode;
    size_t targetCBlockSize = cctxParams->targetCBlockSize;
    size_t litSize, seqCount;
    int writeLitEntropy = entropyMetadata->hufMetadata.hType == set_compressed;
    int writeSeqEntropy = 1;
    int lastSequence = 0;

    DEBUGLOG(5, "ZSTD_compressSubBlock_multi (litSize=%u, nbSeq=%u)",
                (unsigned)(lend-lp), (unsigned)(send-sstart));

    litSize = 0;
    seqCount = 0;
    do {
        size_t cBlockSizeEstimate = 0;
        if (sstart == send) {
            lastSequence = 1;
        } else {
            const seqDef* const sequence = sp + seqCount;
            lastSequence = sequence == send - 1;
            litSize += ZSTD_getSequenceLength(seqStorePtr, sequence).litLength;
            seqCount++;
        }
        if (lastSequence) {
            assert(lp <= lend);
            assert(litSize <= (size_t)(lend - lp));
            litSize = (size_t)(lend - lp);
        }
        /* I think there is an optimization opportunity here.
         * Calling ZSTD_estimateSubBlockSize for every sequence can be wasteful
         * since it recalculates estimate from scratch.
         * For example, it would recount literal distribution and symbol codes every time.
         */
        cBlockSizeEstimate = ZSTD_estimateSubBlockSize(lp, litSize, ofCodePtr, llCodePtr, mlCodePtr, seqCount,
                                                       &nextCBlock->entropy, entropyMetadata,
                                                       workspace, wkspSize, writeLitEntropy, writeSeqEntropy);
        if (cBlockSizeEstimate > targetCBlockSize || lastSequence) {
            int litEntropyWritten = 0;
            int seqEntropyWritten = 0;
            const size_t decompressedSize = ZSTD_seqDecompressedSize(seqStorePtr, sp, seqCount, litSize, lastSequence);
            const size_t cSize = ZSTD_compressSubBlock(&nextCBlock->entropy, entropyMetadata,
                                                       sp, seqCount,
                                                       lp, litSize,
                                                       llCodePtr, mlCodePtr, ofCodePtr,
                                                       cctxParams,
                                                       op, oend-op,
                                                       bmi2, writeLitEntropy, writeSeqEntropy,
                                                       &litEntropyWritten, &seqEntropyWritten,
                                                       lastBlock && lastSequence);
            FORWARD_IF_ERROR(cSize, "ZSTD_compressSubBlock failed");
            if (cSize > 0 && cSize < decompressedSize) {
                DEBUGLOG(5, "Committed the sub-block");
                assert(ip + decompressedSize <= iend);
                ip += decompressedSize;
                sp += seqCount;
                lp += litSize;
                op += cSize;
                llCodePtr += seqCount;
                mlCodePtr += seqCount;
                ofCodePtr += seqCount;
                litSize = 0;
                seqCount = 0;
                /* Entropy only needs to be written once */
                if (litEntropyWritten) {
                    writeLitEntropy = 0;
                }
                if (seqEntropyWritten) {
                    writeSeqEntropy = 0;
                }
            }
        }
    } while (!lastSequence);
    if (writeLitEntropy) {
        DEBUGLOG(5, "ZSTD_compressSubBlock_multi has literal entropy tables unwritten");
        ZSTD_memcpy(&nextCBlock->entropy.huf, &prevCBlock->entropy.huf, sizeof(prevCBlock->entropy.huf));
    }
    if (writeSeqEntropy && ZSTD_needSequenceEntropyTables(&entropyMetadata->fseMetadata)) {
        /* If we haven't written our entropy tables, then we've violated our contract and
         * must emit an uncompressed block.
         */
        DEBUGLOG(5, "ZSTD_compressSubBlock_multi has sequence entropy tables unwritten");
        return 0;
    }
    if (ip < iend) {
        size_t const cSize = ZSTD_noCompressBlock(op, oend - op, ip, iend - ip, lastBlock);
        DEBUGLOG(5, "ZSTD_compressSubBlock_multi last sub-block uncompressed, %zu bytes", (size_t)(iend - ip));
        FORWARD_IF_ERROR(cSize, "ZSTD_noCompressBlock failed");
        assert(cSize != 0);
        op += cSize;
        /* We have to regenerate the repcodes because we've skipped some sequences */
        if (sp < send) {
            seqDef const* seq;
            repcodes_t rep;
            ZSTD_memcpy(&rep, prevCBlock->rep, sizeof(rep));
            for (seq = sstart; seq < sp; ++seq) {
                ZSTD_updateRep(rep.rep, seq->offBase, ZSTD_getSequenceLength(seqStorePtr, seq).litLength == 0);
            }
            ZSTD_memcpy(nextCBlock->rep, &rep, sizeof(rep));
        }
    }
    DEBUGLOG(5, "ZSTD_compressSubBlock_multi compressed");
    return op-ostart;
}

size_t ZSTD_compressSuperBlock(ZSTD_CCtx* zc,
                               void* dst, size_t dstCapacity,
                               void const* src, size_t srcSize,
                               unsigned lastBlock) {
    ZSTD_entropyCTablesMetadata_t entropyMetadata;

    FORWARD_IF_ERROR(ZSTD_buildBlockEntropyStats(&zc->seqStore,
          &zc->blockState.prevCBlock->entropy,
          &zc->blockState.nextCBlock->entropy,
          &zc->appliedParams,
          &entropyMetadata,
          zc->entropyWorkspace, ENTROPY_WORKSPACE_SIZE /* statically allocated in resetCCtx */), "");

    return ZSTD_compressSubBlock_multi(&zc->seqStore,
            zc->blockState.prevCBlock,
            zc->blockState.nextCBlock,
            &entropyMetadata,
            &zc->appliedParams,
            dst, dstCapacity,
            src, srcSize,
            zc->bmi2, lastBlock,
            zc->entropyWorkspace, ENTROPY_WORKSPACE_SIZE /* statically allocated in resetCCtx */);
}
32
ext/zstd/lib/compress/zstd_compress_superblock.h
Normal file
@@ -0,0 +1,32 @@
/*
 * Copyright (c) Meta Platforms, Inc. and affiliates.
 * All rights reserved.
 *
 * This source code is licensed under both the BSD-style license (found in the
 * LICENSE file in the root directory of this source tree) and the GPLv2 (found
 * in the COPYING file in the root directory of this source tree).
 * You may select, at your option, one of the above-listed licenses.
 */

#ifndef ZSTD_COMPRESS_ADVANCED_H
#define ZSTD_COMPRESS_ADVANCED_H

/*-*************************************
*  Dependencies
***************************************/

#include "../zstd.h" /* ZSTD_CCtx */

/*-*************************************
*  Target Compressed Block Size
***************************************/

/* ZSTD_compressSuperBlock() :
 * Used to compress a super block when targetCBlockSize is being used.
 * The given block will be compressed into multiple sub blocks that are around targetCBlockSize. */
size_t ZSTD_compressSuperBlock(ZSTD_CCtx* zc,
                               void* dst, size_t dstCapacity,
                               void const* src, size_t srcSize,
                               unsigned lastBlock);

#endif /* ZSTD_COMPRESS_ADVANCED_H */
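/* [Editor's note: illustrative sketch, not part of the vendored zstd source.]
 * ZSTD_compressSuperBlock() is internal; user code reaches it indirectly by
 * setting a target compressed block size through zstd's public advanced API:
 *
 *     ZSTD_CCtx* const cctx = ZSTD_createCCtx();
 *     ZSTD_CCtx_setParameter(cctx, ZSTD_c_targetCBlockSize, 1300);
 *     size_t const cSize = ZSTD_compress2(cctx, dst, dstCapacity, src, srcSize);
 *     ZSTD_freeCCtx(cctx);
 *
 * With a non-zero targetCBlockSize, block compression routes through this
 * super-block path so each emitted block stays near the requested size. */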
742
ext/zstd/lib/compress/zstd_cwksp.h
Normal file
@@ -0,0 +1,742 @@
/*
 * Copyright (c) Meta Platforms, Inc. and affiliates.
 * All rights reserved.
 *
 * This source code is licensed under both the BSD-style license (found in the
 * LICENSE file in the root directory of this source tree) and the GPLv2 (found
 * in the COPYING file in the root directory of this source tree).
 * You may select, at your option, one of the above-listed licenses.
 */

#ifndef ZSTD_CWKSP_H
#define ZSTD_CWKSP_H

/*-*************************************
*  Dependencies
***************************************/
#include "../common/allocations.h"  /* ZSTD_customMalloc, ZSTD_customFree */
#include "../common/zstd_internal.h"
#include "../common/portability_macros.h"

#if defined (__cplusplus)
extern "C" {
#endif

/*-*************************************
*  Constants
***************************************/

/* Since the workspace is effectively its own little malloc implementation /
 * arena, when we run under ASAN, we should similarly insert redzones between
 * each internal element of the workspace, so ASAN will catch overruns that
 * reach outside an object but that stay inside the workspace.
 *
 * This defines the size of that redzone.
 */
#ifndef ZSTD_CWKSP_ASAN_REDZONE_SIZE
#define ZSTD_CWKSP_ASAN_REDZONE_SIZE 128
#endif


/* Set our tables and "aligned" allocations to align on 64 bytes */
#define ZSTD_CWKSP_ALIGNMENT_BYTES 64

/*-*************************************
*  Structures
***************************************/
typedef enum {
    ZSTD_cwksp_alloc_objects,
    ZSTD_cwksp_alloc_aligned_init_once,
    ZSTD_cwksp_alloc_aligned,
    ZSTD_cwksp_alloc_buffers
} ZSTD_cwksp_alloc_phase_e;

/**
 * Used to describe whether the workspace is statically allocated (and will not
 * necessarily ever be freed), or if it's dynamically allocated and we can
 * expect a well-formed caller to free this.
 */
typedef enum {
    ZSTD_cwksp_dynamic_alloc,
    ZSTD_cwksp_static_alloc
} ZSTD_cwksp_static_alloc_e;

/**
 * Zstd fits all its internal datastructures into a single continuous buffer,
 * so that it only needs to perform a single OS allocation (or so that a buffer
 * can be provided to it and it can perform no allocations at all). This buffer
 * is called the workspace.
 *
 * Several optimizations complicate that process of allocating memory ranges
 * from this workspace for each internal datastructure:
 *
 * - These different internal datastructures have different setup requirements:
 *
 *   - The static objects need to be cleared once and can then be trivially
 *     reused for each compression.
 *
 *   - Various buffers don't need to be initialized at all--they are always
 *     written into before they're read.
 *
 *   - The matchstate tables have a unique requirement that they don't need
 *     their memory to be totally cleared, but they do need the memory to have
 *     some bound, i.e., a guarantee that all values in the memory they've been
 *     allocated are less than some maximum value (which is the starting value
 *     for the indices that they will then use for compression). When this
 *     guarantee is provided to them, they can use the memory without any setup
 *     work. When it can't, they have to clear the area.
 *
 * - These buffers also have different alignment requirements.
 *
 * - We would like to reuse the objects in the workspace for multiple
 *   compressions without having to perform any expensive reallocation or
 *   reinitialization work.
 *
 * - We would like to be able to efficiently reuse the workspace across
 *   multiple compressions **even when the compression parameters change** and
 *   we need to resize some of the objects (where possible).
 *
 * To attempt to manage this buffer, given these constraints, the ZSTD_cwksp
 * abstraction was created. It works as follows:
 *
 * Workspace Layout:
 *
 * [                        ... workspace ...                           ]
 * [objects][tables ->] free space [<- buffers][<- aligned][<- init once]
 *
 * The various objects that live in the workspace are divided into the
 * following categories, and are allocated separately:
 *
 * - Static objects: this is optionally the enclosing ZSTD_CCtx or ZSTD_CDict,
 *   so that literally everything fits in a single buffer. Note: if present,
 *   this must be the first object in the workspace, since ZSTD_customFree{CCtx,
 *   CDict}() rely on a pointer comparison to see whether one or two frees are
 *   required.
 *
 * - Fixed size objects: these are fixed-size, fixed-count objects that are
 *   nonetheless "dynamically" allocated in the workspace so that we can
 *   control how they're initialized separately from the broader ZSTD_CCtx.
 *   Examples:
 *   - Entropy Workspace
 *   - 2 x ZSTD_compressedBlockState_t
 *   - CDict dictionary contents
 *
 * - Tables: these are any of several different datastructures (hash tables,
 *   chain tables, binary trees) that all respect a common format: they are
 *   uint32_t arrays, all of whose values are between 0 and (nextSrc - base).
 *   Their sizes depend on the cparams. These tables are 64-byte aligned.
 *
 * - Init once: these buffers need to be initialized at least once before
 *   use. They should be used when we want to skip memory initialization
 *   while not triggering memory checkers (like Valgrind) when reading
 *   from this memory without writing to it first.
 *   These buffers should be used carefully as they might contain data
 *   from previous compressions.
 *   Buffers are aligned to 64 bytes.
 *
 * - Aligned: these buffers don't require any initialization before they're
 *   used. The user of the buffer should make sure they write into a buffer
 *   location before reading from it.
 *   Buffers are aligned to 64 bytes.
 *
 * - Buffers: these buffers are used for various purposes that don't require
 *   any alignment or initialization before they're used. This means they can
 *   be moved around at no cost for a new compression.
 *
 * Allocating Memory:
 *
 * The various types of objects must be allocated in order, so they can be
 * correctly packed into the workspace buffer. That order is:
 *
 * 1. Objects
 * 2. Init once / Tables
 * 3. Aligned / Tables
 * 4. Buffers / Tables
 *
 * Attempts to reserve objects of different types out of order will fail.
 */
typedef struct {
    void* workspace;
    void* workspaceEnd;

    void* objectEnd;
    void* tableEnd;
    void* tableValidEnd;
    void* allocStart;
    void* initOnceStart;

    BYTE allocFailed;
    int workspaceOversizedDuration;
    ZSTD_cwksp_alloc_phase_e phase;
    ZSTD_cwksp_static_alloc_e isStatic;
} ZSTD_cwksp;
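/* [Editor's note: illustrative sketch, not part of the vendored zstd source.]
 * Typical lifecycle of a cwksp, using the reserve functions defined below and
 * respecting the phase order (objects first, then tables, then buffers):
 *
 *     ZSTD_cwksp ws;
 *     void* const mem = malloc(workspaceSize);   // `workspaceSize` is a placeholder
 *     ZSTD_cwksp_init(&ws, mem, workspaceSize, ZSTD_cwksp_static_alloc);
 *     void* obj   = ZSTD_cwksp_reserve_object(&ws, sizeof(SomeObject));
 *     U32*  table = (U32*)ZSTD_cwksp_reserve_table(&ws, tableBytes);
 *     BYTE* buf   = ZSTD_cwksp_reserve_buffer(&ws, bufBytes);
 *     if (ZSTD_cwksp_reserve_failed(&ws)) { /+ ran out of workspace +/ }
 *
 * `SomeObject`, `tableBytes` and `bufBytes` are hypothetical names used only
 * for this sketch; reserving out of phase order returns NULL and sets the
 * allocFailed flag. */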

/*-*************************************
*  Functions
***************************************/

MEM_STATIC size_t ZSTD_cwksp_available_space(ZSTD_cwksp* ws);
MEM_STATIC void*  ZSTD_cwksp_initialAllocStart(ZSTD_cwksp* ws);

MEM_STATIC void ZSTD_cwksp_assert_internal_consistency(ZSTD_cwksp* ws) {
    (void)ws;
    assert(ws->workspace <= ws->objectEnd);
    assert(ws->objectEnd <= ws->tableEnd);
    assert(ws->objectEnd <= ws->tableValidEnd);
    assert(ws->tableEnd <= ws->allocStart);
    assert(ws->tableValidEnd <= ws->allocStart);
    assert(ws->allocStart <= ws->workspaceEnd);
    assert(ws->initOnceStart <= ZSTD_cwksp_initialAllocStart(ws));
    assert(ws->workspace <= ws->initOnceStart);
#if ZSTD_MEMORY_SANITIZER
    {
        intptr_t const offset = __msan_test_shadow(ws->initOnceStart,
            (U8*)ZSTD_cwksp_initialAllocStart(ws) - (U8*)ws->initOnceStart);
#if defined(ZSTD_MSAN_PRINT)
        if(offset!=-1) {
            __msan_print_shadow((U8*)ws->initOnceStart + offset - 8, 32);
        }
#endif
        assert(offset==-1);
    };
#endif
}

/**
 * Align must be a power of 2.
 */
MEM_STATIC size_t ZSTD_cwksp_align(size_t size, size_t const align) {
    size_t const mask = align - 1;
    assert((align & mask) == 0);
    return (size + mask) & ~mask;
}
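/* [Editor's note: worked example, not part of the vendored zstd source.]
 * ZSTD_cwksp_align(100, 64): mask = 63, (100 + 63) & ~63 = 163 & ~63 = 128.
 * Sizes already on the boundary are unchanged: (128 + 63) & ~63 = 128. */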

/**
 * Use this to determine how much space in the workspace we will consume to
 * allocate this object. (Normally it should be exactly the size of the object,
 * but under special conditions, like ASAN, where we pad each object, it might
 * be larger.)
 *
 * Since tables aren't currently redzoned, you don't need to call through this
 * to figure out how much space you need for the matchState tables. Everything
 * else is though.
 *
 * Do not use for sizing aligned buffers. Instead, use ZSTD_cwksp_aligned_alloc_size().
 */
MEM_STATIC size_t ZSTD_cwksp_alloc_size(size_t size) {
    if (size == 0)
        return 0;
#if ZSTD_ADDRESS_SANITIZER && !defined (ZSTD_ASAN_DONT_POISON_WORKSPACE)
    return size + 2 * ZSTD_CWKSP_ASAN_REDZONE_SIZE;
#else
    return size;
#endif
}

/**
 * Returns an adjusted alloc size that is the nearest larger multiple of 64 bytes.
 * Used to determine the number of bytes required for a given "aligned".
 */
MEM_STATIC size_t ZSTD_cwksp_aligned_alloc_size(size_t size) {
    return ZSTD_cwksp_alloc_size(ZSTD_cwksp_align(size, ZSTD_CWKSP_ALIGNMENT_BYTES));
}

/**
 * Returns the amount of additional space the cwksp must allocate
 * for internal purposes (currently only alignment).
 */
MEM_STATIC size_t ZSTD_cwksp_slack_space_required(void) {
    /* For alignment, the wksp will always allocate an additional 2*ZSTD_CWKSP_ALIGNMENT_BYTES
     * bytes to align the beginning of tables section and end of buffers;
     */
    size_t const slackSpace = ZSTD_CWKSP_ALIGNMENT_BYTES * 2;
    return slackSpace;
}


/**
 * Return the number of additional bytes required to align a pointer to the given number of bytes.
 * alignBytes must be a power of two.
 */
MEM_STATIC size_t ZSTD_cwksp_bytes_to_align_ptr(void* ptr, const size_t alignBytes) {
    size_t const alignBytesMask = alignBytes - 1;
    size_t const bytes = (alignBytes - ((size_t)ptr & (alignBytesMask))) & alignBytesMask;
    assert((alignBytes & alignBytesMask) == 0);
    assert(bytes < alignBytes);
    return bytes;
}
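/* [Editor's note: worked example, not part of the vendored zstd source.]
 * ZSTD_cwksp_bytes_to_align_ptr((void*)0x1028, 64):
 *     0x1028 & 63 = 40, (64 - 40) & 63 = 24 -> 24 bytes of padding needed.
 * An already-aligned pointer yields (64 - 0) & 63 = 0. */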

/**
 * Returns the initial value for allocStart which is used to determine the position from
 * which we can allocate from the end of the workspace.
 */
MEM_STATIC void* ZSTD_cwksp_initialAllocStart(ZSTD_cwksp* ws) {
    return (void*)((size_t)ws->workspaceEnd & ~(ZSTD_CWKSP_ALIGNMENT_BYTES-1));
}

/**
 * Internal function. Do not use directly.
 * Reserves the given number of bytes within the aligned/buffer segment of the wksp,
 * which counts from the end of the wksp (as opposed to the object/table segment).
 *
 * Returns a pointer to the beginning of that space.
 */
MEM_STATIC void*
ZSTD_cwksp_reserve_internal_buffer_space(ZSTD_cwksp* ws, size_t const bytes)
{
    void* const alloc = (BYTE*)ws->allocStart - bytes;
    void* const bottom = ws->tableEnd;
    DEBUGLOG(5, "cwksp: reserving %p %zd bytes, %zd bytes remaining",
        alloc, bytes, ZSTD_cwksp_available_space(ws) - bytes);
    ZSTD_cwksp_assert_internal_consistency(ws);
    assert(alloc >= bottom);
    if (alloc < bottom) {
        DEBUGLOG(4, "cwksp: alloc failed!");
        ws->allocFailed = 1;
        return NULL;
    }
    /* the area is reserved from the end of wksp.
     * If it overlaps with tableValidEnd, it voids guarantees on values' range */
    if (alloc < ws->tableValidEnd) {
        ws->tableValidEnd = alloc;
    }
    ws->allocStart = alloc;
    return alloc;
}

/**
 * Moves the cwksp to the next phase, and does any necessary allocations.
 * cwksp initialization must necessarily go through each phase in order.
 * Returns 0 on success, or a zstd error code.
 */
MEM_STATIC size_t
ZSTD_cwksp_internal_advance_phase(ZSTD_cwksp* ws, ZSTD_cwksp_alloc_phase_e phase)
{
    assert(phase >= ws->phase);
    if (phase > ws->phase) {
        /* Going from allocating objects to allocating initOnce / tables */
        if (ws->phase < ZSTD_cwksp_alloc_aligned_init_once &&
            phase >= ZSTD_cwksp_alloc_aligned_init_once) {
            ws->tableValidEnd = ws->objectEnd;
            ws->initOnceStart = ZSTD_cwksp_initialAllocStart(ws);

            {   /* Align the start of the tables to 64 bytes. Use [0, 63] bytes */
                void *const alloc = ws->objectEnd;
                size_t const bytesToAlign = ZSTD_cwksp_bytes_to_align_ptr(alloc, ZSTD_CWKSP_ALIGNMENT_BYTES);
                void *const objectEnd = (BYTE *) alloc + bytesToAlign;
                DEBUGLOG(5, "reserving table alignment addtl space: %zu", bytesToAlign);
                RETURN_ERROR_IF(objectEnd > ws->workspaceEnd, memory_allocation,
                                "table phase - alignment initial allocation failed!");
                ws->objectEnd = objectEnd;
                ws->tableEnd = objectEnd;  /* table area starts being empty */
                if (ws->tableValidEnd < ws->tableEnd) {
                    ws->tableValidEnd = ws->tableEnd;
                }
            }
        }
        ws->phase = phase;
        ZSTD_cwksp_assert_internal_consistency(ws);
    }
    return 0;
}

/**
 * Returns whether this object/buffer/etc was allocated in this workspace.
 */
MEM_STATIC int ZSTD_cwksp_owns_buffer(const ZSTD_cwksp* ws, const void* ptr)
{
    return (ptr != NULL) && (ws->workspace <= ptr) && (ptr < ws->workspaceEnd);
}

/**
 * Internal function. Do not use directly.
 */
MEM_STATIC void*
ZSTD_cwksp_reserve_internal(ZSTD_cwksp* ws, size_t bytes, ZSTD_cwksp_alloc_phase_e phase)
{
    void* alloc;
    if (ZSTD_isError(ZSTD_cwksp_internal_advance_phase(ws, phase)) || bytes == 0) {
        return NULL;
    }

#if ZSTD_ADDRESS_SANITIZER && !defined (ZSTD_ASAN_DONT_POISON_WORKSPACE)
    /* over-reserve space */
    bytes += 2 * ZSTD_CWKSP_ASAN_REDZONE_SIZE;
#endif

    alloc = ZSTD_cwksp_reserve_internal_buffer_space(ws, bytes);

#if ZSTD_ADDRESS_SANITIZER && !defined (ZSTD_ASAN_DONT_POISON_WORKSPACE)
    /* Move alloc so there's ZSTD_CWKSP_ASAN_REDZONE_SIZE unused space on
     * either side. */
    if (alloc) {
        alloc = (BYTE *)alloc + ZSTD_CWKSP_ASAN_REDZONE_SIZE;
        if (ws->isStatic == ZSTD_cwksp_dynamic_alloc) {
            /* We need to keep the redzone poisoned while unpoisoning the bytes that
             * are actually allocated. */
            __asan_unpoison_memory_region(alloc, bytes - 2 * ZSTD_CWKSP_ASAN_REDZONE_SIZE);
        }
    }
#endif

    return alloc;
}

/**
 * Reserves and returns unaligned memory.
 */
MEM_STATIC BYTE* ZSTD_cwksp_reserve_buffer(ZSTD_cwksp* ws, size_t bytes)
{
    return (BYTE*)ZSTD_cwksp_reserve_internal(ws, bytes, ZSTD_cwksp_alloc_buffers);
}

/**
 * Reserves and returns memory sized on and aligned on ZSTD_CWKSP_ALIGNMENT_BYTES (64 bytes).
 * This memory has been initialized at least once in the past.
 * This doesn't mean it has been initialized this time, and it might contain data from previous
 * operations.
 * The main usage is for algorithms that might need read access into uninitialized memory.
 * The algorithm must maintain safety under these conditions and must make sure it doesn't
 * leak any of the past data (directly or in side channels).
 */
MEM_STATIC void* ZSTD_cwksp_reserve_aligned_init_once(ZSTD_cwksp* ws, size_t bytes)
{
    size_t const alignedBytes = ZSTD_cwksp_align(bytes, ZSTD_CWKSP_ALIGNMENT_BYTES);
    void* ptr = ZSTD_cwksp_reserve_internal(ws, alignedBytes, ZSTD_cwksp_alloc_aligned_init_once);
    assert(((size_t)ptr & (ZSTD_CWKSP_ALIGNMENT_BYTES-1))== 0);
    if(ptr && ptr < ws->initOnceStart) {
        /* We assume the memory following the current allocation is either:
         * 1. Not usable as initOnce memory (end of workspace)
         * 2. Another initOnce buffer that has been allocated before (and so was previously memset)
         * 3. An ASAN redzone, in which case we don't want to write on it
         * For these reasons it should be fine to not explicitly zero every byte up to ws->initOnceStart.
         * Note that we assume here that MSAN and ASAN cannot run at the same time. */
        ZSTD_memset(ptr, 0, MIN((size_t)((U8*)ws->initOnceStart - (U8*)ptr), alignedBytes));
        ws->initOnceStart = ptr;
    }
#if ZSTD_MEMORY_SANITIZER
    assert(__msan_test_shadow(ptr, bytes) == -1);
#endif
    return ptr;
}

/**
 * Reserves and returns memory sized on and aligned on ZSTD_CWKSP_ALIGNMENT_BYTES (64 bytes).
 */
MEM_STATIC void* ZSTD_cwksp_reserve_aligned(ZSTD_cwksp* ws, size_t bytes)
{
    void* ptr = ZSTD_cwksp_reserve_internal(ws, ZSTD_cwksp_align(bytes, ZSTD_CWKSP_ALIGNMENT_BYTES),
                                            ZSTD_cwksp_alloc_aligned);
    assert(((size_t)ptr & (ZSTD_CWKSP_ALIGNMENT_BYTES-1))== 0);
    return ptr;
}

/**
 * Aligned on 64 bytes. These buffers have the special property that
 * their values remain constrained, allowing us to re-use them without
 * memset()-ing them.
 */
MEM_STATIC void* ZSTD_cwksp_reserve_table(ZSTD_cwksp* ws, size_t bytes)
{
    const ZSTD_cwksp_alloc_phase_e phase = ZSTD_cwksp_alloc_aligned_init_once;
    void* alloc;
    void* end;
    void* top;

    /* We can only start allocating tables after we are done reserving space for objects at the
     * start of the workspace */
    if(ws->phase < phase) {
        if (ZSTD_isError(ZSTD_cwksp_internal_advance_phase(ws, phase))) {
            return NULL;
        }
    }
    alloc = ws->tableEnd;
    end = (BYTE *)alloc + bytes;
    top = ws->allocStart;

    DEBUGLOG(5, "cwksp: reserving %p table %zd bytes, %zd bytes remaining",
        alloc, bytes, ZSTD_cwksp_available_space(ws) - bytes);
    assert((bytes & (sizeof(U32)-1)) == 0);
    ZSTD_cwksp_assert_internal_consistency(ws);
    assert(end <= top);
    if (end > top) {
        DEBUGLOG(4, "cwksp: table alloc failed!");
        ws->allocFailed = 1;
        return NULL;
    }
    ws->tableEnd = end;

#if ZSTD_ADDRESS_SANITIZER && !defined (ZSTD_ASAN_DONT_POISON_WORKSPACE)
    if (ws->isStatic == ZSTD_cwksp_dynamic_alloc) {
        __asan_unpoison_memory_region(alloc, bytes);
    }
#endif

    assert((bytes & (ZSTD_CWKSP_ALIGNMENT_BYTES-1)) == 0);
    assert(((size_t)alloc & (ZSTD_CWKSP_ALIGNMENT_BYTES-1))== 0);
    return alloc;
}

/**
 * Aligned on sizeof(void*).
 * Note : should happen only once, at workspace first initialization
 */
MEM_STATIC void* ZSTD_cwksp_reserve_object(ZSTD_cwksp* ws, size_t bytes)
{
    size_t const roundedBytes = ZSTD_cwksp_align(bytes, sizeof(void*));
    void* alloc = ws->objectEnd;
    void* end = (BYTE*)alloc + roundedBytes;

#if ZSTD_ADDRESS_SANITIZER && !defined (ZSTD_ASAN_DONT_POISON_WORKSPACE)
    /* over-reserve space */
    end = (BYTE *)end + 2 * ZSTD_CWKSP_ASAN_REDZONE_SIZE;
#endif

    DEBUGLOG(4,
        "cwksp: reserving %p object %zd bytes (rounded to %zd), %zd bytes remaining",
        alloc, bytes, roundedBytes, ZSTD_cwksp_available_space(ws) - roundedBytes);
    assert((size_t)alloc % ZSTD_ALIGNOF(void*) == 0);
    assert(bytes % ZSTD_ALIGNOF(void*) == 0);
    ZSTD_cwksp_assert_internal_consistency(ws);
    /* we must be in the first phase, no advance is possible */
    if (ws->phase != ZSTD_cwksp_alloc_objects || end > ws->workspaceEnd) {
        DEBUGLOG(3, "cwksp: object alloc failed!");
        ws->allocFailed = 1;
        return NULL;
    }
    ws->objectEnd = end;
    ws->tableEnd = end;
    ws->tableValidEnd = end;

#if ZSTD_ADDRESS_SANITIZER && !defined (ZSTD_ASAN_DONT_POISON_WORKSPACE)
    /* Move alloc so there's ZSTD_CWKSP_ASAN_REDZONE_SIZE unused space on
     * either side. */
    alloc = (BYTE*)alloc + ZSTD_CWKSP_ASAN_REDZONE_SIZE;
    if (ws->isStatic == ZSTD_cwksp_dynamic_alloc) {
        __asan_unpoison_memory_region(alloc, bytes);
    }
#endif

    return alloc;
}

MEM_STATIC void ZSTD_cwksp_mark_tables_dirty(ZSTD_cwksp* ws)
{
    DEBUGLOG(4, "cwksp: ZSTD_cwksp_mark_tables_dirty");

#if ZSTD_MEMORY_SANITIZER && !defined (ZSTD_MSAN_DONT_POISON_WORKSPACE)
    /* To validate that the table re-use logic is sound, and that we don't
     * access table space that we haven't cleaned, we re-"poison" the table
     * space every time we mark it dirty.
     * Since tableValidEnd space and initOnce space may overlap we don't poison
     * the initOnce portion as it breaks its promise. This means that this poisoning
     * check isn't always applied fully. */
    {
        size_t size = (BYTE*)ws->tableValidEnd - (BYTE*)ws->objectEnd;
        assert(__msan_test_shadow(ws->objectEnd, size) == -1);
        if((BYTE*)ws->tableValidEnd < (BYTE*)ws->initOnceStart) {
            __msan_poison(ws->objectEnd, size);
        } else {
            assert(ws->initOnceStart >= ws->objectEnd);
            __msan_poison(ws->objectEnd, (BYTE*)ws->initOnceStart - (BYTE*)ws->objectEnd);
        }
    }
#endif

    assert(ws->tableValidEnd >= ws->objectEnd);
    assert(ws->tableValidEnd <= ws->allocStart);
    ws->tableValidEnd = ws->objectEnd;
    ZSTD_cwksp_assert_internal_consistency(ws);
}

MEM_STATIC void ZSTD_cwksp_mark_tables_clean(ZSTD_cwksp* ws) {
    DEBUGLOG(4, "cwksp: ZSTD_cwksp_mark_tables_clean");
    assert(ws->tableValidEnd >= ws->objectEnd);
    assert(ws->tableValidEnd <= ws->allocStart);
    if (ws->tableValidEnd < ws->tableEnd) {
        ws->tableValidEnd = ws->tableEnd;
    }
    ZSTD_cwksp_assert_internal_consistency(ws);
}

/**
 * Zero the part of the allocated tables not already marked clean.
 */
MEM_STATIC void ZSTD_cwksp_clean_tables(ZSTD_cwksp* ws) {
    DEBUGLOG(4, "cwksp: ZSTD_cwksp_clean_tables");
    assert(ws->tableValidEnd >= ws->objectEnd);
    assert(ws->tableValidEnd <= ws->allocStart);
    if (ws->tableValidEnd < ws->tableEnd) {
        ZSTD_memset(ws->tableValidEnd, 0, (size_t)((BYTE*)ws->tableEnd - (BYTE*)ws->tableValidEnd));
    }
    ZSTD_cwksp_mark_tables_clean(ws);
}

/**
 * Invalidates table allocations.
 * All other allocations remain valid.
 */
MEM_STATIC void ZSTD_cwksp_clear_tables(ZSTD_cwksp* ws) {
    DEBUGLOG(4, "cwksp: clearing tables!");

#if ZSTD_ADDRESS_SANITIZER && !defined (ZSTD_ASAN_DONT_POISON_WORKSPACE)
    /* We don't do this when the workspace is statically allocated, because
     * when that is the case, we have no capability to hook into the end of the
     * workspace's lifecycle to unpoison the memory.
     */
    if (ws->isStatic == ZSTD_cwksp_dynamic_alloc) {
        size_t size = (BYTE*)ws->tableValidEnd - (BYTE*)ws->objectEnd;
        __asan_poison_memory_region(ws->objectEnd, size);
    }
#endif

    ws->tableEnd = ws->objectEnd;
    ZSTD_cwksp_assert_internal_consistency(ws);
}

/**
 * Invalidates all buffer, aligned, and table allocations.
 * Object allocations remain valid.
 */
MEM_STATIC void ZSTD_cwksp_clear(ZSTD_cwksp* ws) {
    DEBUGLOG(4, "cwksp: clearing!");

#if ZSTD_MEMORY_SANITIZER && !defined (ZSTD_MSAN_DONT_POISON_WORKSPACE)
    /* To validate that the context re-use logic is sound, and that we don't
     * access stuff that this compression hasn't initialized, we re-"poison"
     * the workspace except for the areas in which we expect memory re-use
     * without initialization (objects, valid tables area and init once
     * memory). */
    {
        if((BYTE*)ws->tableValidEnd < (BYTE*)ws->initOnceStart) {
            size_t size = (BYTE*)ws->initOnceStart - (BYTE*)ws->tableValidEnd;
            __msan_poison(ws->tableValidEnd, size);
        }
    }
#endif

#if ZSTD_ADDRESS_SANITIZER && !defined (ZSTD_ASAN_DONT_POISON_WORKSPACE)
    /* We don't do this when the workspace is statically allocated, because
     * when that is the case, we have no capability to hook into the end of the
     * workspace's lifecycle to unpoison the memory.
     */
    if (ws->isStatic == ZSTD_cwksp_dynamic_alloc) {
        size_t size = (BYTE*)ws->workspaceEnd - (BYTE*)ws->objectEnd;
        __asan_poison_memory_region(ws->objectEnd, size);
    }
#endif

    ws->tableEnd = ws->objectEnd;
    ws->allocStart = ZSTD_cwksp_initialAllocStart(ws);
    ws->allocFailed = 0;
    if (ws->phase > ZSTD_cwksp_alloc_aligned_init_once) {
        ws->phase = ZSTD_cwksp_alloc_aligned_init_once;
    }
    ZSTD_cwksp_assert_internal_consistency(ws);
}

/**
 * The provided workspace takes ownership of the buffer [start, start+size).
 * Any existing values in the workspace are ignored (the previously managed
 * buffer, if present, must be separately freed).
 */
MEM_STATIC void ZSTD_cwksp_init(ZSTD_cwksp* ws, void* start, size_t size, ZSTD_cwksp_static_alloc_e isStatic) {
    DEBUGLOG(4, "cwksp: init'ing workspace with %zd bytes", size);
    assert(((size_t)start & (sizeof(void*)-1)) == 0); /* ensure correct alignment */
    ws->workspace = start;
    ws->workspaceEnd = (BYTE*)start + size;
    ws->objectEnd = ws->workspace;
    ws->tableValidEnd = ws->objectEnd;
    ws->initOnceStart = ZSTD_cwksp_initialAllocStart(ws);
    ws->phase = ZSTD_cwksp_alloc_objects;
    ws->isStatic = isStatic;
    ZSTD_cwksp_clear(ws);
    ws->workspaceOversizedDuration = 0;
    ZSTD_cwksp_assert_internal_consistency(ws);
}

MEM_STATIC size_t ZSTD_cwksp_create(ZSTD_cwksp* ws, size_t size, ZSTD_customMem customMem) {
    void* workspace = ZSTD_customMalloc(size, customMem);
    DEBUGLOG(4, "cwksp: creating new workspace with %zd bytes", size);
    RETURN_ERROR_IF(workspace == NULL, memory_allocation, "NULL pointer!");
    ZSTD_cwksp_init(ws, workspace, size, ZSTD_cwksp_dynamic_alloc);
    return 0;
}

MEM_STATIC void ZSTD_cwksp_free(ZSTD_cwksp* ws, ZSTD_customMem customMem) {
    void *ptr = ws->workspace;
    DEBUGLOG(4, "cwksp: freeing workspace");
    ZSTD_memset(ws, 0, sizeof(ZSTD_cwksp));
    ZSTD_customFree(ptr, customMem);
}

/**
 * Moves the management of a workspace from one cwksp to another. The src cwksp
 * is left in an invalid state (src must be re-init()'ed before it's used again).
 */
MEM_STATIC void ZSTD_cwksp_move(ZSTD_cwksp* dst, ZSTD_cwksp* src) {
    *dst = *src;
    ZSTD_memset(src, 0, sizeof(ZSTD_cwksp));
}

MEM_STATIC size_t ZSTD_cwksp_sizeof(const ZSTD_cwksp* ws) {
    return (size_t)((BYTE*)ws->workspaceEnd - (BYTE*)ws->workspace);
}

MEM_STATIC size_t ZSTD_cwksp_used(const ZSTD_cwksp* ws) {
    return (size_t)((BYTE*)ws->tableEnd - (BYTE*)ws->workspace)
         + (size_t)((BYTE*)ws->workspaceEnd - (BYTE*)ws->allocStart);
}

MEM_STATIC int ZSTD_cwksp_reserve_failed(const ZSTD_cwksp* ws) {
    return ws->allocFailed;
}

/*-*************************************
*  Functions Checking Free Space
***************************************/

/* ZSTD_alignmentSpaceWithinBounds() :
|
||||
* Returns if the estimated space needed for a wksp is within an acceptable limit of the
|
||||
* actual amount of space used.
|
||||
*/
|
||||
MEM_STATIC int ZSTD_cwksp_estimated_space_within_bounds(const ZSTD_cwksp *const ws, size_t const estimatedSpace) {
|
||||
/* We have an alignment space between objects and tables between tables and buffers, so we can have up to twice
|
||||
* the alignment bytes difference between estimation and actual usage */
|
||||
return (estimatedSpace - ZSTD_cwksp_slack_space_required()) <= ZSTD_cwksp_used(ws) &&
|
||||
ZSTD_cwksp_used(ws) <= estimatedSpace;
|
||||
}
|
||||
|
||||
|
||||
MEM_STATIC size_t ZSTD_cwksp_available_space(ZSTD_cwksp* ws) {
|
||||
return (size_t)((BYTE*)ws->allocStart - (BYTE*)ws->tableEnd);
|
||||
}
|
||||
|
||||
MEM_STATIC int ZSTD_cwksp_check_available(ZSTD_cwksp* ws, size_t additionalNeededSpace) {
|
||||
return ZSTD_cwksp_available_space(ws) >= additionalNeededSpace;
|
||||
}
|
||||
|
||||
MEM_STATIC int ZSTD_cwksp_check_too_large(ZSTD_cwksp* ws, size_t additionalNeededSpace) {
|
||||
return ZSTD_cwksp_check_available(
|
||||
ws, additionalNeededSpace * ZSTD_WORKSPACETOOLARGE_FACTOR);
|
||||
}
|
||||
|
||||
MEM_STATIC int ZSTD_cwksp_check_wasteful(ZSTD_cwksp* ws, size_t additionalNeededSpace) {
|
||||
return ZSTD_cwksp_check_too_large(ws, additionalNeededSpace)
|
||||
&& ws->workspaceOversizedDuration > ZSTD_WORKSPACETOOLARGE_MAXDURATION;
|
||||
}
|
||||
|
||||
MEM_STATIC void ZSTD_cwksp_bump_oversized_duration(
|
||||
ZSTD_cwksp* ws, size_t additionalNeededSpace) {
|
||||
if (ZSTD_cwksp_check_too_large(ws, additionalNeededSpace)) {
|
||||
ws->workspaceOversizedDuration++;
|
||||
} else {
|
||||
ws->workspaceOversizedDuration = 0;
|
||||
}
|
||||
}
|
||||
|
||||
#if defined (__cplusplus)
|
||||
}
|
||||
#endif
|
||||
|
||||
#endif /* ZSTD_CWKSP_H */
|
||||
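The functions above form a small lifecycle: create (or init over a caller-supplied buffer), reserve, clear between compressions, free. A minimal sketch of how a caller drives it, assuming zstd's internal headers and its internal ZSTD_defaultCMem allocator (illustrative only, not part of the vendored file):

/* Illustrative sketch of the cwksp lifecycle; assumes zstd internal headers. */
static void cwksp_lifecycle_sketch(void)
{
    ZSTD_cwksp ws;
    /* create: allocates 1 MiB and hands ownership of the buffer to the workspace */
    if (ZSTD_isError(ZSTD_cwksp_create(&ws, (size_t)1 << 20, ZSTD_defaultCMem)))
        return;                                   /* allocation failed */
    /* ... reserve objects/tables/buffers and run a compression here ... */
    ZSTD_cwksp_clear(&ws);                        /* invalidate buffers/aligned/tables; objects survive */
    ZSTD_cwksp_free(&ws, ZSTD_defaultCMem);       /* releases the buffer the workspace owns */
}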
758
ext/zstd/lib/compress/zstd_double_fast.c
vendored
Normal file
@@ -0,0 +1,758 @@
/*
 * Copyright (c) Meta Platforms, Inc. and affiliates.
 * All rights reserved.
 *
 * This source code is licensed under both the BSD-style license (found in the
 * LICENSE file in the root directory of this source tree) and the GPLv2 (found
 * in the COPYING file in the root directory of this source tree).
 * You may select, at your option, one of the above-listed licenses.
 */

#include "zstd_compress_internal.h"
#include "zstd_double_fast.h"

static void ZSTD_fillDoubleHashTableForCDict(ZSTD_matchState_t* ms,
                              void const* end, ZSTD_dictTableLoadMethod_e dtlm)
{
    const ZSTD_compressionParameters* const cParams = &ms->cParams;
    U32* const hashLarge = ms->hashTable;
    U32  const hBitsL = cParams->hashLog + ZSTD_SHORT_CACHE_TAG_BITS;
    U32  const mls = cParams->minMatch;
    U32* const hashSmall = ms->chainTable;
    U32  const hBitsS = cParams->chainLog + ZSTD_SHORT_CACHE_TAG_BITS;
    const BYTE* const base = ms->window.base;
    const BYTE* ip = base + ms->nextToUpdate;
    const BYTE* const iend = ((const BYTE*)end) - HASH_READ_SIZE;
    const U32 fastHashFillStep = 3;

    /* Always insert every fastHashFillStep position into the hash tables.
     * Insert the other positions into the large hash table if their entry
     * is empty.
     */
    for (; ip + fastHashFillStep - 1 <= iend; ip += fastHashFillStep) {
        U32 const curr = (U32)(ip - base);
        U32 i;
        for (i = 0; i < fastHashFillStep; ++i) {
            size_t const smHashAndTag = ZSTD_hashPtr(ip + i, hBitsS, mls);
            size_t const lgHashAndTag = ZSTD_hashPtr(ip + i, hBitsL, 8);
            if (i == 0) {
                ZSTD_writeTaggedIndex(hashSmall, smHashAndTag, curr + i);
            }
            if (i == 0 || hashLarge[lgHashAndTag >> ZSTD_SHORT_CACHE_TAG_BITS] == 0) {
                ZSTD_writeTaggedIndex(hashLarge, lgHashAndTag, curr + i);
            }
            /* Only load extra positions for ZSTD_dtlm_full */
            if (dtlm == ZSTD_dtlm_fast)
                break;
    }   }
}

static void ZSTD_fillDoubleHashTableForCCtx(ZSTD_matchState_t* ms,
                              void const* end, ZSTD_dictTableLoadMethod_e dtlm)
{
    const ZSTD_compressionParameters* const cParams = &ms->cParams;
    U32* const hashLarge = ms->hashTable;
    U32  const hBitsL = cParams->hashLog;
    U32  const mls = cParams->minMatch;
    U32* const hashSmall = ms->chainTable;
    U32  const hBitsS = cParams->chainLog;
    const BYTE* const base = ms->window.base;
    const BYTE* ip = base + ms->nextToUpdate;
    const BYTE* const iend = ((const BYTE*)end) - HASH_READ_SIZE;
    const U32 fastHashFillStep = 3;

    /* Always insert every fastHashFillStep position into the hash tables.
     * Insert the other positions into the large hash table if their entry
     * is empty.
     */
    for (; ip + fastHashFillStep - 1 <= iend; ip += fastHashFillStep) {
        U32 const curr = (U32)(ip - base);
        U32 i;
        for (i = 0; i < fastHashFillStep; ++i) {
            size_t const smHash = ZSTD_hashPtr(ip + i, hBitsS, mls);
            size_t const lgHash = ZSTD_hashPtr(ip + i, hBitsL, 8);
            if (i == 0)
                hashSmall[smHash] = curr + i;
            if (i == 0 || hashLarge[lgHash] == 0)
                hashLarge[lgHash] = curr + i;
            /* Only load extra positions for ZSTD_dtlm_full */
            if (dtlm == ZSTD_dtlm_fast)
                break;
    }   }
}

void ZSTD_fillDoubleHashTable(ZSTD_matchState_t* ms,
                        const void* const end,
                        ZSTD_dictTableLoadMethod_e dtlm,
                        ZSTD_tableFillPurpose_e tfp)
{
    if (tfp == ZSTD_tfp_forCDict) {
        ZSTD_fillDoubleHashTableForCDict(ms, end, dtlm);
    } else {
        ZSTD_fillDoubleHashTableForCCtx(ms, end, dtlm);
    }
}


FORCE_INLINE_TEMPLATE
size_t ZSTD_compressBlock_doubleFast_noDict_generic(
        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
        void const* src, size_t srcSize, U32 const mls /* template */)
{
    ZSTD_compressionParameters const* cParams = &ms->cParams;
    U32* const hashLong = ms->hashTable;
    const U32 hBitsL = cParams->hashLog;
    U32* const hashSmall = ms->chainTable;
    const U32 hBitsS = cParams->chainLog;
    const BYTE* const base = ms->window.base;
    const BYTE* const istart = (const BYTE*)src;
    const BYTE* anchor = istart;
    const U32 endIndex = (U32)((size_t)(istart - base) + srcSize);
    /* presumes that, if there is a dictionary, it must be using Attach mode */
    const U32 prefixLowestIndex = ZSTD_getLowestPrefixIndex(ms, endIndex, cParams->windowLog);
    const BYTE* const prefixLowest = base + prefixLowestIndex;
    const BYTE* const iend = istart + srcSize;
    const BYTE* const ilimit = iend - HASH_READ_SIZE;
    U32 offset_1=rep[0], offset_2=rep[1];
    U32 offsetSaved1 = 0, offsetSaved2 = 0;

    size_t mLength;
    U32 offset;
    U32 curr;

    /* how many positions to search before increasing step size */
    const size_t kStepIncr = 1 << kSearchStrength;
    /* the position at which to increment the step size if no match is found */
    const BYTE* nextStep;
    size_t step; /* the current step size */

    size_t hl0; /* the long hash at ip */
    size_t hl1; /* the long hash at ip1 */

    U32 idxl0; /* the long match index for ip */
    U32 idxl1; /* the long match index for ip1 */

    const BYTE* matchl0; /* the long match for ip */
    const BYTE* matchs0; /* the short match for ip */
    const BYTE* matchl1; /* the long match for ip1 */

    const BYTE* ip = istart; /* the current position */
    const BYTE* ip1; /* the next position */

    DEBUGLOG(5, "ZSTD_compressBlock_doubleFast_noDict_generic");

    /* init */
    ip += ((ip - prefixLowest) == 0);
    {
        U32 const current = (U32)(ip - base);
        U32 const windowLow = ZSTD_getLowestPrefixIndex(ms, current, cParams->windowLog);
        U32 const maxRep = current - windowLow;
        if (offset_2 > maxRep) offsetSaved2 = offset_2, offset_2 = 0;
        if (offset_1 > maxRep) offsetSaved1 = offset_1, offset_1 = 0;
    }

    /* Outer Loop: one iteration per match found and stored */
    while (1) {
        step = 1;
        nextStep = ip + kStepIncr;
        ip1 = ip + step;

        if (ip1 > ilimit) {
            goto _cleanup;
        }

        hl0 = ZSTD_hashPtr(ip, hBitsL, 8);
        idxl0 = hashLong[hl0];
        matchl0 = base + idxl0;

        /* Inner Loop: one iteration per search / position */
        do {
            const size_t hs0 = ZSTD_hashPtr(ip, hBitsS, mls);
            const U32 idxs0 = hashSmall[hs0];
            curr = (U32)(ip-base);
            matchs0 = base + idxs0;

            hashLong[hl0] = hashSmall[hs0] = curr;   /* update hash tables */

            /* check noDict repcode */
            if ((offset_1 > 0) & (MEM_read32(ip+1-offset_1) == MEM_read32(ip+1))) {
                mLength = ZSTD_count(ip+1+4, ip+1+4-offset_1, iend) + 4;
                ip++;
                ZSTD_storeSeq(seqStore, (size_t)(ip-anchor), anchor, iend, REPCODE1_TO_OFFBASE, mLength);
                goto _match_stored;
            }

            hl1 = ZSTD_hashPtr(ip1, hBitsL, 8);

            if (idxl0 > prefixLowestIndex) {
                /* check prefix long match */
                if (MEM_read64(matchl0) == MEM_read64(ip)) {
                    mLength = ZSTD_count(ip+8, matchl0+8, iend) + 8;
                    offset = (U32)(ip-matchl0);
                    while (((ip>anchor) & (matchl0>prefixLowest)) && (ip[-1] == matchl0[-1])) { ip--; matchl0--; mLength++; } /* catch up */
                    goto _match_found;
                }
            }

            idxl1 = hashLong[hl1];
            matchl1 = base + idxl1;

            if (idxs0 > prefixLowestIndex) {
                /* check prefix short match */
                if (MEM_read32(matchs0) == MEM_read32(ip)) {
                    goto _search_next_long;
                }
            }

            if (ip1 >= nextStep) {
                PREFETCH_L1(ip1 + 64);
                PREFETCH_L1(ip1 + 128);
                step++;
                nextStep += kStepIncr;
            }
            ip = ip1;
            ip1 += step;

            hl0 = hl1;
            idxl0 = idxl1;
            matchl0 = matchl1;
#if defined(__aarch64__)
            PREFETCH_L1(ip+256);
#endif
        } while (ip1 <= ilimit);

_cleanup:
        /* If offset_1 started invalid (offsetSaved1 != 0) and became valid (offset_1 != 0),
         * rotate saved offsets. See comment in ZSTD_compressBlock_fast_noDict for more context. */
        offsetSaved2 = ((offsetSaved1 != 0) && (offset_1 != 0)) ? offsetSaved1 : offsetSaved2;

        /* save reps for next block */
        rep[0] = offset_1 ? offset_1 : offsetSaved1;
        rep[1] = offset_2 ? offset_2 : offsetSaved2;

        /* Return the last literals size */
        return (size_t)(iend - anchor);

_search_next_long:

        /* check prefix long +1 match */
        if (idxl1 > prefixLowestIndex) {
            if (MEM_read64(matchl1) == MEM_read64(ip1)) {
                ip = ip1;
                mLength = ZSTD_count(ip+8, matchl1+8, iend) + 8;
                offset = (U32)(ip-matchl1);
                while (((ip>anchor) & (matchl1>prefixLowest)) && (ip[-1] == matchl1[-1])) { ip--; matchl1--; mLength++; } /* catch up */
                goto _match_found;
            }
        }

        /* if no long +1 match, explore the short match we found */
        mLength = ZSTD_count(ip+4, matchs0+4, iend) + 4;
        offset = (U32)(ip - matchs0);
        while (((ip>anchor) & (matchs0>prefixLowest)) && (ip[-1] == matchs0[-1])) { ip--; matchs0--; mLength++; } /* catch up */

        /* fall-through */

_match_found: /* requires ip, offset, mLength */
        offset_2 = offset_1;
        offset_1 = offset;

        if (step < 4) {
            /* It is unsafe to write this value back to the hashtable when ip1 is
             * greater than or equal to the new ip we will have after we're done
             * processing this match. Rather than perform that test directly
             * (ip1 >= ip + mLength), which costs speed in practice, we do a simpler
             * more predictable test. The minmatch even if we take a short match is
             * 4 bytes, so as long as step, the distance between ip and ip1
             * (initially) is less than 4, we know ip1 < new ip. */
            hashLong[hl1] = (U32)(ip1 - base);
        }

        ZSTD_storeSeq(seqStore, (size_t)(ip-anchor), anchor, iend, OFFSET_TO_OFFBASE(offset), mLength);

_match_stored:
        /* match found */
        ip += mLength;
        anchor = ip;

        if (ip <= ilimit) {
            /* Complementary insertion */
            /* done after iLimit test, as candidates could be > iend-8 */
            {   U32 const indexToInsert = curr+2;
                hashLong[ZSTD_hashPtr(base+indexToInsert, hBitsL, 8)] = indexToInsert;
                hashLong[ZSTD_hashPtr(ip-2, hBitsL, 8)] = (U32)(ip-2-base);
                hashSmall[ZSTD_hashPtr(base+indexToInsert, hBitsS, mls)] = indexToInsert;
                hashSmall[ZSTD_hashPtr(ip-1, hBitsS, mls)] = (U32)(ip-1-base);
            }

            /* check immediate repcode */
            while ( (ip <= ilimit)
                 && ( (offset_2>0)
                    & (MEM_read32(ip) == MEM_read32(ip - offset_2)) )) {
                /* store sequence */
                size_t const rLength = ZSTD_count(ip+4, ip+4-offset_2, iend) + 4;
                U32 const tmpOff = offset_2; offset_2 = offset_1; offset_1 = tmpOff;   /* swap offset_2 <=> offset_1 */
                hashSmall[ZSTD_hashPtr(ip, hBitsS, mls)] = (U32)(ip-base);
                hashLong[ZSTD_hashPtr(ip, hBitsL, 8)] = (U32)(ip-base);
                ZSTD_storeSeq(seqStore, 0, anchor, iend, REPCODE1_TO_OFFBASE, rLength);
                ip += rLength;
                anchor = ip;
                continue;   /* faster when present ... (?) */
            }
        }
    }
}


FORCE_INLINE_TEMPLATE
size_t ZSTD_compressBlock_doubleFast_dictMatchState_generic(
        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
        void const* src, size_t srcSize,
        U32 const mls /* template */)
{
    ZSTD_compressionParameters const* cParams = &ms->cParams;
    U32* const hashLong = ms->hashTable;
    const U32 hBitsL = cParams->hashLog;
    U32* const hashSmall = ms->chainTable;
    const U32 hBitsS = cParams->chainLog;
    const BYTE* const base = ms->window.base;
    const BYTE* const istart = (const BYTE*)src;
    const BYTE* ip = istart;
    const BYTE* anchor = istart;
    const U32 endIndex = (U32)((size_t)(istart - base) + srcSize);
    /* presumes that, if there is a dictionary, it must be using Attach mode */
    const U32 prefixLowestIndex = ZSTD_getLowestPrefixIndex(ms, endIndex, cParams->windowLog);
    const BYTE* const prefixLowest = base + prefixLowestIndex;
    const BYTE* const iend = istart + srcSize;
    const BYTE* const ilimit = iend - HASH_READ_SIZE;
    U32 offset_1=rep[0], offset_2=rep[1];

    const ZSTD_matchState_t* const dms = ms->dictMatchState;
    const ZSTD_compressionParameters* const dictCParams = &dms->cParams;
    const U32* const dictHashLong  = dms->hashTable;
    const U32* const dictHashSmall = dms->chainTable;
    const U32 dictStartIndex       = dms->window.dictLimit;
    const BYTE* const dictBase     = dms->window.base;
    const BYTE* const dictStart    = dictBase + dictStartIndex;
    const BYTE* const dictEnd      = dms->window.nextSrc;
    const U32 dictIndexDelta       = prefixLowestIndex - (U32)(dictEnd - dictBase);
    const U32 dictHBitsL           = dictCParams->hashLog + ZSTD_SHORT_CACHE_TAG_BITS;
    const U32 dictHBitsS           = dictCParams->chainLog + ZSTD_SHORT_CACHE_TAG_BITS;
    const U32 dictAndPrefixLength  = (U32)((ip - prefixLowest) + (dictEnd - dictStart));

    DEBUGLOG(5, "ZSTD_compressBlock_doubleFast_dictMatchState_generic");

    /* if a dictionary is attached, it must be within window range */
    assert(ms->window.dictLimit + (1U << cParams->windowLog) >= endIndex);

    if (ms->prefetchCDictTables) {
        size_t const hashTableBytes = (((size_t)1) << dictCParams->hashLog) * sizeof(U32);
        size_t const chainTableBytes = (((size_t)1) << dictCParams->chainLog) * sizeof(U32);
        PREFETCH_AREA(dictHashLong, hashTableBytes)
        PREFETCH_AREA(dictHashSmall, chainTableBytes)
    }

    /* init */
    ip += (dictAndPrefixLength == 0);

    /* dictMatchState repCode checks don't currently handle repCode == 0
     * disabling. */
    assert(offset_1 <= dictAndPrefixLength);
    assert(offset_2 <= dictAndPrefixLength);

    /* Main Search Loop */
    while (ip < ilimit) {   /* < instead of <=, because repcode check at (ip+1) */
        size_t mLength;
        U32 offset;
        size_t const h2 = ZSTD_hashPtr(ip, hBitsL, 8);
        size_t const h = ZSTD_hashPtr(ip, hBitsS, mls);
        size_t const dictHashAndTagL = ZSTD_hashPtr(ip, dictHBitsL, 8);
        size_t const dictHashAndTagS = ZSTD_hashPtr(ip, dictHBitsS, mls);
        U32 const dictMatchIndexAndTagL = dictHashLong[dictHashAndTagL >> ZSTD_SHORT_CACHE_TAG_BITS];
        U32 const dictMatchIndexAndTagS = dictHashSmall[dictHashAndTagS >> ZSTD_SHORT_CACHE_TAG_BITS];
        int const dictTagsMatchL = ZSTD_comparePackedTags(dictMatchIndexAndTagL, dictHashAndTagL);
        int const dictTagsMatchS = ZSTD_comparePackedTags(dictMatchIndexAndTagS, dictHashAndTagS);
        U32 const curr = (U32)(ip-base);
        U32 const matchIndexL = hashLong[h2];
        U32 matchIndexS = hashSmall[h];
        const BYTE* matchLong = base + matchIndexL;
        const BYTE* match = base + matchIndexS;
        const U32 repIndex = curr + 1 - offset_1;
        const BYTE* repMatch = (repIndex < prefixLowestIndex) ?
                               dictBase + (repIndex - dictIndexDelta) :
                               base + repIndex;
        hashLong[h2] = hashSmall[h] = curr;   /* update hash tables */

        /* check repcode */
        if (((U32)((prefixLowestIndex-1) - repIndex) >= 3 /* intentional underflow */)
            && (MEM_read32(repMatch) == MEM_read32(ip+1)) ) {
            const BYTE* repMatchEnd = repIndex < prefixLowestIndex ? dictEnd : iend;
            mLength = ZSTD_count_2segments(ip+1+4, repMatch+4, iend, repMatchEnd, prefixLowest) + 4;
            ip++;
            ZSTD_storeSeq(seqStore, (size_t)(ip-anchor), anchor, iend, REPCODE1_TO_OFFBASE, mLength);
            goto _match_stored;
        }

        if (matchIndexL > prefixLowestIndex) {
            /* check prefix long match */
            if (MEM_read64(matchLong) == MEM_read64(ip)) {
                mLength = ZSTD_count(ip+8, matchLong+8, iend) + 8;
                offset = (U32)(ip-matchLong);
                while (((ip>anchor) & (matchLong>prefixLowest)) && (ip[-1] == matchLong[-1])) { ip--; matchLong--; mLength++; } /* catch up */
                goto _match_found;
            }
        } else if (dictTagsMatchL) {
            /* check dictMatchState long match */
            U32 const dictMatchIndexL = dictMatchIndexAndTagL >> ZSTD_SHORT_CACHE_TAG_BITS;
            const BYTE* dictMatchL = dictBase + dictMatchIndexL;
            assert(dictMatchL < dictEnd);

            if (dictMatchL > dictStart && MEM_read64(dictMatchL) == MEM_read64(ip)) {
                mLength = ZSTD_count_2segments(ip+8, dictMatchL+8, iend, dictEnd, prefixLowest) + 8;
                offset = (U32)(curr - dictMatchIndexL - dictIndexDelta);
                while (((ip>anchor) & (dictMatchL>dictStart)) && (ip[-1] == dictMatchL[-1])) { ip--; dictMatchL--; mLength++; } /* catch up */
                goto _match_found;
        }   }

        if (matchIndexS > prefixLowestIndex) {
            /* check prefix short match */
            if (MEM_read32(match) == MEM_read32(ip)) {
                goto _search_next_long;
            }
        } else if (dictTagsMatchS) {
            /* check dictMatchState short match */
            U32 const dictMatchIndexS = dictMatchIndexAndTagS >> ZSTD_SHORT_CACHE_TAG_BITS;
            match = dictBase + dictMatchIndexS;
            matchIndexS = dictMatchIndexS + dictIndexDelta;

            if (match > dictStart && MEM_read32(match) == MEM_read32(ip)) {
                goto _search_next_long;
        }   }

        ip += ((ip-anchor) >> kSearchStrength) + 1;
#if defined(__aarch64__)
        PREFETCH_L1(ip+256);
#endif
        continue;

_search_next_long:
        {   size_t const hl3 = ZSTD_hashPtr(ip+1, hBitsL, 8);
            size_t const dictHashAndTagL3 = ZSTD_hashPtr(ip+1, dictHBitsL, 8);
            U32 const matchIndexL3 = hashLong[hl3];
            U32 const dictMatchIndexAndTagL3 = dictHashLong[dictHashAndTagL3 >> ZSTD_SHORT_CACHE_TAG_BITS];
            int const dictTagsMatchL3 = ZSTD_comparePackedTags(dictMatchIndexAndTagL3, dictHashAndTagL3);
            const BYTE* matchL3 = base + matchIndexL3;
            hashLong[hl3] = curr + 1;

            /* check prefix long +1 match */
            if (matchIndexL3 > prefixLowestIndex) {
                if (MEM_read64(matchL3) == MEM_read64(ip+1)) {
                    mLength = ZSTD_count(ip+9, matchL3+8, iend) + 8;
                    ip++;
                    offset = (U32)(ip-matchL3);
                    while (((ip>anchor) & (matchL3>prefixLowest)) && (ip[-1] == matchL3[-1])) { ip--; matchL3--; mLength++; } /* catch up */
                    goto _match_found;
                }
            } else if (dictTagsMatchL3) {
                /* check dict long +1 match */
                U32 const dictMatchIndexL3 = dictMatchIndexAndTagL3 >> ZSTD_SHORT_CACHE_TAG_BITS;
                const BYTE* dictMatchL3 = dictBase + dictMatchIndexL3;
                assert(dictMatchL3 < dictEnd);
                if (dictMatchL3 > dictStart && MEM_read64(dictMatchL3) == MEM_read64(ip+1)) {
                    mLength = ZSTD_count_2segments(ip+1+8, dictMatchL3+8, iend, dictEnd, prefixLowest) + 8;
                    ip++;
                    offset = (U32)(curr + 1 - dictMatchIndexL3 - dictIndexDelta);
                    while (((ip>anchor) & (dictMatchL3>dictStart)) && (ip[-1] == dictMatchL3[-1])) { ip--; dictMatchL3--; mLength++; } /* catch up */
                    goto _match_found;
        }   }   }

        /* if no long +1 match, explore the short match we found */
        if (matchIndexS < prefixLowestIndex) {
            mLength = ZSTD_count_2segments(ip+4, match+4, iend, dictEnd, prefixLowest) + 4;
            offset = (U32)(curr - matchIndexS);
            while (((ip>anchor) & (match>dictStart)) && (ip[-1] == match[-1])) { ip--; match--; mLength++; } /* catch up */
        } else {
            mLength = ZSTD_count(ip+4, match+4, iend) + 4;
            offset = (U32)(ip - match);
            while (((ip>anchor) & (match>prefixLowest)) && (ip[-1] == match[-1])) { ip--; match--; mLength++; } /* catch up */
        }

_match_found:
        offset_2 = offset_1;
        offset_1 = offset;

        ZSTD_storeSeq(seqStore, (size_t)(ip-anchor), anchor, iend, OFFSET_TO_OFFBASE(offset), mLength);

_match_stored:
        /* match found */
        ip += mLength;
        anchor = ip;

        if (ip <= ilimit) {
            /* Complementary insertion */
            /* done after iLimit test, as candidates could be > iend-8 */
            {   U32 const indexToInsert = curr+2;
                hashLong[ZSTD_hashPtr(base+indexToInsert, hBitsL, 8)] = indexToInsert;
                hashLong[ZSTD_hashPtr(ip-2, hBitsL, 8)] = (U32)(ip-2-base);
                hashSmall[ZSTD_hashPtr(base+indexToInsert, hBitsS, mls)] = indexToInsert;
                hashSmall[ZSTD_hashPtr(ip-1, hBitsS, mls)] = (U32)(ip-1-base);
            }

            /* check immediate repcode */
            while (ip <= ilimit) {
                U32 const current2 = (U32)(ip-base);
                U32 const repIndex2 = current2 - offset_2;
                const BYTE* repMatch2 = repIndex2 < prefixLowestIndex ?
                        dictBase + repIndex2 - dictIndexDelta :
                        base + repIndex2;
                if ( ((U32)((prefixLowestIndex-1) - (U32)repIndex2) >= 3 /* intentional overflow */)
                   && (MEM_read32(repMatch2) == MEM_read32(ip)) ) {
                    const BYTE* const repEnd2 = repIndex2 < prefixLowestIndex ? dictEnd : iend;
                    size_t const repLength2 = ZSTD_count_2segments(ip+4, repMatch2+4, iend, repEnd2, prefixLowest) + 4;
                    U32 tmpOffset = offset_2; offset_2 = offset_1; offset_1 = tmpOffset;   /* swap offset_2 <=> offset_1 */
                    ZSTD_storeSeq(seqStore, 0, anchor, iend, REPCODE1_TO_OFFBASE, repLength2);
                    hashSmall[ZSTD_hashPtr(ip, hBitsS, mls)] = current2;
                    hashLong[ZSTD_hashPtr(ip, hBitsL, 8)] = current2;
                    ip += repLength2;
                    anchor = ip;
                    continue;
                }
                break;
            }
        }
    }   /* while (ip < ilimit) */

    /* save reps for next block */
    rep[0] = offset_1;
    rep[1] = offset_2;

    /* Return the last literals size */
    return (size_t)(iend - anchor);
}

#define ZSTD_GEN_DFAST_FN(dictMode, mls)                                                                 \
    static size_t ZSTD_compressBlock_doubleFast_##dictMode##_##mls(                                      \
            ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],                          \
            void const* src, size_t srcSize)                                                             \
    {                                                                                                    \
        return ZSTD_compressBlock_doubleFast_##dictMode##_generic(ms, seqStore, rep, src, srcSize, mls); \
    }

ZSTD_GEN_DFAST_FN(noDict, 4)
ZSTD_GEN_DFAST_FN(noDict, 5)
ZSTD_GEN_DFAST_FN(noDict, 6)
ZSTD_GEN_DFAST_FN(noDict, 7)

ZSTD_GEN_DFAST_FN(dictMatchState, 4)
ZSTD_GEN_DFAST_FN(dictMatchState, 5)
ZSTD_GEN_DFAST_FN(dictMatchState, 6)
ZSTD_GEN_DFAST_FN(dictMatchState, 7)


size_t ZSTD_compressBlock_doubleFast(
        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
        void const* src, size_t srcSize)
{
    const U32 mls = ms->cParams.minMatch;
    switch(mls)
    {
    default: /* includes case 3 */
    case 4 :
        return ZSTD_compressBlock_doubleFast_noDict_4(ms, seqStore, rep, src, srcSize);
    case 5 :
        return ZSTD_compressBlock_doubleFast_noDict_5(ms, seqStore, rep, src, srcSize);
    case 6 :
        return ZSTD_compressBlock_doubleFast_noDict_6(ms, seqStore, rep, src, srcSize);
    case 7 :
        return ZSTD_compressBlock_doubleFast_noDict_7(ms, seqStore, rep, src, srcSize);
    }
}


size_t ZSTD_compressBlock_doubleFast_dictMatchState(
        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
        void const* src, size_t srcSize)
{
    const U32 mls = ms->cParams.minMatch;
    switch(mls)
    {
    default: /* includes case 3 */
    case 4 :
        return ZSTD_compressBlock_doubleFast_dictMatchState_4(ms, seqStore, rep, src, srcSize);
    case 5 :
        return ZSTD_compressBlock_doubleFast_dictMatchState_5(ms, seqStore, rep, src, srcSize);
    case 6 :
        return ZSTD_compressBlock_doubleFast_dictMatchState_6(ms, seqStore, rep, src, srcSize);
    case 7 :
        return ZSTD_compressBlock_doubleFast_dictMatchState_7(ms, seqStore, rep, src, srcSize);
    }
}


static size_t ZSTD_compressBlock_doubleFast_extDict_generic(
        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
        void const* src, size_t srcSize,
        U32 const mls /* template */)
{
    ZSTD_compressionParameters const* cParams = &ms->cParams;
    U32* const hashLong = ms->hashTable;
    U32  const hBitsL = cParams->hashLog;
    U32* const hashSmall = ms->chainTable;
    U32  const hBitsS = cParams->chainLog;
    const BYTE* const istart = (const BYTE*)src;
    const BYTE* ip = istart;
    const BYTE* anchor = istart;
    const BYTE* const iend = istart + srcSize;
    const BYTE* const ilimit = iend - 8;
    const BYTE* const base = ms->window.base;
    const U32   endIndex = (U32)((size_t)(istart - base) + srcSize);
    const U32   lowLimit = ZSTD_getLowestMatchIndex(ms, endIndex, cParams->windowLog);
    const U32   dictStartIndex = lowLimit;
    const U32   dictLimit = ms->window.dictLimit;
    const U32   prefixStartIndex = (dictLimit > lowLimit) ? dictLimit : lowLimit;
    const BYTE* const prefixStart = base + prefixStartIndex;
    const BYTE* const dictBase = ms->window.dictBase;
    const BYTE* const dictStart = dictBase + dictStartIndex;
    const BYTE* const dictEnd = dictBase + prefixStartIndex;
    U32 offset_1=rep[0], offset_2=rep[1];

    DEBUGLOG(5, "ZSTD_compressBlock_doubleFast_extDict_generic (srcSize=%zu)", srcSize);

    /* if extDict is invalidated due to maxDistance, switch to "regular" variant */
    if (prefixStartIndex == dictStartIndex)
        return ZSTD_compressBlock_doubleFast(ms, seqStore, rep, src, srcSize);

    /* Search Loop */
    while (ip < ilimit) {  /* < instead of <=, because (ip+1) */
        const size_t hSmall = ZSTD_hashPtr(ip, hBitsS, mls);
        const U32 matchIndex = hashSmall[hSmall];
        const BYTE* const matchBase = matchIndex < prefixStartIndex ? dictBase : base;
        const BYTE* match = matchBase + matchIndex;

        const size_t hLong = ZSTD_hashPtr(ip, hBitsL, 8);
        const U32 matchLongIndex = hashLong[hLong];
        const BYTE* const matchLongBase = matchLongIndex < prefixStartIndex ? dictBase : base;
        const BYTE* matchLong = matchLongBase + matchLongIndex;

        const U32 curr = (U32)(ip-base);
        const U32 repIndex = curr + 1 - offset_1;   /* offset_1 expected <= curr +1 */
        const BYTE* const repBase = repIndex < prefixStartIndex ? dictBase : base;
        const BYTE* const repMatch = repBase + repIndex;
        size_t mLength;
        hashSmall[hSmall] = hashLong[hLong] = curr;   /* update hash table */

        if ((((U32)((prefixStartIndex-1) - repIndex) >= 3) /* intentional underflow : ensure repIndex doesn't overlap dict + prefix */
            & (offset_1 <= curr+1 - dictStartIndex)) /* note: we are searching at curr+1 */
          && (MEM_read32(repMatch) == MEM_read32(ip+1)) ) {
            const BYTE* repMatchEnd = repIndex < prefixStartIndex ? dictEnd : iend;
            mLength = ZSTD_count_2segments(ip+1+4, repMatch+4, iend, repMatchEnd, prefixStart) + 4;
            ip++;
            ZSTD_storeSeq(seqStore, (size_t)(ip-anchor), anchor, iend, REPCODE1_TO_OFFBASE, mLength);
        } else {
            if ((matchLongIndex > dictStartIndex) && (MEM_read64(matchLong) == MEM_read64(ip))) {
                const BYTE* const matchEnd = matchLongIndex < prefixStartIndex ? dictEnd : iend;
                const BYTE* const lowMatchPtr = matchLongIndex < prefixStartIndex ? dictStart : prefixStart;
                U32 offset;
                mLength = ZSTD_count_2segments(ip+8, matchLong+8, iend, matchEnd, prefixStart) + 8;
                offset = curr - matchLongIndex;
                while (((ip>anchor) & (matchLong>lowMatchPtr)) && (ip[-1] == matchLong[-1])) { ip--; matchLong--; mLength++; }   /* catch up */
                offset_2 = offset_1;
                offset_1 = offset;
                ZSTD_storeSeq(seqStore, (size_t)(ip-anchor), anchor, iend, OFFSET_TO_OFFBASE(offset), mLength);

            } else if ((matchIndex > dictStartIndex) && (MEM_read32(match) == MEM_read32(ip))) {
                size_t const h3 = ZSTD_hashPtr(ip+1, hBitsL, 8);
                U32 const matchIndex3 = hashLong[h3];
                const BYTE* const match3Base = matchIndex3 < prefixStartIndex ? dictBase : base;
                const BYTE* match3 = match3Base + matchIndex3;
                U32 offset;
                hashLong[h3] = curr + 1;
                if ( (matchIndex3 > dictStartIndex) && (MEM_read64(match3) == MEM_read64(ip+1)) ) {
                    const BYTE* const matchEnd = matchIndex3 < prefixStartIndex ? dictEnd : iend;
                    const BYTE* const lowMatchPtr = matchIndex3 < prefixStartIndex ? dictStart : prefixStart;
                    mLength = ZSTD_count_2segments(ip+9, match3+8, iend, matchEnd, prefixStart) + 8;
                    ip++;
                    offset = curr+1 - matchIndex3;
                    while (((ip>anchor) & (match3>lowMatchPtr)) && (ip[-1] == match3[-1])) { ip--; match3--; mLength++; } /* catch up */
                } else {
                    const BYTE* const matchEnd = matchIndex < prefixStartIndex ? dictEnd : iend;
                    const BYTE* const lowMatchPtr = matchIndex < prefixStartIndex ? dictStart : prefixStart;
                    mLength = ZSTD_count_2segments(ip+4, match+4, iend, matchEnd, prefixStart) + 4;
                    offset = curr - matchIndex;
                    while (((ip>anchor) & (match>lowMatchPtr)) && (ip[-1] == match[-1])) { ip--; match--; mLength++; }   /* catch up */
                }
                offset_2 = offset_1;
                offset_1 = offset;
                ZSTD_storeSeq(seqStore, (size_t)(ip-anchor), anchor, iend, OFFSET_TO_OFFBASE(offset), mLength);

            } else {
                ip += ((ip-anchor) >> kSearchStrength) + 1;
                continue;
        }   }

        /* move to next sequence start */
        ip += mLength;
        anchor = ip;

        if (ip <= ilimit) {
            /* Complementary insertion */
            /* done after iLimit test, as candidates could be > iend-8 */
            {   U32 const indexToInsert = curr+2;
                hashLong[ZSTD_hashPtr(base+indexToInsert, hBitsL, 8)] = indexToInsert;
                hashLong[ZSTD_hashPtr(ip-2, hBitsL, 8)] = (U32)(ip-2-base);
                hashSmall[ZSTD_hashPtr(base+indexToInsert, hBitsS, mls)] = indexToInsert;
                hashSmall[ZSTD_hashPtr(ip-1, hBitsS, mls)] = (U32)(ip-1-base);
            }

            /* check immediate repcode */
            while (ip <= ilimit) {
                U32 const current2 = (U32)(ip-base);
                U32 const repIndex2 = current2 - offset_2;
                const BYTE* repMatch2 = repIndex2 < prefixStartIndex ? dictBase + repIndex2 : base + repIndex2;
                if ( (((U32)((prefixStartIndex-1) - repIndex2) >= 3)   /* intentional overflow : ensure repIndex2 doesn't overlap dict + prefix */
                    & (offset_2 <= current2 - dictStartIndex))
                  && (MEM_read32(repMatch2) == MEM_read32(ip)) ) {
                    const BYTE* const repEnd2 = repIndex2 < prefixStartIndex ? dictEnd : iend;
                    size_t const repLength2 = ZSTD_count_2segments(ip+4, repMatch2+4, iend, repEnd2, prefixStart) + 4;
                    U32 const tmpOffset = offset_2; offset_2 = offset_1; offset_1 = tmpOffset;   /* swap offset_2 <=> offset_1 */
                    ZSTD_storeSeq(seqStore, 0, anchor, iend, REPCODE1_TO_OFFBASE, repLength2);
                    hashSmall[ZSTD_hashPtr(ip, hBitsS, mls)] = current2;
                    hashLong[ZSTD_hashPtr(ip, hBitsL, 8)] = current2;
                    ip += repLength2;
                    anchor = ip;
                    continue;
                }
                break;
    }   }   }

    /* save reps for next block */
    rep[0] = offset_1;
    rep[1] = offset_2;

    /* Return the last literals size */
    return (size_t)(iend - anchor);
}

ZSTD_GEN_DFAST_FN(extDict, 4)
ZSTD_GEN_DFAST_FN(extDict, 5)
ZSTD_GEN_DFAST_FN(extDict, 6)
ZSTD_GEN_DFAST_FN(extDict, 7)

size_t ZSTD_compressBlock_doubleFast_extDict(
        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
        void const* src, size_t srcSize)
{
    U32 const mls = ms->cParams.minMatch;
    switch(mls)
    {
    default: /* includes case 3 */
    case 4 :
        return ZSTD_compressBlock_doubleFast_extDict_4(ms, seqStore, rep, src, srcSize);
    case 5 :
        return ZSTD_compressBlock_doubleFast_extDict_5(ms, seqStore, rep, src, srcSize);
    case 6 :
        return ZSTD_compressBlock_doubleFast_extDict_6(ms, seqStore, rep, src, srcSize);
    case 7 :
        return ZSTD_compressBlock_doubleFast_extDict_7(ms, seqStore, rep, src, srcSize);
    }
}
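A note on the ZSTD_GEN_DFAST_FN macro used above: it stamps out one specialised wrapper per (dictMode, mls) pair so the generic body is compiled with mls as a compile-time constant. Hand-expanded for illustration, ZSTD_GEN_DFAST_FN(noDict, 4) is equivalent to:

/* Hand-expansion of ZSTD_GEN_DFAST_FN(noDict, 4), shown for illustration only. */
static size_t ZSTD_compressBlock_doubleFast_noDict_4(
        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
        void const* src, size_t srcSize)
{
    /* mls is now the literal 4, letting the compiler specialise the generic body */
    return ZSTD_compressBlock_doubleFast_noDict_generic(ms, seqStore, rep, src, srcSize, 4);
}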
39
ext/zstd/lib/compress/zstd_double_fast.h
Normal file
@@ -0,0 +1,39 @@
/*
 * Copyright (c) Meta Platforms, Inc. and affiliates.
 * All rights reserved.
 *
 * This source code is licensed under both the BSD-style license (found in the
 * LICENSE file in the root directory of this source tree) and the GPLv2 (found
 * in the COPYING file in the root directory of this source tree).
 * You may select, at your option, one of the above-listed licenses.
 */

#ifndef ZSTD_DOUBLE_FAST_H
#define ZSTD_DOUBLE_FAST_H

#if defined (__cplusplus)
extern "C" {
#endif

#include "../common/mem.h"      /* U32 */
#include "zstd_compress_internal.h"     /* ZSTD_CCtx, size_t */

void ZSTD_fillDoubleHashTable(ZSTD_matchState_t* ms,
                              void const* end, ZSTD_dictTableLoadMethod_e dtlm,
                              ZSTD_tableFillPurpose_e tfp);
size_t ZSTD_compressBlock_doubleFast(
        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
        void const* src, size_t srcSize);
size_t ZSTD_compressBlock_doubleFast_dictMatchState(
        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
        void const* src, size_t srcSize);
size_t ZSTD_compressBlock_doubleFast_extDict(
        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
        void const* src, size_t srcSize);


#if defined (__cplusplus)
}
#endif

#endif /* ZSTD_DOUBLE_FAST_H */
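These three declarations are the double-fast entry points for the three dictionary modes. In zstd proper the choice is made by the block-compressor selector in zstd_compress.c; conceptually the dispatch looks like this hypothetical helper (sketch only, not the vendored code):

/* Conceptual sketch of the dictionary-mode dispatch; names and flags here
 * are hypothetical, the real selection lives in ZSTD_selectBlockCompressor. */
static size_t doubleFast_dispatch_sketch(ZSTD_matchState_t* ms, seqStore_t* seqStore,
                                         U32 rep[ZSTD_REP_NUM],
                                         const void* src, size_t srcSize,
                                         int usesExtDict, int hasDictMatchState)
{
    if (usesExtDict)          /* match window split across an external segment */
        return ZSTD_compressBlock_doubleFast_extDict(ms, seqStore, rep, src, srcSize);
    if (hasDictMatchState)    /* attached CDict tables searched alongside the prefix */
        return ZSTD_compressBlock_doubleFast_dictMatchState(ms, seqStore, rep, src, srcSize);
    return ZSTD_compressBlock_doubleFast(ms, seqStore, rep, src, srcSize);
}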
960
ext/zstd/lib/compress/zstd_fast.c
vendored
Normal file
@@ -0,0 +1,960 @@
|
||||
/*
|
||||
* Copyright (c) Meta Platforms, Inc. and affiliates.
|
||||
* All rights reserved.
|
||||
*
|
||||
* This source code is licensed under both the BSD-style license (found in the
|
||||
* LICENSE file in the root directory of this source tree) and the GPLv2 (found
|
||||
* in the COPYING file in the root directory of this source tree).
|
||||
* You may select, at your option, one of the above-listed licenses.
|
||||
*/
|
||||
|
||||
#include "zstd_compress_internal.h" /* ZSTD_hashPtr, ZSTD_count, ZSTD_storeSeq */
|
||||
#include "zstd_fast.h"
|
||||
|
||||
static void ZSTD_fillHashTableForCDict(ZSTD_matchState_t* ms,
|
||||
const void* const end,
|
||||
ZSTD_dictTableLoadMethod_e dtlm)
|
||||
{
|
||||
const ZSTD_compressionParameters* const cParams = &ms->cParams;
|
||||
U32* const hashTable = ms->hashTable;
|
||||
U32 const hBits = cParams->hashLog + ZSTD_SHORT_CACHE_TAG_BITS;
|
||||
U32 const mls = cParams->minMatch;
|
||||
const BYTE* const base = ms->window.base;
|
||||
const BYTE* ip = base + ms->nextToUpdate;
|
||||
const BYTE* const iend = ((const BYTE*)end) - HASH_READ_SIZE;
|
||||
const U32 fastHashFillStep = 3;
|
||||
|
||||
/* Currently, we always use ZSTD_dtlm_full for filling CDict tables.
|
||||
* Feel free to remove this assert if there's a good reason! */
|
||||
assert(dtlm == ZSTD_dtlm_full);
|
||||
|
||||
/* Always insert every fastHashFillStep position into the hash table.
|
||||
* Insert the other positions if their hash entry is empty.
|
||||
*/
|
||||
for ( ; ip + fastHashFillStep < iend + 2; ip += fastHashFillStep) {
|
||||
U32 const curr = (U32)(ip - base);
|
||||
{ size_t const hashAndTag = ZSTD_hashPtr(ip, hBits, mls);
|
||||
ZSTD_writeTaggedIndex(hashTable, hashAndTag, curr); }
|
||||
|
||||
if (dtlm == ZSTD_dtlm_fast) continue;
|
||||
/* Only load extra positions for ZSTD_dtlm_full */
|
||||
{ U32 p;
|
||||
for (p = 1; p < fastHashFillStep; ++p) {
|
||||
size_t const hashAndTag = ZSTD_hashPtr(ip + p, hBits, mls);
|
||||
if (hashTable[hashAndTag >> ZSTD_SHORT_CACHE_TAG_BITS] == 0) { /* not yet filled */
|
||||
ZSTD_writeTaggedIndex(hashTable, hashAndTag, curr + p);
|
||||
} } } }
|
||||
}
|
||||
|
||||
static void ZSTD_fillHashTableForCCtx(ZSTD_matchState_t* ms,
|
||||
const void* const end,
|
||||
ZSTD_dictTableLoadMethod_e dtlm)
|
||||
{
|
||||
const ZSTD_compressionParameters* const cParams = &ms->cParams;
|
||||
U32* const hashTable = ms->hashTable;
|
||||
U32 const hBits = cParams->hashLog;
|
||||
U32 const mls = cParams->minMatch;
|
||||
const BYTE* const base = ms->window.base;
|
||||
const BYTE* ip = base + ms->nextToUpdate;
|
||||
const BYTE* const iend = ((const BYTE*)end) - HASH_READ_SIZE;
|
||||
const U32 fastHashFillStep = 3;
|
||||
|
||||
/* Currently, we always use ZSTD_dtlm_fast for filling CCtx tables.
|
||||
* Feel free to remove this assert if there's a good reason! */
|
||||
assert(dtlm == ZSTD_dtlm_fast);
|
||||
|
||||
/* Always insert every fastHashFillStep position into the hash table.
|
||||
* Insert the other positions if their hash entry is empty.
|
||||
*/
|
||||
for ( ; ip + fastHashFillStep < iend + 2; ip += fastHashFillStep) {
|
||||
U32 const curr = (U32)(ip - base);
|
||||
size_t const hash0 = ZSTD_hashPtr(ip, hBits, mls);
|
||||
hashTable[hash0] = curr;
|
||||
if (dtlm == ZSTD_dtlm_fast) continue;
|
||||
/* Only load extra positions for ZSTD_dtlm_full */
|
||||
{ U32 p;
|
||||
for (p = 1; p < fastHashFillStep; ++p) {
|
||||
size_t const hash = ZSTD_hashPtr(ip + p, hBits, mls);
|
||||
if (hashTable[hash] == 0) { /* not yet filled */
|
||||
hashTable[hash] = curr + p;
|
||||
} } } }
|
||||
}
|
||||
|
||||
void ZSTD_fillHashTable(ZSTD_matchState_t* ms,
|
||||
const void* const end,
|
||||
ZSTD_dictTableLoadMethod_e dtlm,
|
||||
ZSTD_tableFillPurpose_e tfp)
|
||||
{
|
||||
if (tfp == ZSTD_tfp_forCDict) {
|
||||
ZSTD_fillHashTableForCDict(ms, end, dtlm);
|
||||
} else {
|
||||
ZSTD_fillHashTableForCCtx(ms, end, dtlm);
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
/**
|
||||
* If you squint hard enough (and ignore repcodes), the search operation at any
|
||||
* given position is broken into 4 stages:
|
||||
*
|
||||
* 1. Hash (map position to hash value via input read)
|
||||
* 2. Lookup (map hash val to index via hashtable read)
|
||||
* 3. Load (map index to value at that position via input read)
|
||||
* 4. Compare
|
||||
*
|
||||
* Each of these steps involves a memory read at an address which is computed
|
||||
* from the previous step. This means these steps must be sequenced and their
|
||||
* latencies are cumulative.
|
||||
*
|
||||
* Rather than do 1->2->3->4 sequentially for a single position before moving
|
||||
* onto the next, this implementation interleaves these operations across the
|
||||
* next few positions:
|
||||
*
|
||||
* R = Repcode Read & Compare
|
||||
* H = Hash
|
||||
* T = Table Lookup
|
||||
* M = Match Read & Compare
|
||||
*
|
||||
* Pos | Time -->
|
||||
* ----+-------------------
|
||||
* N | ... M
|
||||
* N+1 | ... TM
|
||||
* N+2 | R H T M
|
||||
* N+3 | H TM
|
||||
* N+4 | R H T M
|
||||
* N+5 | H ...
|
||||
* N+6 | R ...
|
||||
*
|
||||
* This is very much analogous to the pipelining of execution in a CPU. And just
|
||||
* like a CPU, we have to dump the pipeline when we find a match (i.e., take a
|
||||
* branch).
|
||||
*
|
||||
* When this happens, we throw away our current state, and do the following prep
|
||||
* to re-enter the loop:
|
||||
*
|
||||
* Pos | Time -->
|
||||
* ----+-------------------
|
||||
* N | H T
|
||||
* N+1 | H
|
||||
*
|
||||
* This is also the work we do at the beginning to enter the loop initially.
|
||||
*/
|
||||
FORCE_INLINE_TEMPLATE size_t
|
||||
ZSTD_compressBlock_fast_noDict_generic(
|
||||
ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
|
||||
void const* src, size_t srcSize,
|
||||
U32 const mls, U32 const hasStep)
|
||||
{
|
||||
const ZSTD_compressionParameters* const cParams = &ms->cParams;
|
||||
U32* const hashTable = ms->hashTable;
|
||||
U32 const hlog = cParams->hashLog;
|
||||
/* support stepSize of 0 */
|
||||
size_t const stepSize = hasStep ? (cParams->targetLength + !(cParams->targetLength) + 1) : 2;
|
||||
const BYTE* const base = ms->window.base;
|
||||
const BYTE* const istart = (const BYTE*)src;
|
||||
const U32 endIndex = (U32)((size_t)(istart - base) + srcSize);
|
||||
const U32 prefixStartIndex = ZSTD_getLowestPrefixIndex(ms, endIndex, cParams->windowLog);
|
||||
const BYTE* const prefixStart = base + prefixStartIndex;
|
||||
const BYTE* const iend = istart + srcSize;
|
||||
const BYTE* const ilimit = iend - HASH_READ_SIZE;
|
||||
|
||||
const BYTE* anchor = istart;
|
||||
const BYTE* ip0 = istart;
|
||||
const BYTE* ip1;
|
||||
const BYTE* ip2;
|
||||
const BYTE* ip3;
|
||||
U32 current0;
|
||||
|
||||
U32 rep_offset1 = rep[0];
|
||||
U32 rep_offset2 = rep[1];
|
||||
U32 offsetSaved1 = 0, offsetSaved2 = 0;
|
||||
|
||||
size_t hash0; /* hash for ip0 */
|
||||
size_t hash1; /* hash for ip1 */
|
||||
U32 idx; /* match idx for ip0 */
|
||||
U32 mval; /* src value at match idx */
|
||||
|
||||
U32 offcode;
|
||||
const BYTE* match0;
|
||||
size_t mLength;
|
||||
|
||||
/* ip0 and ip1 are always adjacent. The targetLength skipping and
|
||||
* uncompressibility acceleration is applied to every other position,
|
||||
* matching the behavior of #1562. step therefore represents the gap
|
||||
* between pairs of positions, from ip0 to ip2 or ip1 to ip3. */
|
||||
size_t step;
|
||||
const BYTE* nextStep;
|
||||
const size_t kStepIncr = (1 << (kSearchStrength - 1));
|
||||
|
||||
DEBUGLOG(5, "ZSTD_compressBlock_fast_generic");
|
||||
ip0 += (ip0 == prefixStart);
|
||||
{ U32 const curr = (U32)(ip0 - base);
|
||||
U32 const windowLow = ZSTD_getLowestPrefixIndex(ms, curr, cParams->windowLog);
|
||||
U32 const maxRep = curr - windowLow;
|
||||
if (rep_offset2 > maxRep) offsetSaved2 = rep_offset2, rep_offset2 = 0;
|
||||
if (rep_offset1 > maxRep) offsetSaved1 = rep_offset1, rep_offset1 = 0;
|
||||
}
|
||||
|
||||
/* start each op */
|
||||
_start: /* Requires: ip0 */
|
||||
|
||||
step = stepSize;
|
||||
nextStep = ip0 + kStepIncr;
|
||||
|
||||
/* calculate positions, ip0 - anchor == 0, so we skip step calc */
|
||||
ip1 = ip0 + 1;
|
||||
ip2 = ip0 + step;
|
||||
ip3 = ip2 + 1;
|
||||
|
||||
if (ip3 >= ilimit) {
|
||||
goto _cleanup;
|
||||
}
|
||||
|
||||
hash0 = ZSTD_hashPtr(ip0, hlog, mls);
|
||||
hash1 = ZSTD_hashPtr(ip1, hlog, mls);
|
||||
|
||||
idx = hashTable[hash0];
|
||||
|
||||
do {
|
||||
/* load repcode match for ip[2]*/
|
||||
const U32 rval = MEM_read32(ip2 - rep_offset1);
|
||||
|
||||
/* write back hash table entry */
|
||||
current0 = (U32)(ip0 - base);
|
||||
hashTable[hash0] = current0;
|
||||
|
||||
/* check repcode at ip[2] */
|
||||
if ((MEM_read32(ip2) == rval) & (rep_offset1 > 0)) {
|
||||
ip0 = ip2;
|
||||
match0 = ip0 - rep_offset1;
|
||||
mLength = ip0[-1] == match0[-1];
|
||||
ip0 -= mLength;
|
||||
match0 -= mLength;
|
||||
offcode = REPCODE1_TO_OFFBASE;
|
||||
mLength += 4;
|
||||
|
||||
/* First write next hash table entry; we've already calculated it.
|
||||
* This write is known to be safe because the ip1 is before the
|
||||
* repcode (ip2). */
|
||||
hashTable[hash1] = (U32)(ip1 - base);
|
||||
|
||||
goto _match;
|
||||
}
|
||||
|
||||
/* load match for ip[0] */
|
||||
if (idx >= prefixStartIndex) {
|
||||
mval = MEM_read32(base + idx);
|
||||
} else {
|
||||
mval = MEM_read32(ip0) ^ 1; /* guaranteed to not match. */
|
||||
}
|
||||
|
||||
/* check match at ip[0] */
|
||||
if (MEM_read32(ip0) == mval) {
|
||||
/* found a match! */
|
||||
|
||||
/* First write next hash table entry; we've already calculated it.
|
||||
* This write is known to be safe because the ip1 == ip0 + 1, so
|
||||
* we know we will resume searching after ip1 */
|
||||
hashTable[hash1] = (U32)(ip1 - base);
|
||||
|
||||
goto _offset;
|
||||
}
|
||||
|
||||
/* lookup ip[1] */
|
||||
idx = hashTable[hash1];
|
||||
|
||||
/* hash ip[2] */
|
||||
hash0 = hash1;
|
||||
hash1 = ZSTD_hashPtr(ip2, hlog, mls);
|
||||
|
||||
/* advance to next positions */
|
||||
ip0 = ip1;
|
||||
ip1 = ip2;
|
||||
ip2 = ip3;
|
||||
|
||||
/* write back hash table entry */
|
||||
current0 = (U32)(ip0 - base);
|
||||
hashTable[hash0] = current0;
|
||||
|
||||
/* load match for ip[0] */
|
||||
if (idx >= prefixStartIndex) {
|
||||
mval = MEM_read32(base + idx);
|
||||
} else {
|
||||
mval = MEM_read32(ip0) ^ 1; /* guaranteed to not match. */
|
||||
}
|
||||
|
||||
/* check match at ip[0] */
|
||||
if (MEM_read32(ip0) == mval) {
|
||||
/* found a match! */
|
||||
|
||||
/* first write next hash table entry; we've already calculated it */
|
||||
if (step <= 4) {
|
||||
/* We need to avoid writing an index into the hash table >= the
|
||||
* position at which we will pick up our searching after we've
|
||||
* taken this match.
|
||||
*
|
||||
* The minimum possible match has length 4, so the earliest ip0
|
||||
* can be after we take this match will be the current ip0 + 4.
|
||||
* ip1 is ip0 + step - 1. If ip1 is >= ip0 + 4, we can't safely
|
||||
* write this position.
|
||||
*/
|
||||
hashTable[hash1] = (U32)(ip1 - base);
|
||||
}
|
||||
|
||||
goto _offset;
|
||||
}
|
||||
|
||||
/* lookup ip[1] */
|
||||
idx = hashTable[hash1];
|
||||
|
||||
/* hash ip[2] */
|
||||
hash0 = hash1;
|
||||
hash1 = ZSTD_hashPtr(ip2, hlog, mls);
|
||||
|
||||
/* advance to next positions */
|
||||
ip0 = ip1;
|
||||
ip1 = ip2;
|
||||
ip2 = ip0 + step;
|
||||
ip3 = ip1 + step;
|
||||
|
||||
/* calculate step */
|
||||
if (ip2 >= nextStep) {
|
||||
step++;
|
||||
PREFETCH_L1(ip1 + 64);
|
||||
PREFETCH_L1(ip1 + 128);
|
||||
nextStep += kStepIncr;
|
||||
}
|
||||
} while (ip3 < ilimit);
|
||||
|
||||
_cleanup:
|
||||
/* Note that there are probably still a couple positions we could search.
|
||||
* However, it seems to be a meaningful performance hit to try to search
|
||||
* them. So let's not. */
|
||||
|
||||
/* When the repcodes are outside of the prefix, we set them to zero before the loop.
|
||||
* When the offsets are still zero, we need to restore them after the block to have a correct
|
||||
* repcode history. If only one offset was invalid, it is easy. The tricky case is when both
|
||||
* offsets were invalid. We need to figure out which offset to refill with.
|
||||
* - If both offsets are zero they are in the same order.
|
||||
* - If both offsets are non-zero, we won't restore the offsets from `offsetSaved[12]`.
|
||||
* - If only one is zero, we need to decide which offset to restore.
|
||||
* - If rep_offset1 is non-zero, then rep_offset2 must be offsetSaved1.
|
||||
* - It is impossible for rep_offset2 to be non-zero.
|
||||
*
|
||||
* So if rep_offset1 started invalid (offsetSaved1 != 0) and became valid (rep_offset1 != 0), then
|
||||
* set rep[0] = rep_offset1 and rep[1] = offsetSaved1.
|
||||
*/
|
||||
offsetSaved2 = ((offsetSaved1 != 0) && (rep_offset1 != 0)) ? offsetSaved1 : offsetSaved2;
|
||||
|
||||
/* save reps for next block */
|
||||
rep[0] = rep_offset1 ? rep_offset1 : offsetSaved1;
|
||||
rep[1] = rep_offset2 ? rep_offset2 : offsetSaved2;
|
||||
|
||||
/* Return the last literals size */
|
||||
return (size_t)(iend - anchor);
|
||||
|
||||
_offset: /* Requires: ip0, idx */
|
||||
|
||||
/* Compute the offset code. */
|
||||
match0 = base + idx;
|
||||
rep_offset2 = rep_offset1;
|
||||
rep_offset1 = (U32)(ip0-match0);
|
||||
offcode = OFFSET_TO_OFFBASE(rep_offset1);
|
||||
mLength = 4;
|
||||
|
||||
/* Count the backwards match length. */
|
||||
while (((ip0>anchor) & (match0>prefixStart)) && (ip0[-1] == match0[-1])) {
|
||||
ip0--;
|
||||
match0--;
|
||||
mLength++;
|
||||
}
|
||||
|
||||
_match: /* Requires: ip0, match0, offcode */
|
||||
|
||||
/* Count the forward length. */
|
||||
mLength += ZSTD_count(ip0 + mLength, match0 + mLength, iend);
|
||||
|
||||
ZSTD_storeSeq(seqStore, (size_t)(ip0 - anchor), anchor, iend, offcode, mLength);
|
||||
|
||||
ip0 += mLength;
|
||||
anchor = ip0;
|
||||
|
||||
/* Fill table and check for immediate repcode. */
|
||||
if (ip0 <= ilimit) {
|
||||
/* Fill Table */
|
||||
assert(base+current0+2 > istart); /* check base overflow */
|
||||
hashTable[ZSTD_hashPtr(base+current0+2, hlog, mls)] = current0+2; /* here because current+2 could be > iend-8 */
|
||||
hashTable[ZSTD_hashPtr(ip0-2, hlog, mls)] = (U32)(ip0-2-base);
|
||||
|
||||
if (rep_offset2 > 0) { /* rep_offset2==0 means rep_offset2 is invalidated */
|
||||
while ( (ip0 <= ilimit) && (MEM_read32(ip0) == MEM_read32(ip0 - rep_offset2)) ) {
|
||||
/* store sequence */
|
||||
size_t const rLength = ZSTD_count(ip0+4, ip0+4-rep_offset2, iend) + 4;
|
||||
{ U32 const tmpOff = rep_offset2; rep_offset2 = rep_offset1; rep_offset1 = tmpOff; } /* swap rep_offset2 <=> rep_offset1 */
|
||||
hashTable[ZSTD_hashPtr(ip0, hlog, mls)] = (U32)(ip0-base);
|
||||
ip0 += rLength;
|
||||
ZSTD_storeSeq(seqStore, 0 /*litLen*/, anchor, iend, REPCODE1_TO_OFFBASE, rLength);
|
||||
anchor = ip0;
|
||||
continue; /* faster when present (confirmed on gcc-8) ... (?) */
|
||||
} } }
|
||||
|
||||
goto _start;
|
||||
}
|
||||
|
||||
#define ZSTD_GEN_FAST_FN(dictMode, mls, step) \
|
||||
static size_t ZSTD_compressBlock_fast_##dictMode##_##mls##_##step( \
|
||||
ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], \
|
||||
void const* src, size_t srcSize) \
|
||||
{ \
|
||||
return ZSTD_compressBlock_fast_##dictMode##_generic(ms, seqStore, rep, src, srcSize, mls, step); \
|
||||
}
ZSTD_GEN_FAST_FN(noDict, 4, 1)
ZSTD_GEN_FAST_FN(noDict, 5, 1)
ZSTD_GEN_FAST_FN(noDict, 6, 1)
ZSTD_GEN_FAST_FN(noDict, 7, 1)

ZSTD_GEN_FAST_FN(noDict, 4, 0)
ZSTD_GEN_FAST_FN(noDict, 5, 0)
ZSTD_GEN_FAST_FN(noDict, 6, 0)
ZSTD_GEN_FAST_FN(noDict, 7, 0)

size_t ZSTD_compressBlock_fast(
        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
        void const* src, size_t srcSize)
{
    U32 const mls = ms->cParams.minMatch;
    assert(ms->dictMatchState == NULL);
    if (ms->cParams.targetLength > 1) {
        switch(mls)
        {
        default: /* includes case 3 */
        case 4 :
            return ZSTD_compressBlock_fast_noDict_4_1(ms, seqStore, rep, src, srcSize);
        case 5 :
            return ZSTD_compressBlock_fast_noDict_5_1(ms, seqStore, rep, src, srcSize);
        case 6 :
            return ZSTD_compressBlock_fast_noDict_6_1(ms, seqStore, rep, src, srcSize);
        case 7 :
            return ZSTD_compressBlock_fast_noDict_7_1(ms, seqStore, rep, src, srcSize);
        }
    } else {
        switch(mls)
        {
        default: /* includes case 3 */
        case 4 :
            return ZSTD_compressBlock_fast_noDict_4_0(ms, seqStore, rep, src, srcSize);
        case 5 :
            return ZSTD_compressBlock_fast_noDict_5_0(ms, seqStore, rep, src, srcSize);
        case 6 :
            return ZSTD_compressBlock_fast_noDict_6_0(ms, seqStore, rep, src, srcSize);
        case 7 :
            return ZSTD_compressBlock_fast_noDict_7_0(ms, seqStore, rep, src, srcSize);
        }
    }
}
FORCE_INLINE_TEMPLATE
size_t ZSTD_compressBlock_fast_dictMatchState_generic(
        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
        void const* src, size_t srcSize, U32 const mls, U32 const hasStep)
{
    const ZSTD_compressionParameters* const cParams = &ms->cParams;
    U32* const hashTable = ms->hashTable;
    U32 const hlog = cParams->hashLog;
    /* support stepSize of 0 */
    U32 const stepSize = cParams->targetLength + !(cParams->targetLength);
    const BYTE* const base = ms->window.base;
    const BYTE* const istart = (const BYTE*)src;
    const BYTE* ip0 = istart;
    const BYTE* ip1 = ip0 + stepSize; /* we assert below that stepSize >= 1 */
    const BYTE* anchor = istart;
    const U32 prefixStartIndex = ms->window.dictLimit;
    const BYTE* const prefixStart = base + prefixStartIndex;
    const BYTE* const iend = istart + srcSize;
    const BYTE* const ilimit = iend - HASH_READ_SIZE;
    U32 offset_1=rep[0], offset_2=rep[1];

    const ZSTD_matchState_t* const dms = ms->dictMatchState;
    const ZSTD_compressionParameters* const dictCParams = &dms->cParams ;
    const U32* const dictHashTable = dms->hashTable;
    const U32 dictStartIndex       = dms->window.dictLimit;
    const BYTE* const dictBase     = dms->window.base;
    const BYTE* const dictStart    = dictBase + dictStartIndex;
    const BYTE* const dictEnd      = dms->window.nextSrc;
    const U32 dictIndexDelta       = prefixStartIndex - (U32)(dictEnd - dictBase);
    const U32 dictAndPrefixLength  = (U32)(istart - prefixStart + dictEnd - dictStart);
    const U32 dictHBits            = dictCParams->hashLog + ZSTD_SHORT_CACHE_TAG_BITS;

    /* if a dictionary is still attached, it necessarily means that
     * it is within window size. So we just check it. */
    const U32 maxDistance = 1U << cParams->windowLog;
    const U32 endIndex = (U32)((size_t)(istart - base) + srcSize);
    assert(endIndex - prefixStartIndex <= maxDistance);
    (void)maxDistance; (void)endIndex;   /* these variables are not used when assert() is disabled */

    (void)hasStep; /* not currently specialized on whether it's accelerated */

    /* ensure there will be no underflow
     * when translating a dict index into a local index */
    assert(prefixStartIndex >= (U32)(dictEnd - dictBase));

    if (ms->prefetchCDictTables) {
        size_t const hashTableBytes = (((size_t)1) << dictCParams->hashLog) * sizeof(U32);
        PREFETCH_AREA(dictHashTable, hashTableBytes)
    }

    /* init */
    DEBUGLOG(5, "ZSTD_compressBlock_fast_dictMatchState_generic");
    ip0 += (dictAndPrefixLength == 0);
    /* dictMatchState repCode checks don't currently handle repCode == 0
     * disabling. */
    assert(offset_1 <= dictAndPrefixLength);
    assert(offset_2 <= dictAndPrefixLength);

    /* Outer search loop */
    assert(stepSize >= 1);
    while (ip1 <= ilimit) {   /* repcode check at (ip0 + 1) is safe because ip0 < ip1 */
        size_t mLength;
        size_t hash0 = ZSTD_hashPtr(ip0, hlog, mls);

        size_t const dictHashAndTag0 = ZSTD_hashPtr(ip0, dictHBits, mls);
        U32 dictMatchIndexAndTag = dictHashTable[dictHashAndTag0 >> ZSTD_SHORT_CACHE_TAG_BITS];
        int dictTagsMatch = ZSTD_comparePackedTags(dictMatchIndexAndTag, dictHashAndTag0);

        U32 matchIndex = hashTable[hash0];
        U32 curr = (U32)(ip0 - base);
        size_t step = stepSize;
        const size_t kStepIncr = 1 << kSearchStrength;
        const BYTE* nextStep = ip0 + kStepIncr;

        /* Inner search loop */
        while (1) {
            const BYTE* match = base + matchIndex;
            const U32 repIndex = curr + 1 - offset_1;
            const BYTE* repMatch = (repIndex < prefixStartIndex) ?
                                   dictBase + (repIndex - dictIndexDelta) :
                                   base + repIndex;
            const size_t hash1 = ZSTD_hashPtr(ip1, hlog, mls);
            size_t const dictHashAndTag1 = ZSTD_hashPtr(ip1, dictHBits, mls);
            hashTable[hash0] = curr;   /* update hash table */

            if (((U32) ((prefixStartIndex - 1) - repIndex) >=
                 3) /* intentional underflow : ensure repIndex isn't overlapping dict + prefix */
                && (MEM_read32(repMatch) == MEM_read32(ip0 + 1))) {
                const BYTE* const repMatchEnd = repIndex < prefixStartIndex ? dictEnd : iend;
                mLength = ZSTD_count_2segments(ip0 + 1 + 4, repMatch + 4, iend, repMatchEnd, prefixStart) + 4;
                ip0++;
                ZSTD_storeSeq(seqStore, (size_t) (ip0 - anchor), anchor, iend, REPCODE1_TO_OFFBASE, mLength);
                break;
            }

            if (dictTagsMatch) {
                /* Found a possible dict match */
                const U32 dictMatchIndex = dictMatchIndexAndTag >> ZSTD_SHORT_CACHE_TAG_BITS;
                const BYTE* dictMatch = dictBase + dictMatchIndex;
                if (dictMatchIndex > dictStartIndex &&
                    MEM_read32(dictMatch) == MEM_read32(ip0)) {
                    /* To replicate extDict parse behavior, we only use dict matches when the normal matchIndex is invalid */
                    if (matchIndex <= prefixStartIndex) {
                        U32 const offset = (U32) (curr - dictMatchIndex - dictIndexDelta);
                        mLength = ZSTD_count_2segments(ip0 + 4, dictMatch + 4, iend, dictEnd, prefixStart) + 4;
                        while (((ip0 > anchor) & (dictMatch > dictStart))
                               && (ip0[-1] == dictMatch[-1])) {
                            ip0--;
                            dictMatch--;
                            mLength++;
                        } /* catch up */
                        offset_2 = offset_1;
                        offset_1 = offset;
                        ZSTD_storeSeq(seqStore, (size_t) (ip0 - anchor), anchor, iend, OFFSET_TO_OFFBASE(offset), mLength);
                        break;
                    }
                }
            }

            if (matchIndex > prefixStartIndex && MEM_read32(match) == MEM_read32(ip0)) {
                /* found a regular match */
                U32 const offset = (U32) (ip0 - match);
                mLength = ZSTD_count(ip0 + 4, match + 4, iend) + 4;
                while (((ip0 > anchor) & (match > prefixStart))
                       && (ip0[-1] == match[-1])) {
                    ip0--;
                    match--;
                    mLength++;
                } /* catch up */
                offset_2 = offset_1;
                offset_1 = offset;
                ZSTD_storeSeq(seqStore, (size_t) (ip0 - anchor), anchor, iend, OFFSET_TO_OFFBASE(offset), mLength);
                break;
            }

            /* Prepare for next iteration */
            dictMatchIndexAndTag = dictHashTable[dictHashAndTag1 >> ZSTD_SHORT_CACHE_TAG_BITS];
            dictTagsMatch = ZSTD_comparePackedTags(dictMatchIndexAndTag, dictHashAndTag1);
            matchIndex = hashTable[hash1];

            if (ip1 >= nextStep) {
                step++;
                nextStep += kStepIncr;
            }
            ip0 = ip1;
            ip1 = ip1 + step;
            if (ip1 > ilimit) goto _cleanup;

            curr = (U32)(ip0 - base);
            hash0 = hash1;
        }   /* end inner search loop */

        /* match found */
        assert(mLength);
        ip0 += mLength;
        anchor = ip0;

        if (ip0 <= ilimit) {
            /* Fill Table */
            assert(base+curr+2 > istart);  /* check base overflow */
            hashTable[ZSTD_hashPtr(base+curr+2, hlog, mls)] = curr+2;  /* here because curr+2 could be > iend-8 */
            hashTable[ZSTD_hashPtr(ip0-2, hlog, mls)] = (U32)(ip0-2-base);

            /* check immediate repcode */
            while (ip0 <= ilimit) {
                U32 const current2 = (U32)(ip0-base);
                U32 const repIndex2 = current2 - offset_2;
                const BYTE* repMatch2 = repIndex2 < prefixStartIndex ?
                        dictBase - dictIndexDelta + repIndex2 :
                        base + repIndex2;
                if ( ((U32)((prefixStartIndex-1) - (U32)repIndex2) >= 3 /* intentional overflow */)
                     && (MEM_read32(repMatch2) == MEM_read32(ip0))) {
                    const BYTE* const repEnd2 = repIndex2 < prefixStartIndex ? dictEnd : iend;
                    size_t const repLength2 = ZSTD_count_2segments(ip0+4, repMatch2+4, iend, repEnd2, prefixStart) + 4;
                    U32 tmpOffset = offset_2; offset_2 = offset_1; offset_1 = tmpOffset;   /* swap offset_2 <=> offset_1 */
                    ZSTD_storeSeq(seqStore, 0, anchor, iend, REPCODE1_TO_OFFBASE, repLength2);
                    hashTable[ZSTD_hashPtr(ip0, hlog, mls)] = current2;
                    ip0 += repLength2;
                    anchor = ip0;
                    continue;
                }
                break;
            }
        }

        /* Prepare for next iteration */
        assert(ip0 == anchor);
        ip1 = ip0 + stepSize;
    }

_cleanup:
    /* save reps for next block */
    rep[0] = offset_1;
    rep[1] = offset_2;

    /* Return the last literals size */
    return (size_t)(iend - anchor);
}

ZSTD_GEN_FAST_FN(dictMatchState, 4, 0)
ZSTD_GEN_FAST_FN(dictMatchState, 5, 0)
ZSTD_GEN_FAST_FN(dictMatchState, 6, 0)
ZSTD_GEN_FAST_FN(dictMatchState, 7, 0)

size_t ZSTD_compressBlock_fast_dictMatchState(
        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
        void const* src, size_t srcSize)
{
    U32 const mls = ms->cParams.minMatch;
    assert(ms->dictMatchState != NULL);
    switch(mls)
    {
    default: /* includes case 3 */
    case 4 :
        return ZSTD_compressBlock_fast_dictMatchState_4_0(ms, seqStore, rep, src, srcSize);
    case 5 :
        return ZSTD_compressBlock_fast_dictMatchState_5_0(ms, seqStore, rep, src, srcSize);
    case 6 :
        return ZSTD_compressBlock_fast_dictMatchState_6_0(ms, seqStore, rep, src, srcSize);
    case 7 :
        return ZSTD_compressBlock_fast_dictMatchState_7_0(ms, seqStore, rep, src, srcSize);
    }
}

static size_t ZSTD_compressBlock_fast_extDict_generic(
        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
        void const* src, size_t srcSize, U32 const mls, U32 const hasStep)
{
    const ZSTD_compressionParameters* const cParams = &ms->cParams;
    U32* const hashTable = ms->hashTable;
    U32 const hlog = cParams->hashLog;
    /* support stepSize of 0 */
    size_t const stepSize = cParams->targetLength + !(cParams->targetLength) + 1;
    const BYTE* const base = ms->window.base;
    const BYTE* const dictBase = ms->window.dictBase;
    const BYTE* const istart = (const BYTE*)src;
    const BYTE* anchor = istart;
    const U32   endIndex = (U32)((size_t)(istart - base) + srcSize);
    const U32   lowLimit = ZSTD_getLowestMatchIndex(ms, endIndex, cParams->windowLog);
    const U32   dictStartIndex = lowLimit;
    const BYTE* const dictStart = dictBase + dictStartIndex;
    const U32   dictLimit = ms->window.dictLimit;
    const U32   prefixStartIndex = dictLimit < lowLimit ? lowLimit : dictLimit;
    const BYTE* const prefixStart = base + prefixStartIndex;
    const BYTE* const dictEnd = dictBase + prefixStartIndex;
    const BYTE* const iend = istart + srcSize;
    const BYTE* const ilimit = iend - 8;
    U32 offset_1=rep[0], offset_2=rep[1];
    U32 offsetSaved1 = 0, offsetSaved2 = 0;

    const BYTE* ip0 = istart;
    const BYTE* ip1;
    const BYTE* ip2;
    const BYTE* ip3;
    U32 current0;

    size_t hash0; /* hash for ip0 */
    size_t hash1; /* hash for ip1 */
    U32 idx; /* match idx for ip0 */
    const BYTE* idxBase; /* base pointer for idx */

    U32 offcode;
    const BYTE* match0;
    size_t mLength;
    const BYTE* matchEnd = 0; /* initialize to avoid warning, assert != 0 later */

    size_t step;
    const BYTE* nextStep;
    const size_t kStepIncr = (1 << (kSearchStrength - 1));

    (void)hasStep; /* not currently specialized on whether it's accelerated */

    DEBUGLOG(5, "ZSTD_compressBlock_fast_extDict_generic (offset_1=%u)", offset_1);

    /* switch to "regular" variant if extDict is invalidated due to maxDistance */
    if (prefixStartIndex == dictStartIndex)
        return ZSTD_compressBlock_fast(ms, seqStore, rep, src, srcSize);

    {   U32 const curr = (U32)(ip0 - base);
        U32 const maxRep = curr - dictStartIndex;
        if (offset_2 >= maxRep) offsetSaved2 = offset_2, offset_2 = 0;
        if (offset_1 >= maxRep) offsetSaved1 = offset_1, offset_1 = 0;
    }

    /* start each op */
_start: /* Requires: ip0 */

    step = stepSize;
    nextStep = ip0 + kStepIncr;

    /* calculate positions, ip0 - anchor == 0, so we skip step calc */
    ip1 = ip0 + 1;
    ip2 = ip0 + step;
    ip3 = ip2 + 1;

    if (ip3 >= ilimit) {
        goto _cleanup;
    }

    hash0 = ZSTD_hashPtr(ip0, hlog, mls);
    hash1 = ZSTD_hashPtr(ip1, hlog, mls);

    idx = hashTable[hash0];
    idxBase = idx < prefixStartIndex ? dictBase : base;

    do {
        {   /* load repcode match for ip[2] */
            U32 const current2 = (U32)(ip2 - base);
            U32 const repIndex = current2 - offset_1;
            const BYTE* const repBase = repIndex < prefixStartIndex ? dictBase : base;
            U32 rval;
            if ( ((U32)(prefixStartIndex - repIndex) >= 4) /* intentional underflow */
                 & (offset_1 > 0) ) {
                rval = MEM_read32(repBase + repIndex);
            } else {
                rval = MEM_read32(ip2) ^ 1; /* guaranteed to not match. */
            }

            /* write back hash table entry */
            current0 = (U32)(ip0 - base);
            hashTable[hash0] = current0;

            /* check repcode at ip[2] */
            if (MEM_read32(ip2) == rval) {
                ip0 = ip2;
                match0 = repBase + repIndex;
                matchEnd = repIndex < prefixStartIndex ? dictEnd : iend;
                assert((match0 != prefixStart) & (match0 != dictStart));
                mLength = ip0[-1] == match0[-1];
                ip0 -= mLength;
                match0 -= mLength;
                offcode = REPCODE1_TO_OFFBASE;
                mLength += 4;
                goto _match;
        }   }

        {   /* load match for ip[0] */
            U32 const mval = idx >= dictStartIndex ?
                    MEM_read32(idxBase + idx) :
                    MEM_read32(ip0) ^ 1; /* guaranteed not to match */

            /* check match at ip[0] */
            if (MEM_read32(ip0) == mval) {
                /* found a match! */
                goto _offset;
        }   }

        /* lookup ip[1] */
        idx = hashTable[hash1];
        idxBase = idx < prefixStartIndex ? dictBase : base;

        /* hash ip[2] */
        hash0 = hash1;
        hash1 = ZSTD_hashPtr(ip2, hlog, mls);

        /* advance to next positions */
        ip0 = ip1;
        ip1 = ip2;
        ip2 = ip3;

        /* write back hash table entry */
        current0 = (U32)(ip0 - base);
        hashTable[hash0] = current0;

        {   /* load match for ip[0] */
            U32 const mval = idx >= dictStartIndex ?
                    MEM_read32(idxBase + idx) :
                    MEM_read32(ip0) ^ 1; /* guaranteed not to match */

            /* check match at ip[0] */
            if (MEM_read32(ip0) == mval) {
                /* found a match! */
                goto _offset;
        }   }

        /* lookup ip[1] */
        idx = hashTable[hash1];
        idxBase = idx < prefixStartIndex ? dictBase : base;

        /* hash ip[2] */
        hash0 = hash1;
        hash1 = ZSTD_hashPtr(ip2, hlog, mls);

        /* advance to next positions */
        ip0 = ip1;
        ip1 = ip2;
        ip2 = ip0 + step;
        ip3 = ip1 + step;

        /* calculate step */
        if (ip2 >= nextStep) {
            step++;
            PREFETCH_L1(ip1 + 64);
            PREFETCH_L1(ip1 + 128);
            nextStep += kStepIncr;
        }
    } while (ip3 < ilimit);

_cleanup:
    /* Note that there are probably still a couple positions we could search.
     * However, it seems to be a meaningful performance hit to try to search
     * them. So let's not. */

    /* If offset_1 started invalid (offsetSaved1 != 0) and became valid (offset_1 != 0),
     * rotate saved offsets. See comment in ZSTD_compressBlock_fast_noDict for more context. */
    offsetSaved2 = ((offsetSaved1 != 0) && (offset_1 != 0)) ? offsetSaved1 : offsetSaved2;

    /* save reps for next block */
    rep[0] = offset_1 ? offset_1 : offsetSaved1;
    rep[1] = offset_2 ? offset_2 : offsetSaved2;

    /* Return the last literals size */
    return (size_t)(iend - anchor);

_offset: /* Requires: ip0, idx, idxBase */

    /* Compute the offset code. */
    {   U32 const offset = current0 - idx;
        const BYTE* const lowMatchPtr = idx < prefixStartIndex ? dictStart : prefixStart;
        matchEnd = idx < prefixStartIndex ? dictEnd : iend;
        match0 = idxBase + idx;
        offset_2 = offset_1;
        offset_1 = offset;
        offcode = OFFSET_TO_OFFBASE(offset);
        mLength = 4;

        /* Count the backwards match length. */
        while (((ip0>anchor) & (match0>lowMatchPtr)) && (ip0[-1] == match0[-1])) {
            ip0--;
            match0--;
            mLength++;
    }   }

_match: /* Requires: ip0, match0, offcode, matchEnd */

    /* Count the forward length. */
    assert(matchEnd != 0);
    mLength += ZSTD_count_2segments(ip0 + mLength, match0 + mLength, iend, matchEnd, prefixStart);

    ZSTD_storeSeq(seqStore, (size_t)(ip0 - anchor), anchor, iend, offcode, mLength);

    ip0 += mLength;
    anchor = ip0;

    /* write next hash table entry */
    if (ip1 < ip0) {
        hashTable[hash1] = (U32)(ip1 - base);
    }

    /* Fill table and check for immediate repcode. */
    if (ip0 <= ilimit) {
        /* Fill Table */
        assert(base+current0+2 > istart);  /* check base overflow */
        hashTable[ZSTD_hashPtr(base+current0+2, hlog, mls)] = current0+2;  /* here because current+2 could be > iend-8 */
        hashTable[ZSTD_hashPtr(ip0-2, hlog, mls)] = (U32)(ip0-2-base);

        while (ip0 <= ilimit) {
            U32 const repIndex2 = (U32)(ip0-base) - offset_2;
            const BYTE* const repMatch2 = repIndex2 < prefixStartIndex ? dictBase + repIndex2 : base + repIndex2;
            if ( (((U32)((prefixStartIndex-1) - repIndex2) >= 3) & (offset_2 > 0))  /* intentional underflow */
                 && (MEM_read32(repMatch2) == MEM_read32(ip0)) ) {
                const BYTE* const repEnd2 = repIndex2 < prefixStartIndex ? dictEnd : iend;
                size_t const repLength2 = ZSTD_count_2segments(ip0+4, repMatch2+4, iend, repEnd2, prefixStart) + 4;
                { U32 const tmpOffset = offset_2; offset_2 = offset_1; offset_1 = tmpOffset; }  /* swap offset_2 <=> offset_1 */
                ZSTD_storeSeq(seqStore, 0 /*litlen*/, anchor, iend, REPCODE1_TO_OFFBASE, repLength2);
                hashTable[ZSTD_hashPtr(ip0, hlog, mls)] = (U32)(ip0-base);
                ip0 += repLength2;
                anchor = ip0;
                continue;
            }
            break;
    }   }

    goto _start;
}

ZSTD_GEN_FAST_FN(extDict, 4, 0)
ZSTD_GEN_FAST_FN(extDict, 5, 0)
ZSTD_GEN_FAST_FN(extDict, 6, 0)
ZSTD_GEN_FAST_FN(extDict, 7, 0)

size_t ZSTD_compressBlock_fast_extDict(
        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
        void const* src, size_t srcSize)
{
    U32 const mls = ms->cParams.minMatch;
    assert(ms->dictMatchState == NULL);
    switch(mls)
    {
    default: /* includes case 3 */
    case 4 :
        return ZSTD_compressBlock_fast_extDict_4_0(ms, seqStore, rep, src, srcSize);
    case 5 :
        return ZSTD_compressBlock_fast_extDict_5_0(ms, seqStore, rep, src, srcSize);
    case 6 :
        return ZSTD_compressBlock_fast_extDict_6_0(ms, seqStore, rep, src, srcSize);
    case 7 :
        return ZSTD_compressBlock_fast_extDict_7_0(ms, seqStore, rep, src, srcSize);
    }
}
38
ext/zstd/lib/compress/zstd_fast.h
Normal file
@@ -0,0 +1,38 @@
/*
 * Copyright (c) Meta Platforms, Inc. and affiliates.
 * All rights reserved.
 *
 * This source code is licensed under both the BSD-style license (found in the
 * LICENSE file in the root directory of this source tree) and the GPLv2 (found
 * in the COPYING file in the root directory of this source tree).
 * You may select, at your option, one of the above-listed licenses.
 */

#ifndef ZSTD_FAST_H
#define ZSTD_FAST_H

#if defined (__cplusplus)
extern "C" {
#endif

#include "../common/mem.h"      /* U32 */
#include "zstd_compress_internal.h"

void ZSTD_fillHashTable(ZSTD_matchState_t* ms,
                        void const* end, ZSTD_dictTableLoadMethod_e dtlm,
                        ZSTD_tableFillPurpose_e tfp);
size_t ZSTD_compressBlock_fast(
        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
        void const* src, size_t srcSize);
size_t ZSTD_compressBlock_fast_dictMatchState(
        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
        void const* src, size_t srcSize);
size_t ZSTD_compressBlock_fast_extDict(
        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
        void const* src, size_t srcSize);

#if defined (__cplusplus)
}
#endif

#endif  /* ZSTD_FAST_H */
2157
ext/zstd/lib/compress/zstd_lazy.c
vendored
Normal file
File diff suppressed because it is too large
127
ext/zstd/lib/compress/zstd_lazy.h
Normal file
@@ -0,0 +1,127 @@
/*
 * Copyright (c) Meta Platforms, Inc. and affiliates.
 * All rights reserved.
 *
 * This source code is licensed under both the BSD-style license (found in the
 * LICENSE file in the root directory of this source tree) and the GPLv2 (found
 * in the COPYING file in the root directory of this source tree).
 * You may select, at your option, one of the above-listed licenses.
 */

#ifndef ZSTD_LAZY_H
#define ZSTD_LAZY_H

#if defined (__cplusplus)
extern "C" {
#endif

#include "zstd_compress_internal.h"

/**
 * Dedicated Dictionary Search Structure bucket log. In the
 * ZSTD_dedicatedDictSearch mode, the hashTable has
 * 2 ** ZSTD_LAZY_DDSS_BUCKET_LOG entries in each bucket, rather than just
 * one.
 */
#define ZSTD_LAZY_DDSS_BUCKET_LOG 2

#define ZSTD_ROW_HASH_TAG_BITS 8        /* nb bits to use for the tag */

U32 ZSTD_insertAndFindFirstIndex(ZSTD_matchState_t* ms, const BYTE* ip);
void ZSTD_row_update(ZSTD_matchState_t* const ms, const BYTE* ip);

void ZSTD_dedicatedDictSearch_lazy_loadDictionary(ZSTD_matchState_t* ms, const BYTE* const ip);

void ZSTD_preserveUnsortedMark (U32* const table, U32 const size, U32 const reducerValue);  /*! used in ZSTD_reduceIndex(). preemptively increase value of ZSTD_DUBT_UNSORTED_MARK */

size_t ZSTD_compressBlock_btlazy2(
        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
        void const* src, size_t srcSize);
size_t ZSTD_compressBlock_lazy2(
        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
        void const* src, size_t srcSize);
size_t ZSTD_compressBlock_lazy(
        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
        void const* src, size_t srcSize);
size_t ZSTD_compressBlock_greedy(
        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
        void const* src, size_t srcSize);
size_t ZSTD_compressBlock_lazy2_row(
        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
        void const* src, size_t srcSize);
size_t ZSTD_compressBlock_lazy_row(
        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
        void const* src, size_t srcSize);
size_t ZSTD_compressBlock_greedy_row(
        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
        void const* src, size_t srcSize);

size_t ZSTD_compressBlock_btlazy2_dictMatchState(
        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
        void const* src, size_t srcSize);
size_t ZSTD_compressBlock_lazy2_dictMatchState(
        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
        void const* src, size_t srcSize);
size_t ZSTD_compressBlock_lazy_dictMatchState(
        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
        void const* src, size_t srcSize);
size_t ZSTD_compressBlock_greedy_dictMatchState(
        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
        void const* src, size_t srcSize);
size_t ZSTD_compressBlock_lazy2_dictMatchState_row(
        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
        void const* src, size_t srcSize);
size_t ZSTD_compressBlock_lazy_dictMatchState_row(
        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
        void const* src, size_t srcSize);
size_t ZSTD_compressBlock_greedy_dictMatchState_row(
        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
        void const* src, size_t srcSize);

size_t ZSTD_compressBlock_lazy2_dedicatedDictSearch(
        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
        void const* src, size_t srcSize);
size_t ZSTD_compressBlock_lazy_dedicatedDictSearch(
        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
        void const* src, size_t srcSize);
size_t ZSTD_compressBlock_greedy_dedicatedDictSearch(
        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
        void const* src, size_t srcSize);
size_t ZSTD_compressBlock_lazy2_dedicatedDictSearch_row(
        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
        void const* src, size_t srcSize);
size_t ZSTD_compressBlock_lazy_dedicatedDictSearch_row(
        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
        void const* src, size_t srcSize);
size_t ZSTD_compressBlock_greedy_dedicatedDictSearch_row(
        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
        void const* src, size_t srcSize);

size_t ZSTD_compressBlock_greedy_extDict(
        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
        void const* src, size_t srcSize);
size_t ZSTD_compressBlock_lazy_extDict(
        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
        void const* src, size_t srcSize);
size_t ZSTD_compressBlock_lazy2_extDict(
        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
        void const* src, size_t srcSize);
size_t ZSTD_compressBlock_greedy_extDict_row(
        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
        void const* src, size_t srcSize);
size_t ZSTD_compressBlock_lazy_extDict_row(
        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
        void const* src, size_t srcSize);
size_t ZSTD_compressBlock_lazy2_extDict_row(
        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
        void const* src, size_t srcSize);
size_t ZSTD_compressBlock_btlazy2_extDict(
        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
        void const* src, size_t srcSize);


#if defined (__cplusplus)
}
#endif

#endif /* ZSTD_LAZY_H */
724
ext/zstd/lib/compress/zstd_ldm.c
vendored
Normal file
@@ -0,0 +1,724 @@
/*
 * Copyright (c) Meta Platforms, Inc. and affiliates.
 * All rights reserved.
 *
 * This source code is licensed under both the BSD-style license (found in the
 * LICENSE file in the root directory of this source tree) and the GPLv2 (found
 * in the COPYING file in the root directory of this source tree).
 * You may select, at your option, one of the above-listed licenses.
 */

#include "zstd_ldm.h"

#include "../common/debug.h"
#include "../common/xxhash.h"
#include "zstd_fast.h"          /* ZSTD_fillHashTable() */
#include "zstd_double_fast.h"   /* ZSTD_fillDoubleHashTable() */
#include "zstd_ldm_geartab.h"

#define LDM_BUCKET_SIZE_LOG 3
#define LDM_MIN_MATCH_LENGTH 64
#define LDM_HASH_RLOG 7

typedef struct {
    U64 rolling;
    U64 stopMask;
} ldmRollingHashState_t;

/** ZSTD_ldm_gear_init():
 *
 * Initializes the rolling hash state such that it will honor the
 * settings in params. */
static void ZSTD_ldm_gear_init(ldmRollingHashState_t* state, ldmParams_t const* params)
{
    unsigned maxBitsInMask = MIN(params->minMatchLength, 64);
    unsigned hashRateLog = params->hashRateLog;

    state->rolling = ~(U32)0;

    /* The choice of the splitting criterion is subject to two conditions:
     *   1. it has to trigger on average every 2^(hashRateLog) bytes;
     *   2. ideally, it has to depend on a window of minMatchLength bytes.
     *
     * In the gear hash algorithm, bit n depends on the last n bytes;
     * so in order to obtain a good quality splitting criterion it is
     * preferable to use bits with high weight.
     *
     * To match condition 1 we use a mask with hashRateLog bits set
     * and, because of the previous remark, we make sure these bits
     * have the highest possible weight while still respecting
     * condition 2.
     */
    if (hashRateLog > 0 && hashRateLog <= maxBitsInMask) {
        state->stopMask = (((U64)1 << hashRateLog) - 1) << (maxBitsInMask - hashRateLog);
    } else {
        /* In this degenerate case we simply honor the hash rate. */
        state->stopMask = ((U64)1 << hashRateLog) - 1;
    }
}
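As a worked illustration of the mask construction above (standalone sketch, not part of zstd): with the defaults minMatchLength = 64 and hashRateLog = 7, maxBitsInMask is 64, so the seven mask bits land on bit positions 57..63, the highest-weight bits of the rolling hash.

#include <stdint.h>
#include <stdio.h>

/* Recomputes the gear stopMask for the default LDM parameters
 * (minMatchLength=64, hashRateLog=7). Illustrative only. */
int main(void)
{
    unsigned const minMatchLength = 64, hashRateLog = 7;
    unsigned const maxBitsInMask = minMatchLength < 64 ? minMatchLength : 64;
    uint64_t const stopMask =
        ((UINT64_C(1) << hashRateLog) - 1) << (maxBitsInMask - hashRateLog);
    /* Prints fe00000000000000: bits 57..63 set, so a split triggers when
     * those 7 bits of the rolling hash are all zero, i.e. on average once
     * every 2^7 = 128 bytes. */
    printf("%016llx\n", (unsigned long long)stopMask);
    return 0;
}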

/** ZSTD_ldm_gear_reset()
 * Feeds [data, data + minMatchLength) into the hash without registering any
 * splits. This effectively resets the hash state. This is used when skipping
 * over data, either at the beginning of a block, or skipping sections.
 */
static void ZSTD_ldm_gear_reset(ldmRollingHashState_t* state,
                                BYTE const* data, size_t minMatchLength)
{
    U64 hash = state->rolling;
    size_t n = 0;

#define GEAR_ITER_ONCE() do {                                  \
        hash = (hash << 1) + ZSTD_ldm_gearTab[data[n] & 0xff]; \
        n += 1;                                                \
    } while (0)
    while (n + 3 < minMatchLength) {
        GEAR_ITER_ONCE();
        GEAR_ITER_ONCE();
        GEAR_ITER_ONCE();
        GEAR_ITER_ONCE();
    }
    while (n < minMatchLength) {
        GEAR_ITER_ONCE();
    }
#undef GEAR_ITER_ONCE
}

/** ZSTD_ldm_gear_feed():
 *
 * Registers in the splits array all the split points found in the first
 * size bytes following the data pointer. This function terminates when
 * either all the data has been processed or LDM_BATCH_SIZE splits are
 * present in the splits array.
 *
 * Precondition: The splits array must not be full.
 * Returns: The number of bytes processed. */
static size_t ZSTD_ldm_gear_feed(ldmRollingHashState_t* state,
                                 BYTE const* data, size_t size,
                                 size_t* splits, unsigned* numSplits)
{
    size_t n;
    U64 hash, mask;

    hash = state->rolling;
    mask = state->stopMask;
    n = 0;

#define GEAR_ITER_ONCE() do {                                  \
        hash = (hash << 1) + ZSTD_ldm_gearTab[data[n] & 0xff]; \
        n += 1;                                                \
        if (UNLIKELY((hash & mask) == 0)) {                    \
            splits[*numSplits] = n;                            \
            *numSplits += 1;                                   \
            if (*numSplits == LDM_BATCH_SIZE)                  \
                goto done;                                     \
        }                                                      \
    } while (0)

    while (n + 3 < size) {
        GEAR_ITER_ONCE();
        GEAR_ITER_ONCE();
        GEAR_ITER_ONCE();
        GEAR_ITER_ONCE();
    }
    while (n < size) {
        GEAR_ITER_ONCE();
    }

#undef GEAR_ITER_ONCE

done:
    state->rolling = hash;
    return n;
}
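The feed loop above is easiest to see in isolation. Below is a toy, self-contained emulation of the same rolling update: the real ZSTD_ldm_gearTab is a fixed 256-entry table of random 64-bit constants, so a hypothetical stand-in table generated with splitmix64 is used here, and the printed split positions are illustrative only.

#include <stdint.h>
#include <stdio.h>

/* Hypothetical stand-in for ZSTD_ldm_gearTab, filled from splitmix64. */
static uint64_t splitmix64(uint64_t* s)
{
    uint64_t z = (*s += 0x9E3779B97F4A7C15ULL);
    z = (z ^ (z >> 30)) * 0xBF58476D1CE4E5B9ULL;
    z = (z ^ (z >> 27)) * 0x94D049BB133111EBULL;
    return z ^ (z >> 31);
}

int main(void)
{
    uint64_t gearTab[256], seed = 42, hash = ~(uint32_t)0;
    /* Mask built as in ZSTD_ldm_gear_init: 10 high-weight bits set,
     * so a split fires on average once per 2^10 = 1024 bytes. */
    uint64_t const stopMask = ((UINT64_C(1) << 10) - 1) << (64 - 10);
    unsigned char data[8192];
    size_t i;

    for (i = 0; i < 256; i++) gearTab[i] = splitmix64(&seed);
    for (i = 0; i < sizeof(data); i++) data[i] = (unsigned char)splitmix64(&seed);

    /* Same update rule as GEAR_ITER_ONCE(): shift in one byte, test the mask. */
    for (i = 0; i < sizeof(data); i++) {
        hash = (hash << 1) + gearTab[data[i]];
        if ((hash & stopMask) == 0)
            printf("split after byte %zu\n", i + 1);
    }
    return 0;
}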

void ZSTD_ldm_adjustParameters(ldmParams_t* params,
                               ZSTD_compressionParameters const* cParams)
{
    params->windowLog = cParams->windowLog;
    ZSTD_STATIC_ASSERT(LDM_BUCKET_SIZE_LOG <= ZSTD_LDM_BUCKETSIZELOG_MAX);
    DEBUGLOG(4, "ZSTD_ldm_adjustParameters");
    if (!params->bucketSizeLog) params->bucketSizeLog = LDM_BUCKET_SIZE_LOG;
    if (!params->minMatchLength) params->minMatchLength = LDM_MIN_MATCH_LENGTH;
    if (params->hashLog == 0) {
        params->hashLog = MAX(ZSTD_HASHLOG_MIN, params->windowLog - LDM_HASH_RLOG);
        assert(params->hashLog <= ZSTD_HASHLOG_MAX);
    }
    if (params->hashRateLog == 0) {
        params->hashRateLog = params->windowLog < params->hashLog
                              ? 0
                              : params->windowLog - params->hashLog;
    }
    params->bucketSizeLog = MIN(params->bucketSizeLog, params->hashLog);
}

size_t ZSTD_ldm_getTableSize(ldmParams_t params)
{
    size_t const ldmHSize = ((size_t)1) << params.hashLog;
    size_t const ldmBucketSizeLog = MIN(params.bucketSizeLog, params.hashLog);
    size_t const ldmBucketSize = ((size_t)1) << (params.hashLog - ldmBucketSizeLog);
    size_t const totalSize = ZSTD_cwksp_alloc_size(ldmBucketSize)
                           + ZSTD_cwksp_alloc_size(ldmHSize * sizeof(ldmEntry_t));
    return params.enableLdm == ZSTD_ps_enable ? totalSize : 0;
}

size_t ZSTD_ldm_getMaxNbSeq(ldmParams_t params, size_t maxChunkSize)
{
    return params.enableLdm == ZSTD_ps_enable ? (maxChunkSize / params.minMatchLength) : 0;
}
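To make the defaulting above concrete (a worked example, not zstd code): with windowLog = 27 and every other LDM parameter left at 0, the derivation gives bucketSizeLog = 3, minMatchLength = 64, hashLog = MAX(ZSTD_HASHLOG_MIN, 27 - 7) = 20 (assuming ZSTD_HASHLOG_MIN does not exceed 20), and hashRateLog = 27 - 20 = 7. The gear hash then splits on average every 2^7 = 128 bytes, and the table holds 2^20 ldmEntry_t slots arranged as 2^17 buckets of 2^3 entries each.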

/** ZSTD_ldm_getBucket() :
 *  Returns a pointer to the start of the bucket associated with hash. */
static ldmEntry_t* ZSTD_ldm_getBucket(
        ldmState_t* ldmState, size_t hash, ldmParams_t const ldmParams)
{
    return ldmState->hashTable + (hash << ldmParams.bucketSizeLog);
}

/** ZSTD_ldm_insertEntry() :
 *  Insert the entry with corresponding hash into the hash table */
static void ZSTD_ldm_insertEntry(ldmState_t* ldmState,
                                 size_t const hash, const ldmEntry_t entry,
                                 ldmParams_t const ldmParams)
{
    BYTE* const pOffset = ldmState->bucketOffsets + hash;
    unsigned const offset = *pOffset;

    *(ZSTD_ldm_getBucket(ldmState, hash, ldmParams) + offset) = entry;
    *pOffset = (BYTE)((offset + 1) & ((1u << ldmParams.bucketSizeLog) - 1));
}

/** ZSTD_ldm_countBackwardsMatch() :
 *  Returns the number of bytes that match backwards before pIn and pMatch.
 *
 *  We count only bytes where pMatch >= pBase and pIn >= pAnchor. */
static size_t ZSTD_ldm_countBackwardsMatch(
        const BYTE* pIn, const BYTE* pAnchor,
        const BYTE* pMatch, const BYTE* pMatchBase)
{
    size_t matchLength = 0;
    while (pIn > pAnchor && pMatch > pMatchBase && pIn[-1] == pMatch[-1]) {
        pIn--;
        pMatch--;
        matchLength++;
    }
    return matchLength;
}

/** ZSTD_ldm_countBackwardsMatch_2segments() :
 *  Returns the number of bytes that match backwards from pMatch,
 *  even with the backwards match spanning 2 different segments.
 *
 *  On reaching `pMatchBase`, start counting from mEnd */
static size_t ZSTD_ldm_countBackwardsMatch_2segments(
        const BYTE* pIn, const BYTE* pAnchor,
        const BYTE* pMatch, const BYTE* pMatchBase,
        const BYTE* pExtDictStart, const BYTE* pExtDictEnd)
{
    size_t matchLength = ZSTD_ldm_countBackwardsMatch(pIn, pAnchor, pMatch, pMatchBase);
    if (pMatch - matchLength != pMatchBase || pMatchBase == pExtDictStart) {
        /* If backwards match is entirely in the extDict or prefix, immediately return */
        return matchLength;
    }
    DEBUGLOG(7, "ZSTD_ldm_countBackwardsMatch_2segments: found 2-parts backwards match (length in prefix==%zu)", matchLength);
    matchLength += ZSTD_ldm_countBackwardsMatch(pIn - matchLength, pAnchor, pExtDictEnd, pExtDictStart);
    DEBUGLOG(7, "final backwards match length = %zu", matchLength);
    return matchLength;
}

/** ZSTD_ldm_fillFastTables() :
 *
 *  Fills the relevant tables for the ZSTD_fast and ZSTD_dfast strategies.
 *  This is similar to ZSTD_loadDictionaryContent.
 *
 *  The tables for the other strategies are filled within their
 *  block compressors. */
static size_t ZSTD_ldm_fillFastTables(ZSTD_matchState_t* ms,
                                      void const* end)
{
    const BYTE* const iend = (const BYTE*)end;

    switch(ms->cParams.strategy)
    {
    case ZSTD_fast:
        ZSTD_fillHashTable(ms, iend, ZSTD_dtlm_fast, ZSTD_tfp_forCCtx);
        break;

    case ZSTD_dfast:
        ZSTD_fillDoubleHashTable(ms, iend, ZSTD_dtlm_fast, ZSTD_tfp_forCCtx);
        break;

    case ZSTD_greedy:
    case ZSTD_lazy:
    case ZSTD_lazy2:
    case ZSTD_btlazy2:
    case ZSTD_btopt:
    case ZSTD_btultra:
    case ZSTD_btultra2:
        break;
    default:
        assert(0);  /* not possible : not a valid strategy id */
    }

    return 0;
}

void ZSTD_ldm_fillHashTable(
        ldmState_t* ldmState, const BYTE* ip,
        const BYTE* iend, ldmParams_t const* params)
{
    U32 const minMatchLength = params->minMatchLength;
    U32 const hBits = params->hashLog - params->bucketSizeLog;
    BYTE const* const base = ldmState->window.base;
    BYTE const* const istart = ip;
    ldmRollingHashState_t hashState;
    size_t* const splits = ldmState->splitIndices;
    unsigned numSplits;

    DEBUGLOG(5, "ZSTD_ldm_fillHashTable");

    ZSTD_ldm_gear_init(&hashState, params);
    while (ip < iend) {
        size_t hashed;
        unsigned n;

        numSplits = 0;
        hashed = ZSTD_ldm_gear_feed(&hashState, ip, iend - ip, splits, &numSplits);

        for (n = 0; n < numSplits; n++) {
            if (ip + splits[n] >= istart + minMatchLength) {
                BYTE const* const split = ip + splits[n] - minMatchLength;
                U64 const xxhash = XXH64(split, minMatchLength, 0);
                U32 const hash = (U32)(xxhash & (((U32)1 << hBits) - 1));
                ldmEntry_t entry;

                entry.offset = (U32)(split - base);
                entry.checksum = (U32)(xxhash >> 32);
                ZSTD_ldm_insertEntry(ldmState, hash, entry, *params);
            }
        }

        ip += hashed;
    }
}


/** ZSTD_ldm_limitTableUpdate() :
 *
 *  Sets cctx->nextToUpdate to a position corresponding closer to anchor
 *  if it is far away
 *  (after a long match, only update tables a limited amount). */
static void ZSTD_ldm_limitTableUpdate(ZSTD_matchState_t* ms, const BYTE* anchor)
{
    U32 const curr = (U32)(anchor - ms->window.base);
    if (curr > ms->nextToUpdate + 1024) {
        ms->nextToUpdate =
            curr - MIN(512, curr - ms->nextToUpdate - 1024);
    }
}

static size_t ZSTD_ldm_generateSequences_internal(
        ldmState_t* ldmState, rawSeqStore_t* rawSeqStore,
        ldmParams_t const* params, void const* src, size_t srcSize)
{
    /* LDM parameters */
    int const extDict = ZSTD_window_hasExtDict(ldmState->window);
    U32 const minMatchLength = params->minMatchLength;
    U32 const entsPerBucket = 1U << params->bucketSizeLog;
    U32 const hBits = params->hashLog - params->bucketSizeLog;
    /* Prefix and extDict parameters */
    U32 const dictLimit = ldmState->window.dictLimit;
    U32 const lowestIndex = extDict ? ldmState->window.lowLimit : dictLimit;
    BYTE const* const base = ldmState->window.base;
    BYTE const* const dictBase = extDict ? ldmState->window.dictBase : NULL;
    BYTE const* const dictStart = extDict ? dictBase + lowestIndex : NULL;
    BYTE const* const dictEnd = extDict ? dictBase + dictLimit : NULL;
    BYTE const* const lowPrefixPtr = base + dictLimit;
    /* Input bounds */
    BYTE const* const istart = (BYTE const*)src;
    BYTE const* const iend = istart + srcSize;
    BYTE const* const ilimit = iend - HASH_READ_SIZE;
    /* Input positions */
    BYTE const* anchor = istart;
    BYTE const* ip = istart;
    /* Rolling hash state */
    ldmRollingHashState_t hashState;
    /* Arrays for staged-processing */
    size_t* const splits = ldmState->splitIndices;
    ldmMatchCandidate_t* const candidates = ldmState->matchCandidates;
    unsigned numSplits;

    if (srcSize < minMatchLength)
        return iend - anchor;

    /* Initialize the rolling hash state with the first minMatchLength bytes */
    ZSTD_ldm_gear_init(&hashState, params);
    ZSTD_ldm_gear_reset(&hashState, ip, minMatchLength);
    ip += minMatchLength;

    while (ip < ilimit) {
        size_t hashed;
        unsigned n;

        numSplits = 0;
        hashed = ZSTD_ldm_gear_feed(&hashState, ip, ilimit - ip,
                                    splits, &numSplits);

        for (n = 0; n < numSplits; n++) {
            BYTE const* const split = ip + splits[n] - minMatchLength;
            U64 const xxhash = XXH64(split, minMatchLength, 0);
            U32 const hash = (U32)(xxhash & (((U32)1 << hBits) - 1));

            candidates[n].split = split;
            candidates[n].hash = hash;
            candidates[n].checksum = (U32)(xxhash >> 32);
            candidates[n].bucket = ZSTD_ldm_getBucket(ldmState, hash, *params);
            PREFETCH_L1(candidates[n].bucket);
        }

        for (n = 0; n < numSplits; n++) {
            size_t forwardMatchLength = 0, backwardMatchLength = 0,
                   bestMatchLength = 0, mLength;
            U32 offset;
            BYTE const* const split = candidates[n].split;
            U32 const checksum = candidates[n].checksum;
            U32 const hash = candidates[n].hash;
            ldmEntry_t* const bucket = candidates[n].bucket;
            ldmEntry_t const* cur;
            ldmEntry_t const* bestEntry = NULL;
            ldmEntry_t newEntry;

            newEntry.offset = (U32)(split - base);
            newEntry.checksum = checksum;

            /* If a split point would generate a sequence overlapping with
             * the previous one, we merely register it in the hash table and
             * move on */
            if (split < anchor) {
                ZSTD_ldm_insertEntry(ldmState, hash, newEntry, *params);
                continue;
            }

            for (cur = bucket; cur < bucket + entsPerBucket; cur++) {
                size_t curForwardMatchLength, curBackwardMatchLength,
                       curTotalMatchLength;
                if (cur->checksum != checksum || cur->offset <= lowestIndex) {
                    continue;
                }
                if (extDict) {
                    BYTE const* const curMatchBase =
                        cur->offset < dictLimit ? dictBase : base;
                    BYTE const* const pMatch = curMatchBase + cur->offset;
                    BYTE const* const matchEnd =
                        cur->offset < dictLimit ? dictEnd : iend;
                    BYTE const* const lowMatchPtr =
                        cur->offset < dictLimit ? dictStart : lowPrefixPtr;
                    curForwardMatchLength =
                        ZSTD_count_2segments(split, pMatch, iend, matchEnd, lowPrefixPtr);
                    if (curForwardMatchLength < minMatchLength) {
                        continue;
                    }
                    curBackwardMatchLength = ZSTD_ldm_countBackwardsMatch_2segments(
                            split, anchor, pMatch, lowMatchPtr, dictStart, dictEnd);
                } else { /* !extDict */
                    BYTE const* const pMatch = base + cur->offset;
                    curForwardMatchLength = ZSTD_count(split, pMatch, iend);
                    if (curForwardMatchLength < minMatchLength) {
                        continue;
                    }
                    curBackwardMatchLength =
                        ZSTD_ldm_countBackwardsMatch(split, anchor, pMatch, lowPrefixPtr);
                }
                curTotalMatchLength = curForwardMatchLength + curBackwardMatchLength;

                if (curTotalMatchLength > bestMatchLength) {
                    bestMatchLength = curTotalMatchLength;
                    forwardMatchLength = curForwardMatchLength;
                    backwardMatchLength = curBackwardMatchLength;
                    bestEntry = cur;
                }
            }

            /* No match found -- insert an entry into the hash table
             * and process the next candidate match */
            if (bestEntry == NULL) {
                ZSTD_ldm_insertEntry(ldmState, hash, newEntry, *params);
                continue;
            }

            /* Match found */
            offset = (U32)(split - base) - bestEntry->offset;
            mLength = forwardMatchLength + backwardMatchLength;
            {
                rawSeq* const seq = rawSeqStore->seq + rawSeqStore->size;

                /* Out of sequence storage */
                if (rawSeqStore->size == rawSeqStore->capacity)
                    return ERROR(dstSize_tooSmall);
                seq->litLength = (U32)(split - backwardMatchLength - anchor);
                seq->matchLength = (U32)mLength;
                seq->offset = offset;
                rawSeqStore->size++;
            }

            /* Insert the current entry into the hash table --- it must be
             * done after the previous block to avoid clobbering bestEntry */
            ZSTD_ldm_insertEntry(ldmState, hash, newEntry, *params);

            anchor = split + forwardMatchLength;

            /* If we find a match that ends after the data that we've hashed
             * then we have a repeating, overlapping, pattern. E.g. all zeros.
             * If one repetition of the pattern matches our `stopMask` then all
             * repetitions will. We don't need to insert them all into our table,
             * only the first one. So skip over overlapping matches.
             * This is a major speed boost (20x) for compressing a single byte
             * repeated, when that byte ends up in the table.
             */
            if (anchor > ip + hashed) {
                ZSTD_ldm_gear_reset(&hashState, anchor - minMatchLength, minMatchLength);
                /* Continue the outer loop at anchor (ip + hashed == anchor). */
                ip = anchor - hashed;
                break;
            }
        }

        ip += hashed;
    }

    return iend - anchor;
}

/*! ZSTD_ldm_reduceTable() :
 *  reduce table indexes by `reducerValue` */
static void ZSTD_ldm_reduceTable(ldmEntry_t* const table, U32 const size,
                                 U32 const reducerValue)
{
    U32 u;
    for (u = 0; u < size; u++) {
        if (table[u].offset < reducerValue) table[u].offset = 0;
        else table[u].offset -= reducerValue;
    }
}

size_t ZSTD_ldm_generateSequences(
        ldmState_t* ldmState, rawSeqStore_t* sequences,
        ldmParams_t const* params, void const* src, size_t srcSize)
{
    U32 const maxDist = 1U << params->windowLog;
    BYTE const* const istart = (BYTE const*)src;
    BYTE const* const iend = istart + srcSize;
    size_t const kMaxChunkSize = 1 << 20;
    size_t const nbChunks = (srcSize / kMaxChunkSize) + ((srcSize % kMaxChunkSize) != 0);
    size_t chunk;
    size_t leftoverSize = 0;

    assert(ZSTD_CHUNKSIZE_MAX >= kMaxChunkSize);
    /* Check that ZSTD_window_update() has been called for this chunk prior
     * to passing it to this function.
     */
    assert(ldmState->window.nextSrc >= (BYTE const*)src + srcSize);
    /* The input could be very large (in zstdmt), so it must be broken up into
     * chunks to enforce the maximum distance and handle overflow correction.
     */
    assert(sequences->pos <= sequences->size);
    assert(sequences->size <= sequences->capacity);
    for (chunk = 0; chunk < nbChunks && sequences->size < sequences->capacity; ++chunk) {
        BYTE const* const chunkStart = istart + chunk * kMaxChunkSize;
        size_t const remaining = (size_t)(iend - chunkStart);
        BYTE const *const chunkEnd =
            (remaining < kMaxChunkSize) ? iend : chunkStart + kMaxChunkSize;
        size_t const chunkSize = chunkEnd - chunkStart;
        size_t newLeftoverSize;
        size_t const prevSize = sequences->size;

        assert(chunkStart < iend);
        /* 1. Perform overflow correction if necessary. */
        if (ZSTD_window_needOverflowCorrection(ldmState->window, 0, maxDist, ldmState->loadedDictEnd, chunkStart, chunkEnd)) {
            U32 const ldmHSize = 1U << params->hashLog;
            U32 const correction = ZSTD_window_correctOverflow(
                &ldmState->window, /* cycleLog */ 0, maxDist, chunkStart);
            ZSTD_ldm_reduceTable(ldmState->hashTable, ldmHSize, correction);
            /* invalidate dictionaries on overflow correction */
            ldmState->loadedDictEnd = 0;
        }
        /* 2. We enforce the maximum offset allowed.
         *
         * kMaxChunkSize should be small enough that we don't lose too much of
         * the window through early invalidation.
         * TODO: * Test the chunk size.
         *       * Try invalidation after the sequence generation and test the
         *         offset against maxDist directly.
         *
         * NOTE: Because of dictionaries + sequence splitting we MUST make sure
         * that any offset used is valid at the END of the sequence, since it may
         * be split into two sequences. This condition holds when using
         * ZSTD_window_enforceMaxDist(), but if we move to checking offsets
         * against maxDist directly, we'll have to carefully handle that case.
         */
        ZSTD_window_enforceMaxDist(&ldmState->window, chunkEnd, maxDist, &ldmState->loadedDictEnd, NULL);
        /* 3. Generate the sequences for the chunk, and get newLeftoverSize. */
        newLeftoverSize = ZSTD_ldm_generateSequences_internal(
            ldmState, sequences, params, chunkStart, chunkSize);
        if (ZSTD_isError(newLeftoverSize))
            return newLeftoverSize;
        /* 4. We add the leftover literals from previous iterations to the first
         *    newly generated sequence, or add the `newLeftoverSize` if none are
         *    generated.
         */
        /* Prepend the leftover literals from the last call */
        if (prevSize < sequences->size) {
            sequences->seq[prevSize].litLength += (U32)leftoverSize;
            leftoverSize = newLeftoverSize;
        } else {
            assert(newLeftoverSize == chunkSize);
            leftoverSize += chunkSize;
        }
    }
    return 0;
}

void
ZSTD_ldm_skipSequences(rawSeqStore_t* rawSeqStore, size_t srcSize, U32 const minMatch)
{
    while (srcSize > 0 && rawSeqStore->pos < rawSeqStore->size) {
        rawSeq* seq = rawSeqStore->seq + rawSeqStore->pos;
        if (srcSize <= seq->litLength) {
            /* Skip past srcSize literals */
            seq->litLength -= (U32)srcSize;
            return;
        }
        srcSize -= seq->litLength;
        seq->litLength = 0;
        if (srcSize < seq->matchLength) {
            /* Skip past the first srcSize of the match */
            seq->matchLength -= (U32)srcSize;
            if (seq->matchLength < minMatch) {
                /* The match is too short, omit it */
                if (rawSeqStore->pos + 1 < rawSeqStore->size) {
                    seq[1].litLength += seq[0].matchLength;
                }
                rawSeqStore->pos++;
            }
            return;
        }
        srcSize -= seq->matchLength;
        seq->matchLength = 0;
        rawSeqStore->pos++;
    }
}

/**
 * If the sequence length is longer than remaining then the sequence is split
 * between this block and the next.
 *
 * Returns the current sequence to handle, or if the rest of the block should
 * be literals, it returns a sequence with offset == 0.
 */
static rawSeq maybeSplitSequence(rawSeqStore_t* rawSeqStore,
                                 U32 const remaining, U32 const minMatch)
{
    rawSeq sequence = rawSeqStore->seq[rawSeqStore->pos];
    assert(sequence.offset > 0);
    /* Likely: No partial sequence */
    if (remaining >= sequence.litLength + sequence.matchLength) {
        rawSeqStore->pos++;
        return sequence;
    }
    /* Cut the sequence short (offset == 0 ==> rest is literals). */
    if (remaining <= sequence.litLength) {
        sequence.offset = 0;
    } else if (remaining < sequence.litLength + sequence.matchLength) {
        sequence.matchLength = remaining - sequence.litLength;
        if (sequence.matchLength < minMatch) {
            sequence.offset = 0;
        }
    }
    /* Skip past `remaining` bytes for the future sequences. */
    ZSTD_ldm_skipSequences(rawSeqStore, remaining, minMatch);
    return sequence;
}
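A worked illustration of the splitting rule above (standalone toy with a local stand-in for zstd's internal rawSeq type; values are invented): a sequence of 10 literals plus a 100-byte match hits a block boundary with only 40 bytes remaining, so the match is cut to 30 bytes, which falls below minMatch and signals "treat the remainder as literals".

#include <stdint.h>
#include <stdio.h>

/* Local stand-in for zstd's internal rawSeq (illustrative only). */
typedef struct { uint32_t litLength, matchLength, offset; } rawSeq;

int main(void)
{
    rawSeq seq = { 10, 100, 5000 };      /* 10 literals, 100-byte match */
    uint32_t const remaining = 40;       /* bytes left in current block */
    uint32_t const minMatch = 64;        /* LDM_MIN_MATCH_LENGTH default */

    /* Same cut logic as maybeSplitSequence() above. */
    if (remaining <= seq.litLength) {
        seq.offset = 0;                  /* whole remainder becomes literals */
    } else if (remaining < seq.litLength + seq.matchLength) {
        seq.matchLength = remaining - seq.litLength;   /* 40 - 10 = 30 */
        if (seq.matchLength < minMatch)
            seq.offset = 0;              /* 30 < 64: too short to emit */
    }
    /* Prints offset=0: the caller treats the rest of this block as
     * literals, and ZSTD_ldm_skipSequences() advances the sequence store
     * so the remainder of the match carries over to the next block. */
    printf("litLength=%u matchLength=%u offset=%u\n",
           seq.litLength, seq.matchLength, seq.offset);
    return 0;
}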

void ZSTD_ldm_skipRawSeqStoreBytes(rawSeqStore_t* rawSeqStore, size_t nbBytes) {
    U32 currPos = (U32)(rawSeqStore->posInSequence + nbBytes);
    while (currPos && rawSeqStore->pos < rawSeqStore->size) {
        rawSeq currSeq = rawSeqStore->seq[rawSeqStore->pos];
        if (currPos >= currSeq.litLength + currSeq.matchLength) {
            currPos -= currSeq.litLength + currSeq.matchLength;
            rawSeqStore->pos++;
        } else {
            rawSeqStore->posInSequence = currPos;
            break;
        }
    }
    if (currPos == 0 || rawSeqStore->pos == rawSeqStore->size) {
        rawSeqStore->posInSequence = 0;
    }
}

size_t ZSTD_ldm_blockCompress(rawSeqStore_t* rawSeqStore,
    ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
    ZSTD_paramSwitch_e useRowMatchFinder,
    void const* src, size_t srcSize)
{
    const ZSTD_compressionParameters* const cParams = &ms->cParams;
    unsigned const minMatch = cParams->minMatch;
    ZSTD_blockCompressor const blockCompressor =
        ZSTD_selectBlockCompressor(cParams->strategy, useRowMatchFinder, ZSTD_matchState_dictMode(ms));
    /* Input bounds */
    BYTE const* const istart = (BYTE const*)src;
    BYTE const* const iend = istart + srcSize;
    /* Input positions */
    BYTE const* ip = istart;

    DEBUGLOG(5, "ZSTD_ldm_blockCompress: srcSize=%zu", srcSize);
    /* If using opt parser, use LDMs only as candidates rather than always accepting them */
    if (cParams->strategy >= ZSTD_btopt) {
        size_t lastLLSize;
        ms->ldmSeqStore = rawSeqStore;
        lastLLSize = blockCompressor(ms, seqStore, rep, src, srcSize);
        ZSTD_ldm_skipRawSeqStoreBytes(rawSeqStore, srcSize);
        return lastLLSize;
    }

    assert(rawSeqStore->pos <= rawSeqStore->size);
    assert(rawSeqStore->size <= rawSeqStore->capacity);
    /* Loop through each sequence and apply the block compressor to the literals */
    while (rawSeqStore->pos < rawSeqStore->size && ip < iend) {
        /* maybeSplitSequence updates rawSeqStore->pos */
        rawSeq const sequence = maybeSplitSequence(rawSeqStore,
                                                   (U32)(iend - ip), minMatch);
        int i;
        /* End signal */
        if (sequence.offset == 0)
            break;

        assert(ip + sequence.litLength + sequence.matchLength <= iend);

        /* Fill tables for block compressor */
        ZSTD_ldm_limitTableUpdate(ms, ip);
        ZSTD_ldm_fillFastTables(ms, ip);
        /* Run the block compressor */
        DEBUGLOG(5, "pos %u : calling block compressor on segment of size %u", (unsigned)(ip-istart), sequence.litLength);
        {
            size_t const newLitLength =
                blockCompressor(ms, seqStore, rep, ip, sequence.litLength);
            ip += sequence.litLength;
            /* Update the repcodes */
            for (i = ZSTD_REP_NUM - 1; i > 0; i--)
                rep[i] = rep[i-1];
            rep[0] = sequence.offset;
            /* Store the sequence */
            ZSTD_storeSeq(seqStore, newLitLength, ip - newLitLength, iend,
                          OFFSET_TO_OFFBASE(sequence.offset),
                          sequence.matchLength);
            ip += sequence.matchLength;
        }
    }
    /* Fill the tables for the block compressor */
    ZSTD_ldm_limitTableUpdate(ms, ip);
    ZSTD_ldm_fillFastTables(ms, ip);
    /* Compress the last literals */
    return blockCompressor(ms, seqStore, rep, ip, iend - ip);
}
Some files were not shown because too many files have changed in this diff