mirror of https://github.com/PCSX2/pcsx2.git
3rdparty: Update LZMA/7zipSDK to 23.01
This commit is contained in:
parent e3ccb500d8
commit fc2a750f85
@@ -6,13 +6,17 @@ add_library(pcsx2-lzma STATIC
include/7zFile.h
include/7zTypes.h
include/7zVersion.h
include/7zWindows.h
include/Alloc.h
include/Aes.h
include/Bcj2.h
include/Bra.h
include/Compiler.h
include/CpuArch.h
include/Delta.h
include/DllSecur.h
include/LzFind.h
include/LzFindMt.h
include/LzHash.h
include/Lzma2Dec.h
include/Lzma2DecMt.h

@@ -21,11 +25,16 @@ add_library(pcsx2-lzma STATIC
include/LzmaDec.h
include/LzmaEnc.h
include/LzmaLib.h
include/MtCoder.h
include/MtDec.h
include/Ppmd.h
include/Ppmd7.h
include/Precomp.h
include/RotateDefs.h
include/Sha256.h
include/Sort.h
include/SwapBytes.h
include/Threads.h
include/Xz.h
include/XzCrc64.h
include/XzEnc.h

@@ -38,6 +47,8 @@ add_library(pcsx2-lzma STATIC
src/7zDec.c
src/7zFile.c
src/7zStream.c
src/Aes.c
src/AesOpt.c
src/Alloc.c
src/Bcj2.c
src/Bcj2Enc.c

@@ -45,8 +56,10 @@ add_library(pcsx2-lzma STATIC
src/Bra86.c
src/BraIA64.c
src/CpuArch.c
src/DllSecur.c
src/Delta.c
src/LzFind.c
src/LzFindMt.c
src/LzFindOpt.c
src/Lzma2Dec.c
src/Lzma2DecMt.c

@@ -56,11 +69,16 @@ add_library(pcsx2-lzma STATIC
src/LzmaDec.c
src/LzmaEnc.c
src/LzmaLib.c
src/MtCoder.c
src/MtDec.c
src/Ppmd7.c
src/Ppmd7Dec.c
src/Ppmd7Enc.c
src/Sha256.c
src/Sha256Opt.c
src/Sort.c
src/SwapBytes.c
src/Threads.c
src/Xz.c
src/XzCrc64.c
src/XzCrc64Opt.c
@@ -1,8 +1,8 @@
/* 7z.h -- 7z interface
2018-07-02 : Igor Pavlov : Public domain */
2023-04-02 : Igor Pavlov : Public domain */

#ifndef __7Z_H
#define __7Z_H
#ifndef ZIP7_INC_7Z_H
#define ZIP7_INC_7Z_H

#include "7zTypes.h"

@@ -98,7 +98,7 @@ typedef struct
UInt64 SzAr_GetFolderUnpackSize(const CSzAr *p, UInt32 folderIndex);

SRes SzAr_DecodeFolder(const CSzAr *p, UInt32 folderIndex,
ILookInStream *stream, UInt64 startPos,
ILookInStreamPtr stream, UInt64 startPos,
Byte *outBuffer, size_t outSize,
ISzAllocPtr allocMain);

@@ -174,7 +174,7 @@ UInt16 *SzArEx_GetFullNameUtf16_Back(const CSzArEx *p, size_t fileIndex, UInt16

SRes SzArEx_Extract(
const CSzArEx *db,
ILookInStream *inStream,
ILookInStreamPtr inStream,
UInt32 fileIndex, /* index of file */
UInt32 *blockIndex, /* index of solid block */
Byte **outBuffer, /* pointer to pointer to output buffer (allocated with allocMain) */

@@ -196,7 +196,7 @@ SZ_ERROR_INPUT_EOF
SZ_ERROR_FAIL
*/

SRes SzArEx_Open(CSzArEx *p, ILookInStream *inStream,
SRes SzArEx_Open(CSzArEx *p, ILookInStreamPtr inStream,
ISzAllocPtr allocMain, ISzAllocPtr allocTemp);

EXTERN_C_END
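For context on the ILookInStream -> ILookInStreamPtr change above, this is roughly how the open path is driven in the SDK's 7zMain example. A hedged sketch, not code from this commit; g_Alloc comes from Alloc.h, and the 16 KB look-ahead buffer size is an arbitrary choice made here:

#include "7z.h"
#include "7zCrc.h"
#include "7zFile.h"
#include "Alloc.h"

static SRes OpenArchive(const char *path, CSzArEx *db)
{
  CFileInStream archiveStream;
  CLookToRead2 lookStream;
  SRes res;

  FileInStream_CreateVTable(&archiveStream);
  if (InFile_Open(&archiveStream.file, path) != 0)
    return SZ_ERROR_FAIL;

  LookToRead2_CreateVTable(&lookStream, False);
  lookStream.buf = (Byte *)ISzAlloc_Alloc(&g_Alloc, 1 << 14);  /* error handling trimmed */
  lookStream.bufSize = 1 << 14;
  lookStream.realStream = &archiveStream.vt;
  LookToRead2_INIT(&lookStream);        /* renamed from LookToRead2_Init in 23.01 */

  CrcGenerateTable();                   /* required once before SzArEx_Open */
  SzArEx_Init(db);
  res = SzArEx_Open(db, &lookStream.vt, &g_Alloc, &g_Alloc);

  ISzAlloc_Free(&g_Alloc, lookStream.buf);
  File_Close(&archiveStream.file);
  return res;
}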
@@ -1,8 +1,8 @@
/* 7zAlloc.h -- Allocation functions
2017-04-03 : Igor Pavlov : Public domain */
2023-03-04 : Igor Pavlov : Public domain */

#ifndef __7Z_ALLOC_H
#define __7Z_ALLOC_H
#ifndef ZIP7_INC_7Z_ALLOC_H
#define ZIP7_INC_7Z_ALLOC_H

#include "7zTypes.h"
@@ -1,8 +1,8 @@
/* 7zBuf.h -- Byte Buffer
2017-04-03 : Igor Pavlov : Public domain */
2023-03-04 : Igor Pavlov : Public domain */

#ifndef __7Z_BUF_H
#define __7Z_BUF_H
#ifndef ZIP7_INC_7Z_BUF_H
#define ZIP7_INC_7Z_BUF_H

#include "7zTypes.h"
@@ -1,8 +1,8 @@
/* 7zCrc.h -- CRC32 calculation
2013-01-18 : Igor Pavlov : Public domain */
2023-04-02 : Igor Pavlov : Public domain */

#ifndef __7Z_CRC_H
#define __7Z_CRC_H
#ifndef ZIP7_INC_7Z_CRC_H
#define ZIP7_INC_7Z_CRC_H

#include "7zTypes.h"

@@ -11,14 +11,16 @@ EXTERN_C_BEGIN
extern UInt32 g_CrcTable[];

/* Call CrcGenerateTable one time before other CRC functions */
void MY_FAST_CALL CrcGenerateTable(void);
void Z7_FASTCALL CrcGenerateTable(void);

#define CRC_INIT_VAL 0xFFFFFFFF
#define CRC_GET_DIGEST(crc) ((crc) ^ CRC_INIT_VAL)
#define CRC_UPDATE_BYTE(crc, b) (g_CrcTable[((crc) ^ (b)) & 0xFF] ^ ((crc) >> 8))

UInt32 MY_FAST_CALL CrcUpdate(UInt32 crc, const void *data, size_t size);
UInt32 MY_FAST_CALL CrcCalc(const void *data, size_t size);
UInt32 Z7_FASTCALL CrcUpdate(UInt32 crc, const void *data, size_t size);
UInt32 Z7_FASTCALL CrcCalc(const void *data, size_t size);

typedef UInt32 (Z7_FASTCALL *CRC_FUNC)(UInt32 v, const void *data, size_t size, const UInt32 *table);

EXTERN_C_END
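As a quick reference for the renamed calling-convention macro above, a minimal usage sketch of the CRC interface, based only on the declarations shown (illustrative, not part of this commit):

#include "7zCrc.h"

/* Incremental CRC32 over a buffer; CrcGenerateTable() must run once per
   process before any other CRC call. */
static UInt32 Crc_OfBuffer(const Byte *data, size_t size)
{
  UInt32 crc = CRC_INIT_VAL;
  CrcGenerateTable();
  crc = CrcUpdate(crc, data, size);
  return CRC_GET_DIGEST(crc);   /* same result as CrcCalc(data, size) */
}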
@@ -1,8 +1,8 @@
/* 7zFile.h -- File IO
2021-02-15 : Igor Pavlov : Public domain */
2023-03-05 : Igor Pavlov : Public domain */

#ifndef __7Z_FILE_H
#define __7Z_FILE_H
#ifndef ZIP7_INC_FILE_H
#define ZIP7_INC_FILE_H

#ifdef _WIN32
#define USE_WINDOWS_FILE

@@ -10,7 +10,8 @@
#endif

#ifdef USE_WINDOWS_FILE
#include <windows.h>
#include "7zWindows.h"

#else
// note: USE_FOPEN mode is limited to 32-bit file size
// #define USE_FOPEN
@@ -1,8 +1,8 @@
/* 7zTypes.h -- Basic types
2021-12-25 : Igor Pavlov : Public domain */
2023-04-02 : Igor Pavlov : Public domain */

#ifndef __7Z_TYPES_H
#define __7Z_TYPES_H
#ifndef ZIP7_7Z_TYPES_H
#define ZIP7_7Z_TYPES_H

#ifdef _WIN32
/* #include <windows.h> */

@@ -52,6 +52,11 @@ typedef int SRes;
#define MY_ALIGN(n)
#endif
#else
/*
// C11/C++11:
#include <stdalign.h>
#define MY_ALIGN(n) alignas(n)
*/
#define MY_ALIGN(n) __attribute__ ((aligned(n)))
#endif
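The MY_ALIGN macro above is what the SDK uses to place decoder state and AES/SHA scratch buffers on an aligned boundary. A minimal illustration (not part of this commit):

#include "7zTypes.h"

static void UseAlignedBuffer(void)
{
  MY_ALIGN(16) Byte buf[64];   /* 16-byte aligned scratch buffer */
  buf[0] = 0;
  (void)buf;
}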
@@ -62,7 +67,7 @@ typedef int SRes;
typedef unsigned WRes;
#define MY_SRes_HRESULT_FROM_WRes(x) HRESULT_FROM_WIN32(x)

// #define MY_HRES_ERROR__INTERNAL_ERROR MY_SRes_HRESULT_FROM_WRes(ERROR_INTERNAL_ERROR)
// #define MY_HRES_ERROR_INTERNAL_ERROR MY_SRes_HRESULT_FROM_WRes(ERROR_INTERNAL_ERROR)

#else // _WIN32

@@ -70,13 +75,13 @@ typedef unsigned WRes;
typedef int WRes;

// (FACILITY_ERRNO = 0x800) is 7zip's FACILITY constant to represent (errno) errors in HRESULT
#define MY__FACILITY_ERRNO 0x800
#define MY__FACILITY_WIN32 7
#define MY__FACILITY__WRes MY__FACILITY_ERRNO
#define MY_FACILITY_ERRNO 0x800
#define MY_FACILITY_WIN32 7
#define MY_FACILITY_WRes MY_FACILITY_ERRNO

#define MY_HRESULT_FROM_errno_CONST_ERROR(x) ((HRESULT)( \
( (HRESULT)(x) & 0x0000FFFF) \
| (MY__FACILITY__WRes << 16) \
| (MY_FACILITY_WRes << 16) \
| (HRESULT)0x80000000 ))

#define MY_SRes_HRESULT_FROM_WRes(x) \
@@ -160,18 +161,18 @@ typedef unsigned long UINT_PTR;

#ifndef RINOK
#define RINOK(x) { int __result__ = (x); if (__result__ != 0) return __result__; }
#define RINOK(x) { const int _result_ = (x); if (_result_ != 0) return _result_; }
#endif

#ifndef RINOK_WRes
#define RINOK_WRes(x) { WRes __result__ = (x); if (__result__ != 0) return __result__; }
#define RINOK_WRes(x) { const WRes _result_ = (x); if (_result_ != 0) return _result_; }
#endif

typedef unsigned char Byte;
typedef short Int16;
typedef unsigned short UInt16;

#ifdef _LZMA_UINT32_IS_ULONG
#ifdef Z7_DECL_Int32_AS_long
typedef long Int32;
typedef unsigned long UInt32;
#else
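The renamed RINOK identifiers above only change the reserved-identifier spelling; the early-return pattern is unchanged. A minimal sketch of how SDK code uses it, written against declarations from this same header (illustrative only):

#include "7zTypes.h"

/* Propagate the first non-SZ_OK result to the caller. */
static SRes ReadAndRewind(ILookInStreamPtr inStream, Byte *buf, size_t size)
{
  RINOK(LookInStream_Read(inStream, buf, size))
  RINOK(LookInStream_SeekTo(inStream, 0))
  return SZ_OK;
}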
@@ -210,37 +211,51 @@ typedef size_t SIZE_T;
#endif // _WIN32

#define MY_HRES_ERROR__INTERNAL_ERROR ((HRESULT)0x8007054FL)
#define MY_HRES_ERROR_INTERNAL_ERROR ((HRESULT)0x8007054FL)

#ifdef _SZ_NO_INT_64

/* define _SZ_NO_INT_64, if your compiler doesn't support 64-bit integers.
NOTES: Some code will work incorrectly in that case! */
#ifdef Z7_DECL_Int64_AS_long

typedef long Int64;
typedef unsigned long UInt64;

#else

#if defined(_MSC_VER) || defined(__BORLANDC__)
#if (defined(_MSC_VER) || defined(__BORLANDC__)) && !defined(__clang__)
typedef __int64 Int64;
typedef unsigned __int64 UInt64;
#define UINT64_CONST(n) n
#else
#if defined(__clang__) || defined(__GNUC__)
#include <stdint.h>
typedef int64_t Int64;
typedef uint64_t UInt64;
#else
typedef long long int Int64;
typedef unsigned long long int UInt64;
#define UINT64_CONST(n) n ## ULL
// #define UINT64_CONST(n) n ## ULL
#endif
#endif

#endif

#ifdef _LZMA_NO_SYSTEM_SIZE_T
typedef UInt32 SizeT;
#define UINT64_CONST(n) n

#ifdef Z7_DECL_SizeT_AS_unsigned_int
typedef unsigned int SizeT;
#else
typedef size_t SizeT;
#endif

/*
#if (defined(_MSC_VER) && _MSC_VER <= 1200)
typedef size_t MY_uintptr_t;
#else
#include <stdint.h>
typedef uintptr_t MY_uintptr_t;
#endif
*/

typedef int BoolInt;
/* typedef BoolInt Bool; */
#define True 1

@@ -248,23 +263,23 @@ typedef int BoolInt;

#ifdef _WIN32
#define MY_STD_CALL __stdcall
#define Z7_STDCALL __stdcall
#else
#define MY_STD_CALL
#define Z7_STDCALL
#endif

#ifdef _MSC_VER

#if _MSC_VER >= 1300
#define MY_NO_INLINE __declspec(noinline)
#define Z7_NO_INLINE __declspec(noinline)
#else
#define MY_NO_INLINE
#define Z7_NO_INLINE
#endif

#define MY_FORCE_INLINE __forceinline
#define Z7_FORCE_INLINE __forceinline

#define MY_CDECL __cdecl
#define MY_FAST_CALL __fastcall
#define Z7_CDECL __cdecl
#define Z7_FASTCALL __fastcall

#else // _MSC_VER

@@ -272,27 +287,25 @@ typedef int BoolInt;
|| (defined(__clang__) && (__clang_major__ >= 4)) \
|| defined(__INTEL_COMPILER) \
|| defined(__xlC__)
#define MY_NO_INLINE __attribute__((noinline))
// #define MY_FORCE_INLINE __attribute__((always_inline)) inline
#define Z7_NO_INLINE __attribute__((noinline))
#define Z7_FORCE_INLINE __attribute__((always_inline)) inline
#else
#define MY_NO_INLINE
#define Z7_NO_INLINE
#define Z7_FORCE_INLINE
#endif

#define MY_FORCE_INLINE

#define MY_CDECL
#define Z7_CDECL

#if defined(_M_IX86) \
|| defined(__i386__)
// #define MY_FAST_CALL __attribute__((fastcall))
// #define MY_FAST_CALL __attribute__((cdecl))
#define MY_FAST_CALL
// #define Z7_FASTCALL __attribute__((fastcall))
// #define Z7_FASTCALL __attribute__((cdecl))
#define Z7_FASTCALL
#elif defined(MY_CPU_AMD64)
// #define MY_FAST_CALL __attribute__((ms_abi))
#define MY_FAST_CALL
// #define Z7_FASTCALL __attribute__((ms_abi))
#define Z7_FASTCALL
#else
#define MY_FAST_CALL
#define Z7_FASTCALL
#endif

#endif // _MSC_VER
@@ -300,41 +313,49 @@ typedef int BoolInt;

/* The following interfaces use first parameter as pointer to structure */

typedef struct IByteIn IByteIn;
struct IByteIn
// #define Z7_C_IFACE_CONST_QUAL
#define Z7_C_IFACE_CONST_QUAL const

#define Z7_C_IFACE_DECL(a) \
struct a ## _; \
typedef Z7_C_IFACE_CONST_QUAL struct a ## _ * a ## Ptr; \
typedef struct a ## _ a; \
struct a ## _

Z7_C_IFACE_DECL (IByteIn)
{
Byte (*Read)(const IByteIn *p); /* reads one byte, returns 0 in case of EOF or error */
Byte (*Read)(IByteInPtr p); /* reads one byte, returns 0 in case of EOF or error */
};
#define IByteIn_Read(p) (p)->Read(p)

typedef struct IByteOut IByteOut;
struct IByteOut
Z7_C_IFACE_DECL (IByteOut)
{
void (*Write)(const IByteOut *p, Byte b);
void (*Write)(IByteOutPtr p, Byte b);
};
#define IByteOut_Write(p, b) (p)->Write(p, b)

typedef struct ISeqInStream ISeqInStream;
struct ISeqInStream
Z7_C_IFACE_DECL (ISeqInStream)
{
SRes (*Read)(const ISeqInStream *p, void *buf, size_t *size);
SRes (*Read)(ISeqInStreamPtr p, void *buf, size_t *size);
/* if (input(*size) != 0 && output(*size) == 0) means end_of_stream.
(output(*size) < input(*size)) is allowed */
};
#define ISeqInStream_Read(p, buf, size) (p)->Read(p, buf, size)

/* try to read as much as avail in stream and limited by (*processedSize) */
SRes SeqInStream_ReadMax(ISeqInStreamPtr stream, void *buf, size_t *processedSize);
/* it can return SZ_ERROR_INPUT_EOF */
SRes SeqInStream_Read(const ISeqInStream *stream, void *buf, size_t size);
SRes SeqInStream_Read2(const ISeqInStream *stream, void *buf, size_t size, SRes errorType);
SRes SeqInStream_ReadByte(const ISeqInStream *stream, Byte *buf);
// SRes SeqInStream_Read(ISeqInStreamPtr stream, void *buf, size_t size);
// SRes SeqInStream_Read2(ISeqInStreamPtr stream, void *buf, size_t size, SRes errorType);
SRes SeqInStream_ReadByte(ISeqInStreamPtr stream, Byte *buf);

typedef struct ISeqOutStream ISeqOutStream;
struct ISeqOutStream
Z7_C_IFACE_DECL (ISeqOutStream)
{
size_t (*Write)(const ISeqOutStream *p, const void *buf, size_t size);
size_t (*Write)(ISeqOutStreamPtr p, const void *buf, size_t size);
/* Returns: result - the number of actually written bytes.
(result < size) means error */
};
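The end-of-stream convention documented on ISeqInStream::Read above is the detail callers most often get wrong. A small illustrative helper written against the declarations shown (not from this commit):

#include "7zTypes.h"

/* Fill buf until it is full or the stream ends; *processed receives the
   number of bytes actually read. */
static SRes ReadFull(ISeqInStreamPtr stream, Byte *buf, size_t bufSize, size_t *processed)
{
  *processed = 0;
  while (*processed != bufSize)
  {
    size_t cur = bufSize - *processed;
    RINOK(ISeqInStream_Read(stream, buf + *processed, &cur))
    if (cur == 0)
      break;              /* non-zero requested, zero returned: end of stream */
    *processed += cur;
  }
  return SZ_OK;
}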
@@ -348,29 +369,26 @@ typedef enum
} ESzSeek;

typedef struct ISeekInStream ISeekInStream;
struct ISeekInStream
Z7_C_IFACE_DECL (ISeekInStream)
{
SRes (*Read)(const ISeekInStream *p, void *buf, size_t *size); /* same as ISeqInStream::Read */
SRes (*Seek)(const ISeekInStream *p, Int64 *pos, ESzSeek origin);
SRes (*Read)(ISeekInStreamPtr p, void *buf, size_t *size); /* same as ISeqInStream::Read */
SRes (*Seek)(ISeekInStreamPtr p, Int64 *pos, ESzSeek origin);
};
#define ISeekInStream_Read(p, buf, size) (p)->Read(p, buf, size)
#define ISeekInStream_Seek(p, pos, origin) (p)->Seek(p, pos, origin)

typedef struct ILookInStream ILookInStream;
struct ILookInStream
Z7_C_IFACE_DECL (ILookInStream)
{
SRes (*Look)(const ILookInStream *p, const void **buf, size_t *size);
SRes (*Look)(ILookInStreamPtr p, const void **buf, size_t *size);
/* if (input(*size) != 0 && output(*size) == 0) means end_of_stream.
(output(*size) > input(*size)) is not allowed
(output(*size) < input(*size)) is allowed */
SRes (*Skip)(const ILookInStream *p, size_t offset);
SRes (*Skip)(ILookInStreamPtr p, size_t offset);
/* offset must be <= output(*size) of Look */

SRes (*Read)(const ILookInStream *p, void *buf, size_t *size);
SRes (*Read)(ILookInStreamPtr p, void *buf, size_t *size);
/* reads directly (without buffer). It's same as ISeqInStream::Read */
SRes (*Seek)(const ILookInStream *p, Int64 *pos, ESzSeek origin);
SRes (*Seek)(ILookInStreamPtr p, Int64 *pos, ESzSeek origin);
};

#define ILookInStream_Look(p, buf, size) (p)->Look(p, buf, size)

@@ -379,19 +397,18 @@ struct ILookInStream
#define ILookInStream_Seek(p, pos, origin) (p)->Seek(p, pos, origin)

SRes LookInStream_LookRead(const ILookInStream *stream, void *buf, size_t *size);
SRes LookInStream_SeekTo(const ILookInStream *stream, UInt64 offset);
SRes LookInStream_LookRead(ILookInStreamPtr stream, void *buf, size_t *size);
SRes LookInStream_SeekTo(ILookInStreamPtr stream, UInt64 offset);

/* reads via ILookInStream::Read */
SRes LookInStream_Read2(const ILookInStream *stream, void *buf, size_t size, SRes errorType);
SRes LookInStream_Read(const ILookInStream *stream, void *buf, size_t size);

SRes LookInStream_Read2(ILookInStreamPtr stream, void *buf, size_t size, SRes errorType);
SRes LookInStream_Read(ILookInStreamPtr stream, void *buf, size_t size);

typedef struct
{
ILookInStream vt;
const ISeekInStream *realStream;
ISeekInStreamPtr realStream;

size_t pos;
size_t size; /* it's data size */

@@ -403,13 +420,13 @@ typedef struct

void LookToRead2_CreateVTable(CLookToRead2 *p, int lookahead);

#define LookToRead2_Init(p) { (p)->pos = (p)->size = 0; }
#define LookToRead2_INIT(p) { (p)->pos = (p)->size = 0; }

typedef struct
{
ISeqInStream vt;
const ILookInStream *realStream;
ILookInStreamPtr realStream;
} CSecToLook;

void SecToLook_CreateVTable(CSecToLook *p);

@@ -419,20 +436,19 @@ void SecToLook_CreateVTable(CSecToLook *p);
typedef struct
{
ISeqInStream vt;
const ILookInStream *realStream;
ILookInStreamPtr realStream;
} CSecToRead;

void SecToRead_CreateVTable(CSecToRead *p);

typedef struct ICompressProgress ICompressProgress;

struct ICompressProgress
Z7_C_IFACE_DECL (ICompressProgress)
{
SRes (*Progress)(const ICompressProgress *p, UInt64 inSize, UInt64 outSize);
SRes (*Progress)(ICompressProgressPtr p, UInt64 inSize, UInt64 outSize);
/* Returns: result. (result != SZ_OK) means break.
Value (UInt64)(Int64)-1 for size means unknown value. */
};

#define ICompressProgress_Progress(p, inSize, outSize) (p)->Progress(p, inSize, outSize)

@@ -470,13 +486,13 @@ struct ISzAlloc

#ifndef MY_container_of
#ifndef Z7_container_of

/*
#define MY_container_of(ptr, type, m) container_of(ptr, type, m)
#define MY_container_of(ptr, type, m) CONTAINING_RECORD(ptr, type, m)
#define MY_container_of(ptr, type, m) ((type *)((char *)(ptr) - offsetof(type, m)))
#define MY_container_of(ptr, type, m) (&((type *)0)->m == (ptr), ((type *)(((char *)(ptr)) - MY_offsetof(type, m))))
#define Z7_container_of(ptr, type, m) container_of(ptr, type, m)
#define Z7_container_of(ptr, type, m) CONTAINING_RECORD(ptr, type, m)
#define Z7_container_of(ptr, type, m) ((type *)((char *)(ptr) - offsetof(type, m)))
#define Z7_container_of(ptr, type, m) (&((type *)0)->m == (ptr), ((type *)(((char *)(ptr)) - MY_offsetof(type, m))))
*/

/*

@@ -485,24 +501,64 @@ struct ISzAlloc
GCC 4.8.1 : classes with non-public variable members"
*/

#define MY_container_of(ptr, type, m) ((type *)(void *)((char *)(void *)(1 ? (ptr) : &((type *)0)->m) - MY_offsetof(type, m)))
#define Z7_container_of(ptr, type, m) \
((type *)(void *)((char *)(void *) \
(1 ? (ptr) : &((type *)NULL)->m) - MY_offsetof(type, m)))

#define Z7_container_of_CONST(ptr, type, m) \
((const type *)(const void *)((const char *)(const void *) \
(1 ? (ptr) : &((type *)NULL)->m) - MY_offsetof(type, m)))

/*
#define Z7_container_of_NON_CONST_FROM_CONST(ptr, type, m) \
((type *)(void *)(const void *)((const char *)(const void *) \
(1 ? (ptr) : &((type *)NULL)->m) - MY_offsetof(type, m)))
*/

#endif

#define CONTAINER_FROM_VTBL_SIMPLE(ptr, type, m) ((type *)(void *)(ptr))
#define Z7_CONTAINER_FROM_VTBL_SIMPLE(ptr, type, m) ((type *)(void *)(ptr))

// #define Z7_CONTAINER_FROM_VTBL(ptr, type, m) Z7_CONTAINER_FROM_VTBL_SIMPLE(ptr, type, m)
#define Z7_CONTAINER_FROM_VTBL(ptr, type, m) Z7_container_of(ptr, type, m)
// #define Z7_CONTAINER_FROM_VTBL(ptr, type, m) Z7_container_of_NON_CONST_FROM_CONST(ptr, type, m)

#define Z7_CONTAINER_FROM_VTBL_CONST(ptr, type, m) Z7_container_of_CONST(ptr, type, m)

#define Z7_CONTAINER_FROM_VTBL_CLS(ptr, type, m) Z7_CONTAINER_FROM_VTBL_SIMPLE(ptr, type, m)
/*
#define CONTAINER_FROM_VTBL(ptr, type, m) CONTAINER_FROM_VTBL_SIMPLE(ptr, type, m)
#define Z7_CONTAINER_FROM_VTBL_CLS(ptr, type, m) Z7_CONTAINER_FROM_VTBL(ptr, type, m)
*/
#define CONTAINER_FROM_VTBL(ptr, type, m) MY_container_of(ptr, type, m)
#if defined (__clang__) || defined(__GNUC__)
#define Z7_DIAGNOSCTIC_IGNORE_BEGIN_CAST_QUAL \
_Pragma("GCC diagnostic push") \
_Pragma("GCC diagnostic ignored \"-Wcast-qual\"")
#define Z7_DIAGNOSCTIC_IGNORE_END_CAST_QUAL \
_Pragma("GCC diagnostic pop")
#else
#define Z7_DIAGNOSCTIC_IGNORE_BEGIN_CAST_QUAL
#define Z7_DIAGNOSCTIC_IGNORE_END_CAST_QUAL
#endif

#define CONTAINER_FROM_VTBL_CLS(ptr, type, m) CONTAINER_FROM_VTBL_SIMPLE(ptr, type, m)
/*
#define CONTAINER_FROM_VTBL_CLS(ptr, type, m) CONTAINER_FROM_VTBL(ptr, type, m)
*/
#define Z7_CONTAINER_FROM_VTBL_TO_DECL_VAR(ptr, type, m, p) \
Z7_DIAGNOSCTIC_IGNORE_BEGIN_CAST_QUAL \
type *p = Z7_CONTAINER_FROM_VTBL(ptr, type, m); \
Z7_DIAGNOSCTIC_IGNORE_END_CAST_QUAL

#define Z7_CONTAINER_FROM_VTBL_TO_DECL_VAR_pp_vt_p(type) \
Z7_CONTAINER_FROM_VTBL_TO_DECL_VAR(pp, type, vt, p)

#define MY_memset_0_ARRAY(a) memset((a), 0, sizeof(a))
// #define ZIP7_DECLARE_HANDLE(name) typedef void *name;
#define Z7_DECLARE_HANDLE(name) struct name##_dummy{int unused;}; typedef struct name##_dummy *name;

#define Z7_memset_0_ARRAY(a) memset((a), 0, sizeof(a))

#ifndef Z7_ARRAY_SIZE
#define Z7_ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))
#endif

#ifdef _WIN32
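The Z7_CONTAINER_FROM_VTBL_TO_DECL_VAR_pp_vt_p helper above is how 23.01 callbacks recover their containing object from the const interface pointer. A minimal in-memory ISeqInStream written against these macros (an illustrative sketch, not code from this commit; the CBufInStream name is invented here):

#include <string.h>
#include "7zTypes.h"

typedef struct
{
  ISeqInStream vt;      /* must be the member named "vt" for the _pp_vt_p helper */
  const Byte *data;
  size_t rem;
} CBufInStream;

static SRes BufInStream_Read(ISeqInStreamPtr pp, void *buf, size_t *size)
{
  Z7_CONTAINER_FROM_VTBL_TO_DECL_VAR_pp_vt_p(CBufInStream)
  size_t cur = *size;
  if (cur > p->rem)
    cur = p->rem;
  memcpy(buf, p->data, cur);
  p->data += cur;
  p->rem -= cur;
  *size = cur;            /* zero output for non-zero input signals end of stream */
  return SZ_OK;
}

static void BufInStream_Init(CBufInStream *p, const Byte *data, size_t size)
{
  p->vt.Read = BufInStream_Read;
  p->data = data;
  p->rem = size;
}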
@@ -520,6 +576,22 @@ struct ISzAlloc

#endif

#define k_PropVar_TimePrec_0 0
#define k_PropVar_TimePrec_Unix 1
#define k_PropVar_TimePrec_DOS 2
#define k_PropVar_TimePrec_HighPrec 3
#define k_PropVar_TimePrec_Base 16
#define k_PropVar_TimePrec_100ns (k_PropVar_TimePrec_Base + 7)
#define k_PropVar_TimePrec_1ns (k_PropVar_TimePrec_Base + 9)

EXTERN_C_END

#endif

/*
#ifndef Z7_ST
#ifdef _7ZIP_ST
#define Z7_ST
#endif
#endif
*/
@@ -1,7 +1,7 @@
#define MY_VER_MAJOR 21
#define MY_VER_MINOR 07
#define MY_VER_MAJOR 23
#define MY_VER_MINOR 01
#define MY_VER_BUILD 0
#define MY_VERSION_NUMBERS "21.07"
#define MY_VERSION_NUMBERS "23.01"
#define MY_VERSION MY_VERSION_NUMBERS

#ifdef MY_CPU_NAME

@@ -10,12 +10,12 @@
#define MY_VERSION_CPU MY_VERSION
#endif

#define MY_DATE "2021-12-26"
#define MY_DATE "2023-06-20"
#undef MY_COPYRIGHT
#undef MY_VERSION_COPYRIGHT_DATE
#define MY_AUTHOR_NAME "Igor Pavlov"
#define MY_COPYRIGHT_PD "Igor Pavlov : Public domain"
#define MY_COPYRIGHT_CR "Copyright (c) 1999-2021 Igor Pavlov"
#define MY_COPYRIGHT_CR "Copyright (c) 1999-2023 Igor Pavlov"

#ifdef USE_COPYRIGHT_CR
#define MY_COPYRIGHT MY_COPYRIGHT_CR
@@ -0,0 +1,101 @@
/* 7zWindows.h -- StdAfx
2023-04-02 : Igor Pavlov : Public domain */

#ifndef ZIP7_INC_7Z_WINDOWS_H
#define ZIP7_INC_7Z_WINDOWS_H

#ifdef _WIN32

#if defined(__clang__)
# pragma clang diagnostic push
#endif

#if defined(_MSC_VER)

#pragma warning(push)
#pragma warning(disable : 4668) // '_WIN32_WINNT' is not defined as a preprocessor macro, replacing with '0' for '#if/#elif'

#if _MSC_VER == 1900
// for old kit10 versions
// #pragma warning(disable : 4255) // winuser.h(13979): warning C4255: 'GetThreadDpiAwarenessContext':
#endif
// win10 Windows Kit:
#endif // _MSC_VER

#if defined(_MSC_VER) && _MSC_VER <= 1200 && !defined(_WIN64)
// for msvc6 without sdk2003
#define RPC_NO_WINDOWS_H
#endif

#if defined(__MINGW32__) || defined(__MINGW64__)
// #if defined(__GNUC__) && !defined(__clang__)
#include <windows.h>
#else
#include <Windows.h>
#endif
// #include <basetsd.h>
// #include <wtypes.h>

// but if precompiled with clang-cl then we need
// #include <windows.h>
#if defined(_MSC_VER)
#pragma warning(pop)
#endif

#if defined(__clang__)
# pragma clang diagnostic pop
#endif

#if defined(_MSC_VER) && _MSC_VER <= 1200 && !defined(_WIN64)
#ifndef _W64

typedef long LONG_PTR, *PLONG_PTR;
typedef unsigned long ULONG_PTR, *PULONG_PTR;
typedef ULONG_PTR DWORD_PTR, *PDWORD_PTR;

#define Z7_OLD_WIN_SDK
#endif // _W64
#endif // _MSC_VER == 1200

#ifdef Z7_OLD_WIN_SDK

#ifndef INVALID_FILE_ATTRIBUTES
#define INVALID_FILE_ATTRIBUTES ((DWORD)-1)
#endif
#ifndef INVALID_SET_FILE_POINTER
#define INVALID_SET_FILE_POINTER ((DWORD)-1)
#endif
#ifndef FILE_SPECIAL_ACCESS
#define FILE_SPECIAL_ACCESS (FILE_ANY_ACCESS)
#endif

// ShlObj.h:
// #define BIF_NEWDIALOGSTYLE 0x0040

#pragma warning(disable : 4201)
// #pragma warning(disable : 4115)

#undef VARIANT_TRUE
#define VARIANT_TRUE ((VARIANT_BOOL)-1)
#endif

#endif // Z7_OLD_WIN_SDK

#ifdef UNDER_CE
#undef VARIANT_TRUE
#define VARIANT_TRUE ((VARIANT_BOOL)-1)
#endif

#if defined(_MSC_VER)
#if _MSC_VER >= 1400 && _MSC_VER <= 1600
// BaseTsd.h(148) : 'HandleToULong' : unreferenced inline function has been removed
// string.h
// #pragma warning(disable : 4514)
#endif
#endif

/* #include "7zTypes.h" */

#endif
@@ -0,0 +1,60 @@
/* Aes.h -- AES encryption / decryption
2023-04-02 : Igor Pavlov : Public domain */

#ifndef ZIP7_INC_AES_H
#define ZIP7_INC_AES_H

#include "7zTypes.h"

EXTERN_C_BEGIN

#define AES_BLOCK_SIZE 16

/* Call AesGenTables one time before other AES functions */
void AesGenTables(void);

/* UInt32 pointers must be 16-byte aligned */

/* 16-byte (4 * 32-bit words) blocks: 1 (IV) + 1 (keyMode) + 15 (AES-256 roundKeys) */
#define AES_NUM_IVMRK_WORDS ((1 + 1 + 15) * 4)

/* aes - 16-byte aligned pointer to keyMode+roundKeys sequence */
/* keySize = 16 or 24 or 32 (bytes) */
typedef void (Z7_FASTCALL *AES_SET_KEY_FUNC)(UInt32 *aes, const Byte *key, unsigned keySize);
void Z7_FASTCALL Aes_SetKey_Enc(UInt32 *aes, const Byte *key, unsigned keySize);
void Z7_FASTCALL Aes_SetKey_Dec(UInt32 *aes, const Byte *key, unsigned keySize);

/* ivAes - 16-byte aligned pointer to iv+keyMode+roundKeys sequence: UInt32[AES_NUM_IVMRK_WORDS] */
void AesCbc_Init(UInt32 *ivAes, const Byte *iv); /* iv size is AES_BLOCK_SIZE */

/* data - 16-byte aligned pointer to data */
/* numBlocks - the number of 16-byte blocks in data array */
typedef void (Z7_FASTCALL *AES_CODE_FUNC)(UInt32 *ivAes, Byte *data, size_t numBlocks);

extern AES_CODE_FUNC g_AesCbc_Decode;
#ifndef Z7_SFX
extern AES_CODE_FUNC g_AesCbc_Encode;
extern AES_CODE_FUNC g_AesCtr_Code;
#define k_Aes_SupportedFunctions_HW (1 << 2)
#define k_Aes_SupportedFunctions_HW_256 (1 << 3)
extern UInt32 g_Aes_SupportedFunctions_Flags;
#endif

#define Z7_DECLARE_AES_CODE_FUNC(funcName) \
void Z7_FASTCALL funcName(UInt32 *ivAes, Byte *data, size_t numBlocks);

Z7_DECLARE_AES_CODE_FUNC (AesCbc_Encode)
Z7_DECLARE_AES_CODE_FUNC (AesCbc_Decode)
Z7_DECLARE_AES_CODE_FUNC (AesCtr_Code)

Z7_DECLARE_AES_CODE_FUNC (AesCbc_Encode_HW)
Z7_DECLARE_AES_CODE_FUNC (AesCbc_Decode_HW)
Z7_DECLARE_AES_CODE_FUNC (AesCtr_Code_HW)

Z7_DECLARE_AES_CODE_FUNC (AesCbc_Decode_HW_256)
Z7_DECLARE_AES_CODE_FUNC (AesCtr_Code_HW_256)

EXTERN_C_END

#endif
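Putting the declarations above together, a minimal CBC-decrypt sketch might look like the following. Illustrative only; in particular, placing the round-key area at word offset 4, right after the 4 IV words, is an assumption based on the ivAes layout comment above:

#include "Aes.h"

static void DecryptCbcInPlace(Byte *data, size_t numBlocks,
    const Byte *key, unsigned keySize, const Byte *iv)
{
  MY_ALIGN(16) UInt32 ivAes[AES_NUM_IVMRK_WORDS];
  AesGenTables();                           /* normally done once at program startup */
  Aes_SetKey_Dec(ivAes + 4, key, keySize);  /* keyMode + round keys after the 4 IV words (assumption) */
  AesCbc_Init(ivAes, iv);                   /* copies the 16-byte IV into words 0..3 */
  g_AesCbc_Decode(ivAes, data, numBlocks);  /* data must be 16-byte aligned */
}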
@@ -1,19 +1,32 @@
/* Alloc.h -- Memory allocation functions
2021-07-13 : Igor Pavlov : Public domain */
2023-03-04 : Igor Pavlov : Public domain */

#ifndef __COMMON_ALLOC_H
#define __COMMON_ALLOC_H
#ifndef ZIP7_INC_ALLOC_H
#define ZIP7_INC_ALLOC_H

#include "7zTypes.h"

EXTERN_C_BEGIN

/*
MyFree(NULL) : is allowed, as free(NULL)
MyAlloc(0) : returns NULL : but malloc(0) is allowed to return NULL or non_NULL
MyRealloc(NULL, 0) : returns NULL : but realloc(NULL, 0) is allowed to return NULL or non_NULL
MyRealloc() is similar to realloc() for the following cases:
MyRealloc(non_NULL, 0) : returns NULL and always calls MyFree(ptr)
MyRealloc(NULL, non_ZERO) : returns NULL, if allocation failed
MyRealloc(non_NULL, non_ZERO) : returns NULL, if reallocation failed
*/

void *MyAlloc(size_t size);
void MyFree(void *address);
void *MyRealloc(void *address, size_t size);

#ifdef _WIN32

#ifdef Z7_LARGE_PAGES
void SetLargePageSize(void);
#endif

void *MidAlloc(size_t size);
void MidFree(void *address);
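A tiny sketch of the MyRealloc semantics documented above (illustrative, not part of this commit):

#include "Alloc.h"

static Byte *ResizeBlock(Byte *block, size_t newSize)
{
  /* MyRealloc(NULL, n) allocates a new block; MyRealloc(p, 0) calls MyFree(p)
     and returns NULL, per the rules quoted above. */
  return (Byte *)MyRealloc(block, newSize);
}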
@@ -1,8 +1,8 @@
/* Bcj2.h -- BCJ2 Converter for x86 code
2014-11-10 : Igor Pavlov : Public domain */
/* Bcj2.h -- BCJ2 converter for x86 code (Branch CALL/JUMP variant2)
2023-03-02 : Igor Pavlov : Public domain */

#ifndef __BCJ2_H
#define __BCJ2_H
#ifndef ZIP7_INC_BCJ2_H
#define ZIP7_INC_BCJ2_H

#include "7zTypes.h"

@@ -26,37 +26,68 @@ enum
BCJ2_DEC_STATE_ORIG_3,

BCJ2_DEC_STATE_ORIG,
BCJ2_DEC_STATE_OK
BCJ2_DEC_STATE_ERROR /* after detected data error */
};

enum
{
BCJ2_ENC_STATE_ORIG = BCJ2_NUM_STREAMS,
BCJ2_ENC_STATE_OK
BCJ2_ENC_STATE_FINISHED /* it's state after fully encoded stream */
};

#define BCJ2_IS_32BIT_STREAM(s) ((s) == BCJ2_STREAM_CALL || (s) == BCJ2_STREAM_JUMP)
/* #define BCJ2_IS_32BIT_STREAM(s) ((s) == BCJ2_STREAM_CALL || (s) == BCJ2_STREAM_JUMP) */
#define BCJ2_IS_32BIT_STREAM(s) ((unsigned)((unsigned)(s) - (unsigned)BCJ2_STREAM_CALL) < 2)

/*
CBcj2Dec / CBcj2Enc
bufs sizes:
BUF_SIZE(n) = lims[n] - bufs[n]
bufs sizes for BCJ2_STREAM_CALL and BCJ2_STREAM_JUMP must be mutliply of 4:
bufs sizes for BCJ2_STREAM_CALL and BCJ2_STREAM_JUMP must be multiply of 4:
(BUF_SIZE(BCJ2_STREAM_CALL) & 3) == 0
(BUF_SIZE(BCJ2_STREAM_JUMP) & 3) == 0
*/

// typedef UInt32 CBcj2Prob;
typedef UInt16 CBcj2Prob;

/*
BCJ2 encoder / decoder internal requirements:
- If last bytes of stream contain marker (e8/e8/0f8x), then
there is also encoded symbol (0 : no conversion) in RC stream.
- One case of overlapped instructions is supported,
if last byte of converted instruction is (0f) and next byte is (8x):
marker [xx xx xx 0f] 8x
then the pair (0f 8x) is treated as marker.
*/

/* ---------- BCJ2 Decoder ---------- */

/*
CBcj2Dec:
dest is allowed to overlap with bufs[BCJ2_STREAM_MAIN], with the following conditions:
(dest) is allowed to overlap with bufs[BCJ2_STREAM_MAIN], with the following conditions:
bufs[BCJ2_STREAM_MAIN] >= dest &&
bufs[BCJ2_STREAM_MAIN] - dest >= tempReserv +
bufs[BCJ2_STREAM_MAIN] - dest >=
BUF_SIZE(BCJ2_STREAM_CALL) +
BUF_SIZE(BCJ2_STREAM_JUMP)
tempReserv = 0 : for first call of Bcj2Dec_Decode
tempReserv = 4 : for any other calls of Bcj2Dec_Decode
overlap with offset = 1 is not allowed
reserve = bufs[BCJ2_STREAM_MAIN] - dest -
( BUF_SIZE(BCJ2_STREAM_CALL) +
BUF_SIZE(BCJ2_STREAM_JUMP) )
and additional conditions:
if (it's first call of Bcj2Dec_Decode() after Bcj2Dec_Init())
{
(reserve != 1) : if (ver < v23.00)
}
else // if there are more than one calls of Bcj2Dec_Decode() after Bcj2Dec_Init())
{
(reserve >= 6) : if (ver < v23.00)
(reserve >= 4) : if (ver >= v23.00)
We need that (reserve) because after first call of Bcj2Dec_Decode(),
CBcj2Dec::temp can contain up to 4 bytes for writing to (dest).
}
(reserve == 0) is allowed, if we decode full stream via single call of Bcj2Dec_Decode().
(reserve == 0) also is allowed in case of multi-call, if we use fixed buffers,
and (reserve) is calculated from full (final) sizes of all streams before first call.
*/

typedef struct

@@ -68,22 +99,66 @@ typedef struct

unsigned state; /* BCJ2_STREAM_MAIN has more priority than BCJ2_STATE_ORIG */

UInt32 ip;
Byte temp[4];
UInt32 ip; /* property of starting base for decoding */
UInt32 temp; /* Byte temp[4]; */
UInt32 range;
UInt32 code;
UInt16 probs[2 + 256];
CBcj2Prob probs[2 + 256];
} CBcj2Dec;

/* Note:
Bcj2Dec_Init() sets (CBcj2Dec::ip = 0)
if (ip != 0) property is required, the caller must set CBcj2Dec::ip after Bcj2Dec_Init()
*/
void Bcj2Dec_Init(CBcj2Dec *p);

/* Returns: SZ_OK or SZ_ERROR_DATA */

/* Bcj2Dec_Decode():
returns:
SZ_OK
SZ_ERROR_DATA : if data in 5 starting bytes of BCJ2_STREAM_RC stream are not correct
*/
SRes Bcj2Dec_Decode(CBcj2Dec *p);

#define Bcj2Dec_IsFinished(_p_) ((_p_)->code == 0)
/* To check that decoding was finished you can compare
sizes of processed streams with sizes known from another sources.
You must do at least one mandatory check from the two following options:
- the check for size of processed output (ORIG) stream.
- the check for size of processed input (MAIN) stream.
additional optional checks:
- the checks for processed sizes of all input streams (MAIN, CALL, JUMP, RC)
- the checks Bcj2Dec_IsMaybeFinished*()
also before actual decoding you can check that the
following condition is met for stream sizes:
( size(ORIG) == size(MAIN) + size(CALL) + size(JUMP) )
*/

/* (state == BCJ2_STREAM_MAIN) means that decoder is ready for
additional input data in BCJ2_STREAM_MAIN stream.
Note that (state == BCJ2_STREAM_MAIN) is allowed for non-finished decoding.
*/
#define Bcj2Dec_IsMaybeFinished_state_MAIN(_p_) ((_p_)->state == BCJ2_STREAM_MAIN)

/* if the stream decoding was finished correctly, then range decoder
part of CBcj2Dec also was finished, and then (CBcj2Dec::code == 0).
Note that (CBcj2Dec::code == 0) is allowed for non-finished decoding.
*/
#define Bcj2Dec_IsMaybeFinished_code(_p_) ((_p_)->code == 0)

/* use Bcj2Dec_IsMaybeFinished() only as additional check
after at least one mandatory check from the two following options:
- the check for size of processed output (ORIG) stream.
- the check for size of processed input (MAIN) stream.
*/
#define Bcj2Dec_IsMaybeFinished(_p_) ( \
Bcj2Dec_IsMaybeFinished_state_MAIN(_p_) && \
Bcj2Dec_IsMaybeFinished_code(_p_))

/* ---------- BCJ2 Encoder ---------- */

typedef enum
{
BCJ2_ENC_FINISH_MODE_CONTINUE,

@@ -91,6 +166,91 @@ typedef enum
BCJ2_ENC_FINISH_MODE_END_STREAM
} EBcj2Enc_FinishMode;

/*
BCJ2_ENC_FINISH_MODE_CONTINUE:
process non finished encoding.
It notifies the encoder that additional further calls
can provide more input data (src) than provided by current call.
In that case the CBcj2Enc encoder still can move (src) pointer
up to (srcLim), but CBcj2Enc encoder can store some of the last
processed bytes (up to 4 bytes) from src to internal CBcj2Enc::temp[] buffer.
at return:
(CBcj2Enc::src will point to position that includes
processed data and data copied to (temp[]) buffer)
That data from (temp[]) buffer will be used in further calls.

BCJ2_ENC_FINISH_MODE_END_BLOCK:
finish encoding of current block (ended at srcLim) without RC flushing.
at return: if (CBcj2Enc::state == BCJ2_ENC_STATE_ORIG) &&
CBcj2Enc::src == CBcj2Enc::srcLim)
: it shows that block encoding was finished. And the encoder is
ready for new (src) data or for stream finish operation.
finished block means
{
CBcj2Enc has completed block encoding up to (srcLim).
(1 + 4 bytes) or (2 + 4 bytes) CALL/JUMP cortages will
not cross block boundary at (srcLim).
temporary CBcj2Enc buffer for (ORIG) src data is empty.
3 output uncompressed streams (MAIN, CALL, JUMP) were flushed.
RC stream was not flushed. And RC stream will cross block boundary.
}
Note: some possible implementation of BCJ2 encoder could
write branch marker (e8/e8/0f8x) in one call of Bcj2Enc_Encode(),
and it could calculate symbol for RC in another call of Bcj2Enc_Encode().
BCJ2 encoder uses ip/fileIp/fileSize/relatLimit values to calculate RC symbol.
And these CBcj2Enc variables can have different values in different Bcj2Enc_Encode() calls.
So caller must finish each block with BCJ2_ENC_FINISH_MODE_END_BLOCK
to ensure that RC symbol is calculated and written in proper block.

BCJ2_ENC_FINISH_MODE_END_STREAM
finish encoding of stream (ended at srcLim) fully including RC flushing.
at return: if (CBcj2Enc::state == BCJ2_ENC_STATE_FINISHED)
: it shows that stream encoding was finished fully,
and all output streams were flushed fully.
also Bcj2Enc_IsFinished() can be called.
*/

/*
32-bit relative offset in JUMP/CALL commands is
- (mod 4 GiB) for 32-bit x86 code
- signed Int32 for 64-bit x86-64 code
BCJ2 encoder also does internal relative to absolute address conversions.
And there are 2 possible ways to do it:
before v23: we used 32-bit variables and (mod 4 GiB) conversion
since v23: we use 64-bit variables and (signed Int32 offset) conversion.
The absolute address condition for conversion in v23:
((UInt64)((Int64)ip64 - (Int64)fileIp64 + 5 + (Int32)offset) < (UInt64)fileSize64)
note that if (fileSize64 > 2 GiB). there is difference between
old (mod 4 GiB) way (v22) and new (signed Int32 offset) way (v23).
And new (v23) way is more suitable to encode 64-bit x86-64 code for (fileSize64 > 2 GiB) cases.
*/

/*
// for old (v22) way for conversion:
typedef UInt32 CBcj2Enc_ip_unsigned;
typedef Int32 CBcj2Enc_ip_signed;
#define BCJ2_ENC_FileSize_MAX ((UInt32)1 << 31)
*/
typedef UInt64 CBcj2Enc_ip_unsigned;
typedef Int64 CBcj2Enc_ip_signed;

/* maximum size of file that can be used for conversion condition */
#define BCJ2_ENC_FileSize_MAX ((CBcj2Enc_ip_unsigned)0 - 2)

/* default value of fileSize64_minus1 variable that means
that absolute address limitation will not be used */
#define BCJ2_ENC_FileSizeField_UNLIMITED ((CBcj2Enc_ip_unsigned)0 - 1)

/* calculate value that later can be set to CBcj2Enc::fileSize64_minus1 */
#define BCJ2_ENC_GET_FileSizeField_VAL_FROM_FileSize(fileSize) \
((CBcj2Enc_ip_unsigned)(fileSize) - 1)

/* set CBcj2Enc::fileSize64_minus1 variable from size of file */
#define Bcj2Enc_SET_FileSize(p, fileSize) \
(p)->fileSize64_minus1 = BCJ2_ENC_GET_FileSizeField_VAL_FROM_FileSize(fileSize);

typedef struct
{
Byte *bufs[BCJ2_NUM_STREAMS];

@@ -101,45 +261,71 @@ typedef struct
unsigned state;
EBcj2Enc_FinishMode finishMode;

Byte prevByte;
Byte context;
Byte flushRem;
Byte isFlushState;

Byte cache;
UInt32 range;
UInt64 low;
UInt64 cacheSize;

// UInt32 context; // for marker version, it can include marker flag.

UInt32 ip;

/* 32-bit ralative offset in JUMP/CALL commands is
- (mod 4 GB) in 32-bit mode
- signed Int32 in 64-bit mode
We use (mod 4 GB) check for fileSize.
Use fileSize up to 2 GB, if you want to support 32-bit and 64-bit code conversion. */
UInt32 fileIp;
UInt32 fileSize; /* (fileSize <= ((UInt32)1 << 31)), 0 means no_limit */
UInt32 relatLimit; /* (relatLimit <= ((UInt32)1 << 31)), 0 means desable_conversion */
/* (ip64) and (fileIp64) correspond to virtual source stream position
that doesn't include data in temp[] */
CBcj2Enc_ip_unsigned ip64; /* current (ip) position */
CBcj2Enc_ip_unsigned fileIp64; /* start (ip) position of current file */
CBcj2Enc_ip_unsigned fileSize64_minus1; /* size of current file (for conversion limitation) */
UInt32 relatLimit; /* (relatLimit <= ((UInt32)1 << 31)) : 0 means disable_conversion */
// UInt32 relatExcludeBits;

UInt32 tempTarget;
unsigned tempPos;
Byte temp[4 * 2];

unsigned flushPos;

UInt16 probs[2 + 256];
unsigned tempPos; /* the number of bytes that were copied to temp[] buffer
(tempPos <= 4) outside of Bcj2Enc_Encode() */
// Byte temp[4]; // for marker version
Byte temp[8];
CBcj2Prob probs[2 + 256];
} CBcj2Enc;

void Bcj2Enc_Init(CBcj2Enc *p);

/*
Bcj2Enc_Encode(): at exit:
p->State < BCJ2_NUM_STREAMS : we need more buffer space for output stream
(bufs[p->State] == lims[p->State])
p->State == BCJ2_ENC_STATE_ORIG : we need more data in input src stream
(src == srcLim)
p->State == BCJ2_ENC_STATE_FINISHED : after fully encoded stream
*/
void Bcj2Enc_Encode(CBcj2Enc *p);

#define Bcj2Enc_Get_InputData_Size(p) ((SizeT)((p)->srcLim - (p)->src) + (p)->tempPos)
#define Bcj2Enc_IsFinished(p) ((p)->flushPos == 5)
/* Bcj2Enc encoder can look ahead for up 4 bytes of source stream.
CBcj2Enc::tempPos : is the number of bytes that were copied from input stream to temp[] buffer.
(CBcj2Enc::src) after Bcj2Enc_Encode() is starting position after
fully processed data and after data copied to temp buffer.
So if the caller needs to get real number of fully processed input
bytes (without look ahead data in temp buffer),
the caller must subtruct (CBcj2Enc::tempPos) value from processed size
value that is calculated based on current (CBcj2Enc::src):
cur_processed_pos = Calc_Big_Processed_Pos(enc.src)) -
Bcj2Enc_Get_AvailInputSize_in_Temp(&enc);
*/
/* get the size of input data that was stored in temp[] buffer: */
#define Bcj2Enc_Get_AvailInputSize_in_Temp(p) ((p)->tempPos)

#define Bcj2Enc_IsFinished(p) ((p)->flushRem == 0)

#define BCJ2_RELAT_LIMIT_NUM_BITS 26
#define BCJ2_RELAT_LIMIT ((UInt32)1 << BCJ2_RELAT_LIMIT_NUM_BITS)

/* limit for CBcj2Enc::fileSize variable */
#define BCJ2_FileSize_MAX ((UInt32)1 << 31)
/* Note : the decoder supports overlapping of marker (0f 80).
But we can eliminate such overlapping cases by setting
the limit for relative offset conversion as
CBcj2Enc::relatLimit <= (0x0f << 24) == (240 MiB)
*/
/* default value for CBcj2Enc::relatLimit */
#define BCJ2_ENC_RELAT_LIMIT_DEFAULT ((UInt32)0x0f << 24)
#define BCJ2_ENC_RELAT_LIMIT_MAX ((UInt32)1 << 31)
// #define BCJ2_RELAT_EXCLUDE_NUM_BITS 5

EXTERN_C_END
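To make the decoder finished-state checks above concrete, a small illustrative helper (not from this commit) that applies one mandatory size check plus the optional Bcj2Dec_IsMaybeFinished() check:

#include "Bcj2.h"

/* origWritten / origExpected: bytes produced on the ORIG (output) stream. */
static BoolInt Bcj2Dec_LooksFinished(const CBcj2Dec *p, UInt64 origWritten, UInt64 origExpected)
{
  if (origWritten != origExpected)            /* mandatory check: processed ORIG size */
    return False;
  return (BoolInt)Bcj2Dec_IsMaybeFinished(p); /* optional additional check */
}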
@@ -1,64 +1,99 @@
/* Bra.h -- Branch converters for executables
2013-01-18 : Igor Pavlov : Public domain */
2023-04-02 : Igor Pavlov : Public domain */

#ifndef __BRA_H
#define __BRA_H
#ifndef ZIP7_INC_BRA_H
#define ZIP7_INC_BRA_H

#include "7zTypes.h"

EXTERN_C_BEGIN

/*
These functions convert relative addresses to absolute addresses
in CALL instructions to increase the compression ratio.

In:
data - data buffer
size - size of data
ip - current virtual Instruction Pinter (IP) value
state - state variable for x86 converter
encoding - 0 (for decoding), 1 (for encoding)

Out:
state - state variable for x86 converter
#define Z7_BRANCH_CONV_DEC(name) z7_BranchConv_ ## name ## _Dec
#define Z7_BRANCH_CONV_ENC(name) z7_BranchConv_ ## name ## _Enc
#define Z7_BRANCH_CONV_ST_DEC(name) z7_BranchConvSt_ ## name ## _Dec
#define Z7_BRANCH_CONV_ST_ENC(name) z7_BranchConvSt_ ## name ## _Enc

Returns:
The number of processed bytes. If you call these functions with multiple calls,
you must start next call with first byte after block of processed bytes.
#define Z7_BRANCH_CONV_DECL(name) Byte * name(Byte *data, SizeT size, UInt32 pc)
#define Z7_BRANCH_CONV_ST_DECL(name) Byte * name(Byte *data, SizeT size, UInt32 pc, UInt32 *state)

typedef Z7_BRANCH_CONV_DECL( (*z7_Func_BranchConv));
typedef Z7_BRANCH_CONV_ST_DECL((*z7_Func_BranchConvSt));

#define Z7_BRANCH_CONV_ST_X86_STATE_INIT_VAL 0
Z7_BRANCH_CONV_ST_DECL(Z7_BRANCH_CONV_ST_DEC(X86));
Z7_BRANCH_CONV_ST_DECL(Z7_BRANCH_CONV_ST_ENC(X86));

#define Z7_BRANCH_FUNCS_DECL(name) \
Z7_BRANCH_CONV_DECL(Z7_BRANCH_CONV_DEC(name)); \
Z7_BRANCH_CONV_DECL(Z7_BRANCH_CONV_ENC(name));

Z7_BRANCH_FUNCS_DECL(ARM64)
Z7_BRANCH_FUNCS_DECL(ARM)
Z7_BRANCH_FUNCS_DECL(ARMT)
Z7_BRANCH_FUNCS_DECL(PPC)
Z7_BRANCH_FUNCS_DECL(SPARC)
Z7_BRANCH_FUNCS_DECL(IA64)

/*
These functions convert data that contain CPU instructions.
Each such function converts relative addresses to absolute addresses in some
branch instructions: CALL (in all converters) and JUMP (X86 converter only).
Such conversion allows to increase compression ratio, if we compress that data.

There are 2 types of converters:
Byte * Conv_RISC (Byte *data, SizeT size, UInt32 pc);
Byte * ConvSt_X86(Byte *data, SizeT size, UInt32 pc, UInt32 *state);
Each Converter supports 2 versions: one for encoding
and one for decoding (_Enc/_Dec postfixes in function name).

In params:
data : data buffer
size : size of data
pc : current virtual Program Counter (Instruction Pinter) value
In/Out param:
state : pointer to state variable (for X86 converter only)

Return:
The pointer to position in (data) buffer after last byte that was processed.
If the caller calls converter again, it must call it starting with that position.
But the caller is allowed to move data in buffer. so pointer to
current processed position also will be changed for next call.
Also the caller must increase internal (pc) value for next call.

Each converter has some characteristics: Endian, Alignment, LookAhead.
Type Endian Alignment LookAhead

x86 little 1 4
X86 little 1 4
ARMT little 2 2
ARM little 4 0
ARM64 little 4 0
PPC big 4 0
SPARC big 4 0
IA64 little 16 0

size must be >= Alignment + LookAhead, if it's not last block.
If (size < Alignment + LookAhead), converter returns 0.
(data) must be aligned for (Alignment).
processed size can be calculated as:
SizeT processed = Conv(data, size, pc) - data;
if (processed == 0)
it means that converter needs more data for processing.
If (size < Alignment + LookAhead)
then (processed == 0) is allowed.

Example:

UInt32 ip = 0;
for ()
{
; size must be >= Alignment + LookAhead, if it's not last block
SizeT processed = Convert(data, size, ip, 1);
data += processed;
size -= processed;
ip += processed;
}
Example code for conversion in loop:
UInt32 pc = 0;
size = 0;
for (;;)
{
size += Load_more_input_data(data + size);
SizeT processed = Conv(data, size, pc) - data;
if (processed == 0 && no_more_input_data_after_size)
break; // we stop convert loop
data += processed;
size -= processed;
pc += processed;
}
*/

#define x86_Convert_Init(state) { state = 0; }
SizeT x86_Convert(Byte *data, SizeT size, UInt32 ip, UInt32 *state, int encoding);
SizeT ARM_Convert(Byte *data, SizeT size, UInt32 ip, int encoding);
SizeT ARMT_Convert(Byte *data, SizeT size, UInt32 ip, int encoding);
SizeT PPC_Convert(Byte *data, SizeT size, UInt32 ip, int encoding);
SizeT SPARC_Convert(Byte *data, SizeT size, UInt32 ip, int encoding);
SizeT IA64_Convert(Byte *data, SizeT size, UInt32 ip, int encoding);

EXTERN_C_END

#endif
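The new converters are reached through the macro-generated names above (for example Z7_BRANCH_CONV_DEC(ARM64) expands to z7_BranchConv_ARM64_Dec). A concrete decode loop following the pattern from the header comment, for a buffer that is already fully in memory (illustrative sketch, not code from this commit):

#include "Bra.h"

static void UnfilterArm64(Byte *data, SizeT size)
{
  UInt32 pc = 0;
  for (;;)
  {
    SizeT processed = (SizeT)(Z7_BRANCH_CONV_DEC(ARM64)(data, size, pc) - data);
    if (processed == 0)
      break;                 /* remaining tail is smaller than Alignment + LookAhead */
    data += processed;
    size -= processed;
    pc += (UInt32)processed;
  }
}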
@ -1,12 +1,37 @@
|
|||
/* Compiler.h
|
||||
2021-01-05 : Igor Pavlov : Public domain */
|
||||
/* Compiler.h : Compiler specific defines and pragmas
|
||||
2023-04-02 : Igor Pavlov : Public domain */
|
||||
|
||||
#ifndef __7Z_COMPILER_H
|
||||
#define __7Z_COMPILER_H
|
||||
#ifndef ZIP7_INC_COMPILER_H
|
||||
#define ZIP7_INC_COMPILER_H
|
||||
|
||||
#if defined(__clang__)
|
||||
# define Z7_CLANG_VERSION (__clang_major__ * 10000 + __clang_minor__ * 100 + __clang_patchlevel__)
|
||||
#endif
|
||||
#if defined(__clang__) && defined(__apple_build_version__)
|
||||
# define Z7_APPLE_CLANG_VERSION Z7_CLANG_VERSION
|
||||
#elif defined(__clang__)
|
||||
# define Z7_LLVM_CLANG_VERSION Z7_CLANG_VERSION
|
||||
#elif defined(__GNUC__)
|
||||
# define Z7_GCC_VERSION (__GNUC__ * 10000 + __GNUC_MINOR__ * 100 + __GNUC_PATCHLEVEL__)
|
||||
#endif
|
||||
|
||||
#ifdef _MSC_VER
|
||||
#if !defined(__clang__) && !defined(__GNUC__)
|
||||
#define Z7_MSC_VER_ORIGINAL _MSC_VER
|
||||
#endif
|
||||
#endif
|
||||
|
||||
#if defined(__MINGW32__) || defined(__MINGW64__)
|
||||
#define Z7_MINGW
|
||||
#endif
|
||||
|
||||
// #pragma GCC diagnostic ignored "-Wunknown-pragmas"
|
||||
|
||||
#ifdef __clang__
|
||||
// padding size of '' with 4 bytes to alignment boundary
|
||||
#pragma GCC diagnostic ignored "-Wpadded"
|
||||
#endif
|
||||
|
||||
#ifdef __clang__
|
||||
#pragma clang diagnostic ignored "-Wunused-private-field"
|
||||
#endif
|
||||
|
||||
#ifdef _MSC_VER
|
||||
|
||||
|
@ -17,24 +42,115 @@
|
|||
#pragma warning(disable : 4214) // nonstandard extension used : bit field types other than int
|
||||
#endif
|
||||
|
||||
#if _MSC_VER >= 1300
|
||||
#pragma warning(disable : 4996) // This function or variable may be unsafe
|
||||
#else
|
||||
#pragma warning(disable : 4511) // copy constructor could not be generated
|
||||
#pragma warning(disable : 4512) // assignment operator could not be generated
|
||||
#pragma warning(disable : 4514) // unreferenced inline function has been removed
|
||||
#pragma warning(disable : 4702) // unreachable code
|
||||
#pragma warning(disable : 4710) // not inlined
|
||||
#pragma warning(disable : 4714) // function marked as __forceinline not inlined
|
||||
#pragma warning(disable : 4786) // identifier was truncated to '255' characters in the debug information
|
||||
#endif
|
||||
#if defined(_MSC_VER) && _MSC_VER >= 1800
|
||||
#pragma warning(disable : 4464) // relative include path contains '..'
|
||||
#endif
|
||||
|
||||
#ifdef __clang__
|
||||
#pragma clang diagnostic ignored "-Wdeprecated-declarations"
|
||||
#pragma clang diagnostic ignored "-Wmicrosoft-exception-spec"
|
||||
// #pragma clang diagnostic ignored "-Wreserved-id-macro"
|
||||
#endif
|
||||
// == 1200 : -O1 : for __forceinline
|
||||
// >= 1900 : -O1 : for printf
|
||||
#pragma warning(disable : 4710) // function not inlined
|
||||
|
||||
#if _MSC_VER < 1900
|
||||
// winnt.h: 'Int64ShllMod32'
|
||||
#pragma warning(disable : 4514) // unreferenced inline function has been removed
|
||||
#endif
|
||||
|
||||
#if _MSC_VER < 1300
|
||||
// #pragma warning(disable : 4702) // unreachable code
|
||||
// Bra.c : -O1:
|
||||
#pragma warning(disable : 4714) // function marked as __forceinline not inlined
|
||||
#endif
|
||||
|
||||
/*
|
||||
#if _MSC_VER > 1400 && _MSC_VER <= 1900
|
||||
// strcat: This function or variable may be unsafe
|
||||
// sysinfoapi.h: kit10: GetVersion was declared deprecated
|
||||
#pragma warning(disable : 4996)
|
||||
#endif
|
||||
*/
|
||||
|
||||
#if _MSC_VER > 1200
|
||||
// -Wall warnings
|
||||
|
||||
#pragma warning(disable : 4711) // function selected for automatic inline expansion
|
||||
#pragma warning(disable : 4820) // '2' bytes padding added after data member
|
||||
|
||||
#if _MSC_VER >= 1400 && _MSC_VER < 1920
|
||||
// 1400: string.h: _DBG_MEMCPY_INLINE_
|
||||
// 1600 - 191x : smmintrin.h __cplusplus'
|
||||
// is not defined as a preprocessor macro, replacing with '0' for '#if/#elif'
|
||||
#pragma warning(disable : 4668)
|
||||
|
||||
// 1400 - 1600 : WinDef.h : 'FARPROC' :
|
||||
// 1900 - 191x : immintrin.h: _readfsbase_u32
|
||||
// no function prototype given : converting '()' to '(void)'
|
||||
#pragma warning(disable : 4255)
|
||||
#endif
|
||||
|
||||
#if _MSC_VER >= 1914
|
||||
// Compiler will insert Spectre mitigation for memory load if /Qspectre switch specified
|
||||
#pragma warning(disable : 5045)
|
||||
#endif
|
||||
|
||||
#endif // _MSC_VER > 1200
|
||||
#endif // _MSC_VER
|
||||
|
||||
|
||||
#if defined(__clang__) && (__clang_major__ >= 4)
|
||||
#define Z7_PRAGMA_OPT_DISABLE_LOOP_UNROLL_VECTORIZE \
|
||||
_Pragma("clang loop unroll(disable)") \
|
||||
_Pragma("clang loop vectorize(disable)")
|
||||
#define Z7_ATTRIB_NO_VECTORIZE
|
||||
#elif defined(__GNUC__) && (__GNUC__ >= 5)
|
||||
#define Z7_ATTRIB_NO_VECTORIZE __attribute__((optimize("no-tree-vectorize")))
|
||||
// __attribute__((optimize("no-unroll-loops")));
|
||||
#define Z7_PRAGMA_OPT_DISABLE_LOOP_UNROLL_VECTORIZE
|
||||
#elif defined(_MSC_VER) && (_MSC_VER >= 1920)
|
||||
#define Z7_PRAGMA_OPT_DISABLE_LOOP_UNROLL_VECTORIZE \
|
||||
_Pragma("loop( no_vector )")
|
||||
#define Z7_ATTRIB_NO_VECTORIZE
|
||||
#else
|
||||
#define Z7_PRAGMA_OPT_DISABLE_LOOP_UNROLL_VECTORIZE
|
||||
#define Z7_ATTRIB_NO_VECTORIZE
|
||||
#endif
|
||||
|
||||
#if defined(MY_CPU_X86_OR_AMD64) && ( \
|
||||
defined(__clang__) && (__clang_major__ >= 4) \
|
||||
|| defined(__GNUC__) && (__GNUC__ >= 5))
|
||||
#define Z7_ATTRIB_NO_SSE __attribute__((__target__("no-sse")))
|
||||
#else
|
||||
#define Z7_ATTRIB_NO_SSE
|
||||
#endif
|
||||
|
||||
#define Z7_ATTRIB_NO_VECTOR \
|
||||
Z7_ATTRIB_NO_VECTORIZE \
|
||||
Z7_ATTRIB_NO_SSE
|
||||
|
||||
|
||||
#if defined(__clang__) && (__clang_major__ >= 8) \
|
||||
|| defined(__GNUC__) && (__GNUC__ >= 1000) \
|
||||
/* || defined(_MSC_VER) && (_MSC_VER >= 1920) */
|
||||
// GCC is not good for __builtin_expect()
|
||||
#define Z7_LIKELY(x) (__builtin_expect((x), 1))
|
||||
#define Z7_UNLIKELY(x) (__builtin_expect((x), 0))
|
||||
// #define Z7_unlikely [[unlikely]]
|
||||
// #define Z7_likely [[likely]]
|
||||
#else
|
||||
#define Z7_LIKELY(x) (x)
|
||||
#define Z7_UNLIKELY(x) (x)
|
||||
// #define Z7_likely
|
||||
#endif
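/* Illustrative sketch (not from the upstream sources): how the hint macros
   above are meant to be used; guarded with #if 0 so it stays an example. */
#if 0
static int parse_byte(int b)
{
  // the error path is expected to be rare, so mark it as unlikely
  if (Z7_UNLIKELY(b < 0))
    return -1;
  return b & 0xFF;
}
#endif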
|
||||
|
||||
|
||||
#if (defined(Z7_CLANG_VERSION) && (Z7_CLANG_VERSION >= 36000))
|
||||
#define Z7_DIAGNOSCTIC_IGNORE_BEGIN_RESERVED_MACRO_IDENTIFIER \
|
||||
_Pragma("GCC diagnostic push") \
|
||||
_Pragma("GCC diagnostic ignored \"-Wreserved-macro-identifier\"")
|
||||
#define Z7_DIAGNOSCTIC_IGNORE_END_RESERVED_MACRO_IDENTIFIER \
|
||||
_Pragma("GCC diagnostic pop")
|
||||
#else
|
||||
#define Z7_DIAGNOSCTIC_IGNORE_BEGIN_RESERVED_MACRO_IDENTIFIER
|
||||
#define Z7_DIAGNOSCTIC_IGNORE_END_RESERVED_MACRO_IDENTIFIER
|
||||
#endif
|
||||
|
||||
#define UNUSED_VAR(x) (void)x;
@ -1,8 +1,8 @@
|
|||
/* CpuArch.h -- CPU specific code
|
||||
2021-07-13 : Igor Pavlov : Public domain */
|
||||
2023-04-02 : Igor Pavlov : Public domain */
|
||||
|
||||
#ifndef __CPU_ARCH_H
|
||||
#define __CPU_ARCH_H
|
||||
#ifndef ZIP7_INC_CPU_ARCH_H
|
||||
#define ZIP7_INC_CPU_ARCH_H
|
||||
|
||||
#include "7zTypes.h"
|
||||
|
||||
|
@ -51,7 +51,13 @@ MY_CPU_64BIT means that processor can work with 64-bit registers.
|
|||
|| defined(__AARCH64EB__) \
|
||||
|| defined(__aarch64__)
|
||||
#define MY_CPU_ARM64
|
||||
#define MY_CPU_NAME "arm64"
|
||||
#ifdef __ILP32__
|
||||
#define MY_CPU_NAME "arm64-32"
|
||||
#define MY_CPU_SIZEOF_POINTER 4
|
||||
#else
|
||||
#define MY_CPU_NAME "arm64"
|
||||
#define MY_CPU_SIZEOF_POINTER 8
|
||||
#endif
|
||||
#define MY_CPU_64BIT
|
||||
#endif
|
||||
|
||||
|
@ -68,8 +74,10 @@ MY_CPU_64BIT means that processor can work with 64-bit registers.
|
|||
#define MY_CPU_ARM
|
||||
|
||||
#if defined(__thumb__) || defined(__THUMBEL__) || defined(_M_ARMT)
|
||||
#define MY_CPU_ARMT
|
||||
#define MY_CPU_NAME "armt"
|
||||
#else
|
||||
#define MY_CPU_ARM32
|
||||
#define MY_CPU_NAME "arm"
|
||||
#endif
|
||||
/* #define MY_CPU_32BIT */
|
||||
|
@ -103,6 +111,8 @@ MY_CPU_64BIT means that processor can work with 64-bit registers.
|
|||
|| defined(__PPC__) \
|
||||
|| defined(_POWER)
|
||||
|
||||
#define MY_CPU_PPC_OR_PPC64
|
||||
|
||||
#if defined(__ppc64__) \
|
||||
|| defined(__powerpc64__) \
|
||||
|| defined(_LP64) \
|
||||
|
@ -123,12 +133,15 @@ MY_CPU_64BIT means that processor can work with 64-bit registers.
|
|||
#endif
|
||||
|
||||
|
||||
#if defined(__sparc64__)
|
||||
#define MY_CPU_NAME "sparc64"
|
||||
#define MY_CPU_64BIT
|
||||
#elif defined(__sparc__)
|
||||
#define MY_CPU_NAME "sparc"
|
||||
/* #define MY_CPU_32BIT */
|
||||
#if defined(__riscv) \
|
||||
|| defined(__riscv__)
|
||||
#if __riscv_xlen == 32
|
||||
#define MY_CPU_NAME "riscv32"
|
||||
#elif __riscv_xlen == 64
|
||||
#define MY_CPU_NAME "riscv64"
|
||||
#else
|
||||
#define MY_CPU_NAME "riscv"
|
||||
#endif
|
||||
#endif
|
||||
|
||||
|
||||
|
@ -194,6 +207,9 @@ MY_CPU_64BIT means that processor can work with 64-bit registers.
|
|||
#error Stop_Compiling_Bad_Endian
|
||||
#endif
|
||||
|
||||
#if !defined(MY_CPU_LE) && !defined(MY_CPU_BE)
|
||||
#error Stop_Compiling_CPU_ENDIAN_must_be_detected_at_compile_time
|
||||
#endif
|
||||
|
||||
#if defined(MY_CPU_32BIT) && defined(MY_CPU_64BIT)
|
||||
#error Stop_Compiling_Bad_32_64_BIT
|
||||
|
@ -250,6 +266,67 @@ MY_CPU_64BIT means that processor can work with 64-bit registers.
|
|||
|
||||
|
||||
|
||||
#ifdef __has_builtin
|
||||
#define Z7_has_builtin(x) __has_builtin(x)
|
||||
#else
|
||||
#define Z7_has_builtin(x) 0
|
||||
#endif
|
||||
|
||||
|
||||
#define Z7_BSWAP32_CONST(v) \
|
||||
( (((UInt32)(v) << 24) ) \
|
||||
| (((UInt32)(v) << 8) & (UInt32)0xff0000) \
|
||||
| (((UInt32)(v) >> 8) & (UInt32)0xff00 ) \
|
||||
| (((UInt32)(v) >> 24) ))
|
||||
|
||||
|
||||
#if defined(_MSC_VER) && (_MSC_VER >= 1300)
|
||||
|
||||
#include <stdlib.h>
|
||||
|
||||
/* Note: these macros will use bswap instruction (486), that is unsupported in 386 cpu */
|
||||
|
||||
#pragma intrinsic(_byteswap_ushort)
|
||||
#pragma intrinsic(_byteswap_ulong)
|
||||
#pragma intrinsic(_byteswap_uint64)
|
||||
|
||||
#define Z7_BSWAP16(v) _byteswap_ushort(v)
|
||||
#define Z7_BSWAP32(v) _byteswap_ulong (v)
|
||||
#define Z7_BSWAP64(v) _byteswap_uint64(v)
|
||||
#define Z7_CPU_FAST_BSWAP_SUPPORTED
|
||||
|
||||
#elif (defined(__GNUC__) && (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 3))) \
|
||||
|| (defined(__clang__) && Z7_has_builtin(__builtin_bswap16))
|
||||
|
||||
#define Z7_BSWAP16(v) __builtin_bswap16(v)
|
||||
#define Z7_BSWAP32(v) __builtin_bswap32(v)
|
||||
#define Z7_BSWAP64(v) __builtin_bswap64(v)
|
||||
#define Z7_CPU_FAST_BSWAP_SUPPORTED
|
||||
|
||||
#else
|
||||
|
||||
#define Z7_BSWAP16(v) ((UInt16) \
|
||||
( ((UInt32)(v) << 8) \
|
||||
| ((UInt32)(v) >> 8) \
|
||||
))
|
||||
|
||||
#define Z7_BSWAP32(v) Z7_BSWAP32_CONST(v)
|
||||
|
||||
#define Z7_BSWAP64(v) \
|
||||
( ( ( (UInt64)(v) ) << 8 * 7 ) \
|
||||
| ( ( (UInt64)(v) & ((UInt32)0xff << 8 * 1) ) << 8 * 5 ) \
|
||||
| ( ( (UInt64)(v) & ((UInt32)0xff << 8 * 2) ) << 8 * 3 ) \
|
||||
| ( ( (UInt64)(v) & ((UInt32)0xff << 8 * 3) ) << 8 * 1 ) \
|
||||
| ( ( (UInt64)(v) >> 8 * 1 ) & ((UInt32)0xff << 8 * 3) ) \
|
||||
| ( ( (UInt64)(v) >> 8 * 3 ) & ((UInt32)0xff << 8 * 2) ) \
|
||||
| ( ( (UInt64)(v) >> 8 * 5 ) & ((UInt32)0xff << 8 * 1) ) \
|
||||
| ( ( (UInt64)(v) >> 8 * 7 ) ) \
|
||||
)
|
||||
|
||||
#endif
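/* Illustrative sketch (not from the upstream sources): Z7_BSWAP32_CONST is
   pure macro arithmetic and works in constant expressions, while Z7_BSWAP32
   may map to a compiler intrinsic; "value" is a made-up parameter name. */
#if 0
static UInt32 swap_example(UInt32 value)
{
  const UInt32 magic = Z7_BSWAP32_CONST(0x11223344);  // == 0x44332211
  return Z7_BSWAP32(value) ^ magic;
}
#endif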
|
||||
|
||||
|
||||
|
||||
#ifdef MY_CPU_LE
|
||||
#if defined(MY_CPU_X86_OR_AMD64) \
|
||||
|| defined(MY_CPU_ARM64)
|
||||
|
@ -269,13 +346,11 @@ MY_CPU_64BIT means that processor can work with 64-bit registers.
|
|||
#define GetUi32(p) (*(const UInt32 *)(const void *)(p))
|
||||
#ifdef MY_CPU_LE_UNALIGN_64
|
||||
#define GetUi64(p) (*(const UInt64 *)(const void *)(p))
|
||||
#define SetUi64(p, v) { *(UInt64 *)(void *)(p) = (v); }
|
||||
#endif
|
||||
|
||||
#define SetUi16(p, v) { *(UInt16 *)(void *)(p) = (v); }
|
||||
#define SetUi32(p, v) { *(UInt32 *)(void *)(p) = (v); }
|
||||
#ifdef MY_CPU_LE_UNALIGN_64
|
||||
#define SetUi64(p, v) { *(UInt64 *)(void *)(p) = (v); }
|
||||
#endif
|
||||
|
||||
#else
|
||||
|
||||
|
@ -302,51 +377,26 @@ MY_CPU_64BIT means that processor can work with 64-bit registers.
|
|||
#endif
|
||||
|
||||
|
||||
#ifndef MY_CPU_LE_UNALIGN_64
|
||||
|
||||
#ifndef GetUi64
|
||||
#define GetUi64(p) (GetUi32(p) | ((UInt64)GetUi32(((const Byte *)(p)) + 4) << 32))
|
||||
#endif
|
||||
|
||||
#ifndef SetUi64
|
||||
#define SetUi64(p, v) { Byte *_ppp2_ = (Byte *)(p); UInt64 _vvv2_ = (v); \
|
||||
SetUi32(_ppp2_ , (UInt32)_vvv2_); \
|
||||
SetUi32(_ppp2_ + 4, (UInt32)(_vvv2_ >> 32)); }
|
||||
|
||||
SetUi32(_ppp2_ , (UInt32)_vvv2_) \
|
||||
SetUi32(_ppp2_ + 4, (UInt32)(_vvv2_ >> 32)) }
|
||||
#endif
|
||||
|
||||
|
||||
#if defined(MY_CPU_LE_UNALIGN) && defined(Z7_CPU_FAST_BSWAP_SUPPORTED)
|
||||
|
||||
#define GetBe32(p) Z7_BSWAP32 (*(const UInt32 *)(const void *)(p))
|
||||
#define SetBe32(p, v) { (*(UInt32 *)(void *)(p)) = Z7_BSWAP32(v); }
|
||||
|
||||
#ifdef __has_builtin
|
||||
#define MY__has_builtin(x) __has_builtin(x)
|
||||
#else
|
||||
#define MY__has_builtin(x) 0
|
||||
#if defined(MY_CPU_LE_UNALIGN_64)
|
||||
#define GetBe64(p) Z7_BSWAP64 (*(const UInt64 *)(const void *)(p))
|
||||
#endif
|
||||
|
||||
#if defined(MY_CPU_LE_UNALIGN) && /* defined(_WIN64) && */ defined(_MSC_VER) && (_MSC_VER >= 1300)
|
||||
|
||||
/* Note: we use bswap instruction, that is unsupported in 386 cpu */
|
||||
|
||||
#include <stdlib.h>
|
||||
|
||||
#pragma intrinsic(_byteswap_ushort)
|
||||
#pragma intrinsic(_byteswap_ulong)
|
||||
#pragma intrinsic(_byteswap_uint64)
|
||||
|
||||
/* #define GetBe16(p) _byteswap_ushort(*(const UInt16 *)(const Byte *)(p)) */
|
||||
#define GetBe32(p) _byteswap_ulong (*(const UInt32 *)(const void *)(p))
|
||||
#define GetBe64(p) _byteswap_uint64(*(const UInt64 *)(const void *)(p))
|
||||
|
||||
#define SetBe32(p, v) (*(UInt32 *)(void *)(p)) = _byteswap_ulong(v)
|
||||
|
||||
#elif defined(MY_CPU_LE_UNALIGN) && ( \
|
||||
(defined(__GNUC__) && (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 3))) \
|
||||
|| (defined(__clang__) && MY__has_builtin(__builtin_bswap16)) )
|
||||
|
||||
/* #define GetBe16(p) __builtin_bswap16(*(const UInt16 *)(const void *)(p)) */
|
||||
#define GetBe32(p) __builtin_bswap32(*(const UInt32 *)(const void *)(p))
|
||||
#define GetBe64(p) __builtin_bswap64(*(const UInt64 *)(const void *)(p))
|
||||
|
||||
#define SetBe32(p, v) (*(UInt32 *)(void *)(p)) = __builtin_bswap32(v)
|
||||
|
||||
#else
|
||||
|
||||
#define GetBe32(p) ( \
|
||||
|
@ -355,8 +405,6 @@ MY_CPU_64BIT means that processor can work with 64-bit registers.
|
|||
((UInt32)((const Byte *)(p))[2] << 8) | \
|
||||
((const Byte *)(p))[3] )
|
||||
|
||||
#define GetBe64(p) (((UInt64)GetBe32(p) << 32) | GetBe32(((const Byte *)(p)) + 4))
|
||||
|
||||
#define SetBe32(p, v) { Byte *_ppp_ = (Byte *)(p); UInt32 _vvv_ = (v); \
|
||||
_ppp_[0] = (Byte)(_vvv_ >> 24); \
|
||||
_ppp_[1] = (Byte)(_vvv_ >> 16); \
|
||||
|
@ -365,50 +413,83 @@ MY_CPU_64BIT means that processor can work with 64-bit registers.
|
|||
|
||||
#endif
|
||||
|
||||
#ifndef GetBe64
|
||||
#define GetBe64(p) (((UInt64)GetBe32(p) << 32) | GetBe32(((const Byte *)(p)) + 4))
|
||||
#endif
|
||||
|
||||
#ifndef GetBe16
|
||||
|
||||
#define GetBe16(p) ( (UInt16) ( \
|
||||
((UInt16)((const Byte *)(p))[0] << 8) | \
|
||||
((const Byte *)(p))[1] ))
|
||||
|
||||
#endif
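/* Illustrative sketch (not from the upstream sources): reading mixed-endian
   fields from a byte buffer with the accessors above; the "hdr" layout is a
   hypothetical example, not a real format. */
#if 0
static UInt32 read_size(const Byte *hdr)
{
  const UInt16 version = GetBe16(hdr);      // bytes 0..1, big-endian
  const UInt32 size = GetUi32(hdr + 2);     // bytes 2..5, little-endian
  return (version == 1) ? size : 0;
}
#endif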
|
||||
|
||||
|
||||
#if defined(MY_CPU_BE)
|
||||
#define Z7_CONV_BE_TO_NATIVE_CONST32(v) (v)
|
||||
#define Z7_CONV_LE_TO_NATIVE_CONST32(v) Z7_BSWAP32_CONST(v)
|
||||
#define Z7_CONV_NATIVE_TO_BE_32(v) (v)
|
||||
#elif defined(MY_CPU_LE)
|
||||
#define Z7_CONV_BE_TO_NATIVE_CONST32(v) Z7_BSWAP32_CONST(v)
|
||||
#define Z7_CONV_LE_TO_NATIVE_CONST32(v) (v)
|
||||
#define Z7_CONV_NATIVE_TO_BE_32(v) Z7_BSWAP32(v)
|
||||
#else
|
||||
#error Stop_Compiling_Unknown_Endian_CONV
|
||||
#endif
|
||||
|
||||
|
||||
#if defined(MY_CPU_BE)
|
||||
|
||||
#define GetBe32a(p) (*(const UInt32 *)(const void *)(p))
|
||||
#define GetBe16a(p) (*(const UInt16 *)(const void *)(p))
|
||||
#define SetBe32a(p, v) { *(UInt32 *)(void *)(p) = (v); }
|
||||
#define SetBe16a(p, v) { *(UInt16 *)(void *)(p) = (v); }
|
||||
|
||||
#define GetUi32a(p) GetUi32(p)
|
||||
#define GetUi16a(p) GetUi16(p)
|
||||
#define SetUi32a(p, v) SetUi32(p, v)
|
||||
#define SetUi16a(p, v) SetUi16(p, v)
|
||||
|
||||
#elif defined(MY_CPU_LE)
|
||||
|
||||
#define GetUi32a(p) (*(const UInt32 *)(const void *)(p))
|
||||
#define GetUi16a(p) (*(const UInt16 *)(const void *)(p))
|
||||
#define SetUi32a(p, v) { *(UInt32 *)(void *)(p) = (v); }
|
||||
#define SetUi16a(p, v) { *(UInt16 *)(void *)(p) = (v); }
|
||||
|
||||
#define GetBe32a(p) GetBe32(p)
|
||||
#define GetBe16a(p) GetBe16(p)
|
||||
#define SetBe32a(p, v) SetBe32(p, v)
|
||||
#define SetBe16a(p, v) SetBe16(p, v)
|
||||
|
||||
#else
|
||||
#error Stop_Compiling_Unknown_Endian_CPU_a
|
||||
#endif
|
||||
|
||||
|
||||
#if defined(MY_CPU_X86_OR_AMD64) \
|
||||
|| defined(MY_CPU_ARM_OR_ARM64) \
|
||||
|| defined(MY_CPU_PPC_OR_PPC64)
|
||||
#define Z7_CPU_FAST_ROTATE_SUPPORTED
|
||||
#endif
|
||||
|
||||
|
||||
#ifdef MY_CPU_X86_OR_AMD64
|
||||
|
||||
typedef struct
|
||||
{
|
||||
UInt32 maxFunc;
|
||||
UInt32 vendor[3];
|
||||
UInt32 ver;
|
||||
UInt32 b;
|
||||
UInt32 c;
|
||||
UInt32 d;
|
||||
} Cx86cpuid;
|
||||
|
||||
enum
|
||||
{
|
||||
CPU_FIRM_INTEL,
|
||||
CPU_FIRM_AMD,
|
||||
CPU_FIRM_VIA
|
||||
};
|
||||
|
||||
void MyCPUID(UInt32 function, UInt32 *a, UInt32 *b, UInt32 *c, UInt32 *d);
|
||||
|
||||
BoolInt x86cpuid_CheckAndRead(Cx86cpuid *p);
|
||||
int x86cpuid_GetFirm(const Cx86cpuid *p);
|
||||
|
||||
#define x86cpuid_GetFamily(ver) (((ver >> 16) & 0xFF0) | ((ver >> 8) & 0xF))
|
||||
#define x86cpuid_GetModel(ver) (((ver >> 12) & 0xF0) | ((ver >> 4) & 0xF))
|
||||
#define x86cpuid_GetStepping(ver) (ver & 0xF)
|
||||
|
||||
BoolInt CPU_Is_InOrder(void);
|
||||
void Z7_FASTCALL z7_x86_cpuid(UInt32 a[4], UInt32 function);
|
||||
UInt32 Z7_FASTCALL z7_x86_cpuid_GetMaxFunc(void);
|
||||
#if defined(MY_CPU_AMD64)
|
||||
#define Z7_IF_X86_CPUID_SUPPORTED
|
||||
#else
|
||||
#define Z7_IF_X86_CPUID_SUPPORTED if (z7_x86_cpuid_GetMaxFunc())
|
||||
#endif
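/* Illustrative sketch (not from the upstream sources): how the cpuid helpers
   above fit together; the buffer name "abcd" and the returned value are the
   editor's choice, and this only applies on x86/x64 like the declarations
   above. */
#if 0
static unsigned get_cpu_family(void)
{
  unsigned family = 0;
  Z7_IF_X86_CPUID_SUPPORTED
  {
    UInt32 abcd[4];
    z7_x86_cpuid(abcd, 1);                  // leaf 1: version / feature info
    family = x86cpuid_GetFamily(abcd[0]);
  }
  return family;
}
#endif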
|
||||
|
||||
BoolInt CPU_IsSupported_AES(void);
|
||||
BoolInt CPU_IsSupported_AVX(void);
|
||||
BoolInt CPU_IsSupported_AVX2(void);
|
||||
BoolInt CPU_IsSupported_VAES_AVX2(void);
|
||||
BoolInt CPU_IsSupported_CMOV(void);
|
||||
BoolInt CPU_IsSupported_SSE(void);
|
||||
BoolInt CPU_IsSupported_SSE2(void);
|
||||
BoolInt CPU_IsSupported_SSSE3(void);
|
||||
BoolInt CPU_IsSupported_SSE41(void);
|
||||
BoolInt CPU_IsSupported_SHA(void);
|
||||
|
@ -433,8 +514,8 @@ BoolInt CPU_IsSupported_AES(void);
|
|||
#endif
|
||||
|
||||
#if defined(__APPLE__)
|
||||
int My_sysctlbyname_Get(const char *name, void *buf, size_t *bufSize);
|
||||
int My_sysctlbyname_Get_UInt32(const char *name, UInt32 *val);
|
||||
int z7_sysctlbyname_Get(const char *name, void *buf, size_t *bufSize);
|
||||
int z7_sysctlbyname_Get_UInt32(const char *name, UInt32 *val);
|
||||
#endif
|
||||
|
||||
EXTERN_C_END
@ -1,8 +1,8 @@
|
|||
/* Delta.h -- Delta converter
|
||||
2013-01-18 : Igor Pavlov : Public domain */
|
||||
2023-03-03 : Igor Pavlov : Public domain */
|
||||
|
||||
#ifndef __DELTA_H
|
||||
#define __DELTA_H
|
||||
#ifndef ZIP7_INC_DELTA_H
|
||||
#define ZIP7_INC_DELTA_H
|
||||
|
||||
#include "7zTypes.h"
@ -0,0 +1,20 @@
|
|||
/* DllSecur.h -- DLL loading for security
|
||||
2023-03-03 : Igor Pavlov : Public domain */
|
||||
|
||||
#ifndef ZIP7_INC_DLL_SECUR_H
|
||||
#define ZIP7_INC_DLL_SECUR_H
|
||||
|
||||
#include "7zTypes.h"
|
||||
|
||||
EXTERN_C_BEGIN
|
||||
|
||||
#ifdef _WIN32
|
||||
|
||||
void My_SetDefaultDllDirectories(void);
|
||||
void LoadSecurityDlls(void);
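/* Illustrative sketch (not from the upstream sources): these helpers are
   normally called once, very early in a Windows program, before other
   libraries are loaded; the main() below is only an assumed host program. */
#if 0
int main(void)
{
  // call the hardening helpers before anything else loads DLLs
  LoadSecurityDlls();
  My_SetDefaultDllDirectories();
  /* ... rest of the program ... */
  return 0;
}
#endif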
|
||||
|
||||
#endif
|
||||
|
||||
EXTERN_C_END
|
||||
|
||||
#endif
|
|
@ -1,8 +1,8 @@
|
|||
/* LzFind.h -- Match finder for LZ algorithms
|
||||
2021-07-13 : Igor Pavlov : Public domain */
|
||||
2023-03-04 : Igor Pavlov : Public domain */
|
||||
|
||||
#ifndef __LZ_FIND_H
|
||||
#define __LZ_FIND_H
|
||||
#ifndef ZIP7_INC_LZ_FIND_H
|
||||
#define ZIP7_INC_LZ_FIND_H
|
||||
|
||||
#include "7zTypes.h"
|
||||
|
||||
|
@ -10,9 +10,9 @@ EXTERN_C_BEGIN
|
|||
|
||||
typedef UInt32 CLzRef;
|
||||
|
||||
typedef struct _CMatchFinder
|
||||
typedef struct
|
||||
{
|
||||
Byte *buffer;
|
||||
const Byte *buffer;
|
||||
UInt32 pos;
|
||||
UInt32 posLimit;
|
||||
UInt32 streamPos; /* wrap over Zero is allowed (streamPos < pos). Use (UInt32)(streamPos - pos) */
|
||||
|
@ -32,8 +32,8 @@ typedef struct _CMatchFinder
|
|||
UInt32 hashMask;
|
||||
UInt32 cutValue;
|
||||
|
||||
Byte *bufferBase;
|
||||
ISeqInStream *stream;
|
||||
Byte *bufBase;
|
||||
ISeqInStreamPtr stream;
|
||||
|
||||
UInt32 blockSize;
|
||||
UInt32 keepSizeBefore;
|
||||
|
@ -43,7 +43,9 @@ typedef struct _CMatchFinder
|
|||
size_t directInputRem;
|
||||
UInt32 historySize;
|
||||
UInt32 fixedHashSize;
|
||||
UInt32 hashSizeSum;
|
||||
Byte numHashBytes_Min;
|
||||
Byte numHashOutBits;
|
||||
Byte _pad2_[2];
|
||||
SRes result;
|
||||
UInt32 crc[256];
|
||||
size_t numRefs;
|
||||
|
@ -69,24 +71,45 @@ void MatchFinder_ReadIfRequired(CMatchFinder *p);
|
|||
|
||||
void MatchFinder_Construct(CMatchFinder *p);
|
||||
|
||||
/* Conditions:
|
||||
historySize <= 3 GB
|
||||
keepAddBufferBefore + matchMaxLen + keepAddBufferAfter < 511MB
|
||||
/* (directInput = 0) is default value.
|
||||
It's required to provide correct (directInput) value
|
||||
before calling MatchFinder_Create().
|
||||
You can set (directInput) by any of the following calls:
|
||||
- MatchFinder_SET_DIRECT_INPUT_BUF()
|
||||
- MatchFinder_SET_STREAM()
|
||||
- MatchFinder_SET_STREAM_MODE()
|
||||
*/
|
||||
|
||||
#define MatchFinder_SET_DIRECT_INPUT_BUF(p, _src_, _srcLen_) { \
|
||||
(p)->stream = NULL; \
|
||||
(p)->directInput = 1; \
|
||||
(p)->buffer = (_src_); \
|
||||
(p)->directInputRem = (_srcLen_); }
|
||||
|
||||
/*
|
||||
#define MatchFinder_SET_STREAM_MODE(p) { \
|
||||
(p)->directInput = 0; }
|
||||
*/
|
||||
|
||||
#define MatchFinder_SET_STREAM(p, _stream_) { \
|
||||
(p)->stream = _stream_; \
|
||||
(p)->directInput = 0; }
|
||||
|
||||
|
||||
int MatchFinder_Create(CMatchFinder *p, UInt32 historySize,
|
||||
UInt32 keepAddBufferBefore, UInt32 matchMaxLen, UInt32 keepAddBufferAfter,
|
||||
ISzAllocPtr alloc);
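/* Illustrative sketch (not from the upstream sources): setting direct-input
   mode with the new macro before MatchFinder_Create(); the window size,
   match length and the g_Alloc allocator (from Alloc.h) are example
   assumptions. */
#if 0
static int mf_open_direct(CMatchFinder *mf, const Byte *src, size_t srcLen)
{
  MatchFinder_Construct(mf);
  // feed the whole input buffer directly instead of an ISeqInStream
  MatchFinder_SET_DIRECT_INPUT_BUF(mf, src, srcLen);
  return MatchFinder_Create(mf, 1 << 22, 0, 273, 0, &g_Alloc);
}
#endif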
|
||||
void MatchFinder_Free(CMatchFinder *p, ISzAllocPtr alloc);
|
||||
void MatchFinder_Normalize3(UInt32 subValue, CLzRef *items, size_t numItems);
|
||||
// void MatchFinder_ReduceOffsets(CMatchFinder *p, UInt32 subValue);
|
||||
|
||||
/*
|
||||
#define Inline_MatchFinder_InitPos(p, val) \
|
||||
#define MatchFinder_INIT_POS(p, val) \
|
||||
(p)->pos = (val); \
|
||||
(p)->streamPos = (val);
|
||||
*/
|
||||
|
||||
#define Inline_MatchFinder_ReduceOffsets(p, subValue) \
|
||||
// void MatchFinder_ReduceOffsets(CMatchFinder *p, UInt32 subValue);
|
||||
#define MatchFinder_REDUCE_OFFSETS(p, subValue) \
|
||||
(p)->pos -= (subValue); \
|
||||
(p)->streamPos -= (subValue);
|
||||
|
||||
|
@ -107,7 +130,7 @@ typedef const Byte * (*Mf_GetPointerToCurrentPos_Func)(void *object);
|
|||
typedef UInt32 * (*Mf_GetMatches_Func)(void *object, UInt32 *distances);
|
||||
typedef void (*Mf_Skip_Func)(void *object, UInt32);
|
||||
|
||||
typedef struct _IMatchFinder
|
||||
typedef struct
|
||||
{
|
||||
Mf_Init_Func Init;
|
||||
Mf_GetNumAvailableBytes_Func GetNumAvailableBytes;
|
||||
|
|
|
@ -0,0 +1,109 @@
|
|||
/* LzFindMt.h -- multithreaded Match finder for LZ algorithms
|
||||
2023-03-05 : Igor Pavlov : Public domain */
|
||||
|
||||
#ifndef ZIP7_INC_LZ_FIND_MT_H
|
||||
#define ZIP7_INC_LZ_FIND_MT_H
|
||||
|
||||
#include "LzFind.h"
|
||||
#include "Threads.h"
|
||||
|
||||
EXTERN_C_BEGIN
|
||||
|
||||
typedef struct
|
||||
{
|
||||
UInt32 numProcessedBlocks;
|
||||
CThread thread;
|
||||
UInt64 affinity;
|
||||
|
||||
BoolInt wasCreated;
|
||||
BoolInt needStart;
|
||||
BoolInt csWasInitialized;
|
||||
BoolInt csWasEntered;
|
||||
|
||||
BoolInt exit;
|
||||
BoolInt stopWriting;
|
||||
|
||||
CAutoResetEvent canStart;
|
||||
CAutoResetEvent wasStopped;
|
||||
CSemaphore freeSemaphore;
|
||||
CSemaphore filledSemaphore;
|
||||
CCriticalSection cs;
|
||||
// UInt32 numBlocks_Sent;
|
||||
} CMtSync;
|
||||
|
||||
typedef UInt32 * (*Mf_Mix_Matches)(void *p, UInt32 matchMinPos, UInt32 *distances);
|
||||
|
||||
/* kMtCacheLineDummy must be >= size_of_CPU_cache_line */
|
||||
#define kMtCacheLineDummy 128
|
||||
|
||||
typedef void (*Mf_GetHeads)(const Byte *buffer, UInt32 pos,
|
||||
UInt32 *hash, UInt32 hashMask, UInt32 *heads, UInt32 numHeads, const UInt32 *crc);
|
||||
|
||||
typedef struct
|
||||
{
|
||||
/* LZ */
|
||||
const Byte *pointerToCurPos;
|
||||
UInt32 *btBuf;
|
||||
const UInt32 *btBufPos;
|
||||
const UInt32 *btBufPosLimit;
|
||||
UInt32 lzPos;
|
||||
UInt32 btNumAvailBytes;
|
||||
|
||||
UInt32 *hash;
|
||||
UInt32 fixedHashSize;
|
||||
// UInt32 hash4Mask;
|
||||
UInt32 historySize;
|
||||
const UInt32 *crc;
|
||||
|
||||
Mf_Mix_Matches MixMatchesFunc;
|
||||
UInt32 failure_LZ_BT; // failure in BT transferred to LZ
|
||||
// UInt32 failure_LZ_LZ; // failure in LZ tables
|
||||
UInt32 failureBuf[1];
|
||||
// UInt32 crc[256];
|
||||
|
||||
/* LZ + BT */
|
||||
CMtSync btSync;
|
||||
Byte btDummy[kMtCacheLineDummy];
|
||||
|
||||
/* BT */
|
||||
UInt32 *hashBuf;
|
||||
UInt32 hashBufPos;
|
||||
UInt32 hashBufPosLimit;
|
||||
UInt32 hashNumAvail;
|
||||
UInt32 failure_BT;
|
||||
|
||||
|
||||
CLzRef *son;
|
||||
UInt32 matchMaxLen;
|
||||
UInt32 numHashBytes;
|
||||
UInt32 pos;
|
||||
const Byte *buffer;
|
||||
UInt32 cyclicBufferPos;
|
||||
UInt32 cyclicBufferSize; /* it must be = (historySize + 1) */
|
||||
UInt32 cutValue;
|
||||
|
||||
/* BT + Hash */
|
||||
CMtSync hashSync;
|
||||
/* Byte hashDummy[kMtCacheLineDummy]; */
|
||||
|
||||
/* Hash */
|
||||
Mf_GetHeads GetHeadsFunc;
|
||||
CMatchFinder *MatchFinder;
|
||||
// CMatchFinder MatchFinder;
|
||||
} CMatchFinderMt;
|
||||
|
||||
// only for Mt part
|
||||
void MatchFinderMt_Construct(CMatchFinderMt *p);
|
||||
void MatchFinderMt_Destruct(CMatchFinderMt *p, ISzAllocPtr alloc);
|
||||
|
||||
SRes MatchFinderMt_Create(CMatchFinderMt *p, UInt32 historySize, UInt32 keepAddBufferBefore,
|
||||
UInt32 matchMaxLen, UInt32 keepAddBufferAfter, ISzAllocPtr alloc);
|
||||
void MatchFinderMt_CreateVTable(CMatchFinderMt *p, IMatchFinder2 *vTable);
|
||||
|
||||
/* call MatchFinderMt_InitMt() before IMatchFinder::Init() */
|
||||
SRes MatchFinderMt_InitMt(CMatchFinderMt *p);
|
||||
void MatchFinderMt_ReleaseStream(CMatchFinderMt *p);
|
||||
|
||||
EXTERN_C_END
|
||||
|
||||
#endif
|
|
@ -1,8 +1,8 @@
|
|||
/* LzHash.h -- HASH functions for LZ algorithms
|
||||
2019-10-30 : Igor Pavlov : Public domain */
|
||||
/* LzHash.h -- HASH constants for LZ algorithms
|
||||
2023-03-05 : Igor Pavlov : Public domain */
|
||||
|
||||
#ifndef __LZ_HASH_H
|
||||
#define __LZ_HASH_H
|
||||
#ifndef ZIP7_INC_LZ_HASH_H
|
||||
#define ZIP7_INC_LZ_HASH_H
|
||||
|
||||
/*
|
||||
(kHash2Size >= (1 << 8)) : Required
|
||||
|
|
|
@ -1,8 +1,8 @@
|
|||
/* Lzma2Dec.h -- LZMA2 Decoder
|
||||
2018-02-19 : Igor Pavlov : Public domain */
|
||||
2023-03-03 : Igor Pavlov : Public domain */
|
||||
|
||||
#ifndef __LZMA2_DEC_H
|
||||
#define __LZMA2_DEC_H
|
||||
#ifndef ZIP7_INC_LZMA2_DEC_H
|
||||
#define ZIP7_INC_LZMA2_DEC_H
|
||||
|
||||
#include "LzmaDec.h"
|
||||
|
||||
|
@ -22,9 +22,10 @@ typedef struct
|
|||
CLzmaDec decoder;
|
||||
} CLzma2Dec;
|
||||
|
||||
#define Lzma2Dec_Construct(p) LzmaDec_Construct(&(p)->decoder)
|
||||
#define Lzma2Dec_FreeProbs(p, alloc) LzmaDec_FreeProbs(&(p)->decoder, alloc)
|
||||
#define Lzma2Dec_Free(p, alloc) LzmaDec_Free(&(p)->decoder, alloc)
|
||||
#define Lzma2Dec_CONSTRUCT(p) LzmaDec_CONSTRUCT(&(p)->decoder)
|
||||
#define Lzma2Dec_Construct(p) Lzma2Dec_CONSTRUCT(p)
|
||||
#define Lzma2Dec_FreeProbs(p, alloc) LzmaDec_FreeProbs(&(p)->decoder, alloc)
|
||||
#define Lzma2Dec_Free(p, alloc) LzmaDec_Free(&(p)->decoder, alloc)
|
||||
|
||||
SRes Lzma2Dec_AllocateProbs(CLzma2Dec *p, Byte prop, ISzAllocPtr alloc);
|
||||
SRes Lzma2Dec_Allocate(CLzma2Dec *p, Byte prop, ISzAllocPtr alloc);
|
||||
|
@ -90,7 +91,7 @@ Lzma2Dec_GetUnpackExtra() returns the value that shows
|
|||
at current input position.
|
||||
*/
|
||||
|
||||
#define Lzma2Dec_GetUnpackExtra(p) ((p)->isExtraMode ? (p)->unpackSize : 0);
|
||||
#define Lzma2Dec_GetUnpackExtra(p) ((p)->isExtraMode ? (p)->unpackSize : 0)
|
||||
|
||||
|
||||
/* ---------- One Call Interface ---------- */
|
||||
|
|
|
@ -1,8 +1,8 @@
|
|||
/* Lzma2DecMt.h -- LZMA2 Decoder Multi-thread
|
||||
2018-02-17 : Igor Pavlov : Public domain */
|
||||
2023-04-13 : Igor Pavlov : Public domain */
|
||||
|
||||
#ifndef __LZMA2_DEC_MT_H
|
||||
#define __LZMA2_DEC_MT_H
|
||||
#ifndef ZIP7_INC_LZMA2_DEC_MT_H
|
||||
#define ZIP7_INC_LZMA2_DEC_MT_H
|
||||
|
||||
#include "7zTypes.h"
|
||||
|
||||
|
@ -13,7 +13,7 @@ typedef struct
|
|||
size_t inBufSize_ST;
|
||||
size_t outStep_ST;
|
||||
|
||||
#ifndef _7ZIP_ST
|
||||
#ifndef Z7_ST
|
||||
unsigned numThreads;
|
||||
size_t inBufSize_MT;
|
||||
size_t outBlockMax;
|
||||
|
@ -38,7 +38,9 @@ SRes:
|
|||
SZ_ERROR_THREAD - error in multithreading functions (only for Mt version)
|
||||
*/
|
||||
|
||||
typedef void * CLzma2DecMtHandle;
|
||||
typedef struct CLzma2DecMt CLzma2DecMt;
|
||||
typedef CLzma2DecMt * CLzma2DecMtHandle;
|
||||
// Z7_DECLARE_HANDLE(CLzma2DecMtHandle)
|
||||
|
||||
CLzma2DecMtHandle Lzma2DecMt_Create(ISzAllocPtr alloc, ISzAllocPtr allocMid);
|
||||
void Lzma2DecMt_Destroy(CLzma2DecMtHandle p);
|
||||
|
@ -46,11 +48,11 @@ void Lzma2DecMt_Destroy(CLzma2DecMtHandle p);
|
|||
SRes Lzma2DecMt_Decode(CLzma2DecMtHandle p,
|
||||
Byte prop,
|
||||
const CLzma2DecMtProps *props,
|
||||
ISeqOutStream *outStream,
|
||||
ISeqOutStreamPtr outStream,
|
||||
const UInt64 *outDataSize, // NULL means undefined
|
||||
int finishMode, // 0 - partial unpacking is allowed, 1 - if lzma2 stream must be finished
|
||||
// Byte *outBuf, size_t *outBufSize,
|
||||
ISeqInStream *inStream,
|
||||
ISeqInStreamPtr inStream,
|
||||
// const Byte *inData, size_t inDataSize,
|
||||
|
||||
// out variables:
|
||||
|
@ -58,7 +60,7 @@ SRes Lzma2DecMt_Decode(CLzma2DecMtHandle p,
|
|||
int *isMT, /* out: (*isMT == 0), if single thread decoding was used */
|
||||
|
||||
// UInt64 *outProcessed,
|
||||
ICompressProgress *progress);
|
||||
ICompressProgressPtr progress);
|
||||
|
||||
|
||||
/* ---------- Read from CLzma2DecMtHandle Interface ---------- */
|
||||
|
@ -67,7 +69,7 @@ SRes Lzma2DecMt_Init(CLzma2DecMtHandle pp,
|
|||
Byte prop,
|
||||
const CLzma2DecMtProps *props,
|
||||
const UInt64 *outDataSize, int finishMode,
|
||||
ISeqInStream *inStream);
|
||||
ISeqInStreamPtr inStream);
|
||||
|
||||
SRes Lzma2DecMt_Read(CLzma2DecMtHandle pp,
|
||||
Byte *data, size_t *outSize,
|
||||
|
|
|
@ -1,15 +1,15 @@
|
|||
/* Lzma2Enc.h -- LZMA2 Encoder
|
||||
2017-07-27 : Igor Pavlov : Public domain */
|
||||
2023-04-13 : Igor Pavlov : Public domain */
|
||||
|
||||
#ifndef __LZMA2_ENC_H
|
||||
#define __LZMA2_ENC_H
|
||||
#ifndef ZIP7_INC_LZMA2_ENC_H
|
||||
#define ZIP7_INC_LZMA2_ENC_H
|
||||
|
||||
#include "LzmaEnc.h"
|
||||
|
||||
EXTERN_C_BEGIN
|
||||
|
||||
#define LZMA2_ENC_PROPS__BLOCK_SIZE__AUTO 0
|
||||
#define LZMA2_ENC_PROPS__BLOCK_SIZE__SOLID ((UInt64)(Int64)-1)
|
||||
#define LZMA2_ENC_PROPS_BLOCK_SIZE_AUTO 0
|
||||
#define LZMA2_ENC_PROPS_BLOCK_SIZE_SOLID ((UInt64)(Int64)-1)
|
||||
|
||||
typedef struct
|
||||
{
|
||||
|
@ -36,7 +36,9 @@ SRes:
|
|||
SZ_ERROR_THREAD - error in multithreading functions (only for Mt version)
|
||||
*/
|
||||
|
||||
typedef void * CLzma2EncHandle;
|
||||
typedef struct CLzma2Enc CLzma2Enc;
|
||||
typedef CLzma2Enc * CLzma2EncHandle;
|
||||
// Z7_DECLARE_HANDLE(CLzma2EncHandle)
|
||||
|
||||
CLzma2EncHandle Lzma2Enc_Create(ISzAllocPtr alloc, ISzAllocPtr allocBig);
|
||||
void Lzma2Enc_Destroy(CLzma2EncHandle p);
|
||||
|
@ -44,11 +46,11 @@ SRes Lzma2Enc_SetProps(CLzma2EncHandle p, const CLzma2EncProps *props);
|
|||
void Lzma2Enc_SetDataSize(CLzma2EncHandle p, UInt64 expectedDataSiize);
|
||||
Byte Lzma2Enc_WriteProperties(CLzma2EncHandle p);
|
||||
SRes Lzma2Enc_Encode2(CLzma2EncHandle p,
|
||||
ISeqOutStream *outStream,
|
||||
ISeqOutStreamPtr outStream,
|
||||
Byte *outBuf, size_t *outBufSize,
|
||||
ISeqInStream *inStream,
|
||||
ISeqInStreamPtr inStream,
|
||||
const Byte *inData, size_t inDataSize,
|
||||
ICompressProgress *progress);
|
||||
ICompressProgressPtr progress);
|
||||
|
||||
EXTERN_C_END
|
||||
|
||||
|
|
|
@ -1,8 +1,8 @@
|
|||
/* Lzma86.h -- LZMA + x86 (BCJ) Filter
|
||||
2013-01-18 : Igor Pavlov : Public domain */
|
||||
2023-03-03 : Igor Pavlov : Public domain */
|
||||
|
||||
#ifndef __LZMA86_H
|
||||
#define __LZMA86_H
|
||||
#ifndef ZIP7_INC_LZMA86_H
|
||||
#define ZIP7_INC_LZMA86_H
|
||||
|
||||
#include "7zTypes.h"
|
||||
|
||||
|
|
|
@ -1,19 +1,19 @@
|
|||
/* LzmaDec.h -- LZMA Decoder
|
||||
2020-03-19 : Igor Pavlov : Public domain */
|
||||
2023-04-02 : Igor Pavlov : Public domain */
|
||||
|
||||
#ifndef __LZMA_DEC_H
|
||||
#define __LZMA_DEC_H
|
||||
#ifndef ZIP7_INC_LZMA_DEC_H
|
||||
#define ZIP7_INC_LZMA_DEC_H
|
||||
|
||||
#include "7zTypes.h"
|
||||
|
||||
EXTERN_C_BEGIN
|
||||
|
||||
/* #define _LZMA_PROB32 */
|
||||
/* _LZMA_PROB32 can increase the speed on some CPUs,
|
||||
/* #define Z7_LZMA_PROB32 */
|
||||
/* Z7_LZMA_PROB32 can increase the speed on some CPUs,
|
||||
but memory usage for CLzmaDec::probs will be doubled in that case */
|
||||
|
||||
typedef
|
||||
#ifdef _LZMA_PROB32
|
||||
#ifdef Z7_LZMA_PROB32
|
||||
UInt32
|
||||
#else
|
||||
UInt16
|
||||
|
@ -25,7 +25,7 @@ typedef
|
|||
|
||||
#define LZMA_PROPS_SIZE 5
|
||||
|
||||
typedef struct _CLzmaProps
|
||||
typedef struct
|
||||
{
|
||||
Byte lc;
|
||||
Byte lp;
|
||||
|
@ -73,7 +73,8 @@ typedef struct
|
|||
Byte tempBuf[LZMA_REQUIRED_INPUT_MAX];
|
||||
} CLzmaDec;
|
||||
|
||||
#define LzmaDec_Construct(p) { (p)->dic = NULL; (p)->probs = NULL; }
|
||||
#define LzmaDec_CONSTRUCT(p) { (p)->dic = NULL; (p)->probs = NULL; }
|
||||
#define LzmaDec_Construct(p) LzmaDec_CONSTRUCT(p)
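/* Illustrative sketch (not from the upstream sources): a typical construct /
   allocate / init sequence for the decoder object; LzmaDec_Allocate() and
   RINOK() come from this SDK but are not shown in this hunk (assumption). */
#if 0
static SRes dec_open(CLzmaDec *dec, const Byte *props, ISzAllocPtr alloc)
{
  LzmaDec_CONSTRUCT(dec)
  RINOK(LzmaDec_Allocate(dec, props, LZMA_PROPS_SIZE, alloc))
  LzmaDec_Init(dec);
  return SZ_OK;
}
#endif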
|
||||
|
||||
void LzmaDec_Init(CLzmaDec *p);
|
||||
|
||||
|
|
|
@ -1,8 +1,8 @@
|
|||
/* LzmaEnc.h -- LZMA Encoder
|
||||
2019-10-30 : Igor Pavlov : Public domain */
|
||||
2023-04-13 : Igor Pavlov : Public domain */
|
||||
|
||||
#ifndef __LZMA_ENC_H
|
||||
#define __LZMA_ENC_H
|
||||
#ifndef ZIP7_INC_LZMA_ENC_H
|
||||
#define ZIP7_INC_LZMA_ENC_H
|
||||
|
||||
#include "7zTypes.h"
|
||||
|
||||
|
@ -10,7 +10,7 @@ EXTERN_C_BEGIN
|
|||
|
||||
#define LZMA_PROPS_SIZE 5
|
||||
|
||||
typedef struct _CLzmaEncProps
|
||||
typedef struct
|
||||
{
|
||||
int level; /* 0 <= level <= 9 */
|
||||
UInt32 dictSize; /* (1 << 12) <= dictSize <= (1 << 27) for 32-bit version
|
||||
|
@ -23,10 +23,13 @@ typedef struct _CLzmaEncProps
|
|||
int fb; /* 5 <= fb <= 273, default = 32 */
|
||||
int btMode; /* 0 - hashChain Mode, 1 - binTree mode - normal, default = 1 */
|
||||
int numHashBytes; /* 2, 3 or 4, default = 4 */
|
||||
unsigned numHashOutBits; /* default = ? */
|
||||
UInt32 mc; /* 1 <= mc <= (1 << 30), default = 32 */
|
||||
unsigned writeEndMark; /* 0 - do not write EOPM, 1 - write EOPM, default = 0 */
|
||||
int numThreads; /* 1 or 2, default = 2 */
|
||||
|
||||
// int _pad;
|
||||
|
||||
UInt64 reduceSize; /* estimated size of data that will be compressed. default = (UInt64)(Int64)-1.
|
||||
Encoder uses this value to reduce dictionary size */
|
||||
|
||||
|
@ -51,7 +54,9 @@ SRes:
|
|||
SZ_ERROR_THREAD - error in multithreading functions (only for Mt version)
|
||||
*/
|
||||
|
||||
typedef void * CLzmaEncHandle;
|
||||
typedef struct CLzmaEnc CLzmaEnc;
|
||||
typedef CLzmaEnc * CLzmaEncHandle;
|
||||
// Z7_DECLARE_HANDLE(CLzmaEncHandle)
|
||||
|
||||
CLzmaEncHandle LzmaEnc_Create(ISzAllocPtr alloc);
|
||||
void LzmaEnc_Destroy(CLzmaEncHandle p, ISzAllocPtr alloc, ISzAllocPtr allocBig);
|
||||
|
@ -61,17 +66,17 @@ void LzmaEnc_SetDataSize(CLzmaEncHandle p, UInt64 expectedDataSiize);
|
|||
SRes LzmaEnc_WriteProperties(CLzmaEncHandle p, Byte *properties, SizeT *size);
|
||||
unsigned LzmaEnc_IsWriteEndMark(CLzmaEncHandle p);
|
||||
|
||||
SRes LzmaEnc_Encode(CLzmaEncHandle p, ISeqOutStream *outStream, ISeqInStream *inStream,
|
||||
ICompressProgress *progress, ISzAllocPtr alloc, ISzAllocPtr allocBig);
|
||||
SRes LzmaEnc_Encode(CLzmaEncHandle p, ISeqOutStreamPtr outStream, ISeqInStreamPtr inStream,
|
||||
ICompressProgressPtr progress, ISzAllocPtr alloc, ISzAllocPtr allocBig);
|
||||
SRes LzmaEnc_MemEncode(CLzmaEncHandle p, Byte *dest, SizeT *destLen, const Byte *src, SizeT srcLen,
|
||||
int writeEndMark, ICompressProgress *progress, ISzAllocPtr alloc, ISzAllocPtr allocBig);
|
||||
int writeEndMark, ICompressProgressPtr progress, ISzAllocPtr alloc, ISzAllocPtr allocBig);
|
||||
|
||||
|
||||
/* ---------- One Call Interface ---------- */
|
||||
|
||||
SRes LzmaEncode(Byte *dest, SizeT *destLen, const Byte *src, SizeT srcLen,
|
||||
const CLzmaEncProps *props, Byte *propsEncoded, SizeT *propsSize, int writeEndMark,
|
||||
ICompressProgress *progress, ISzAllocPtr alloc, ISzAllocPtr allocBig);
|
||||
ICompressProgressPtr progress, ISzAllocPtr alloc, ISzAllocPtr allocBig);
|
||||
|
||||
EXTERN_C_END
|
||||
|
||||
|
|
|
@ -1,14 +1,14 @@
|
|||
/* LzmaLib.h -- LZMA library interface
|
||||
2021-04-03 : Igor Pavlov : Public domain */
|
||||
2023-04-02 : Igor Pavlov : Public domain */
|
||||
|
||||
#ifndef __LZMA_LIB_H
|
||||
#define __LZMA_LIB_H
|
||||
#ifndef ZIP7_INC_LZMA_LIB_H
|
||||
#define ZIP7_INC_LZMA_LIB_H
|
||||
|
||||
#include "7zTypes.h"
|
||||
|
||||
EXTERN_C_BEGIN
|
||||
|
||||
#define MY_STDAPI int MY_STD_CALL
|
||||
#define Z7_STDAPI int Z7_STDCALL
|
||||
|
||||
#define LZMA_PROPS_SIZE 5
|
||||
|
||||
|
@ -100,7 +100,7 @@ Returns:
|
|||
SZ_ERROR_THREAD - errors in multithreading functions (only for Mt version)
|
||||
*/
|
||||
|
||||
MY_STDAPI LzmaCompress(unsigned char *dest, size_t *destLen, const unsigned char *src, size_t srcLen,
|
||||
Z7_STDAPI LzmaCompress(unsigned char *dest, size_t *destLen, const unsigned char *src, size_t srcLen,
|
||||
unsigned char *outProps, size_t *outPropsSize, /* *outPropsSize must be = 5 */
|
||||
int level, /* 0 <= level <= 9, default = 5 */
|
||||
unsigned dictSize, /* default = (1 << 24) */
|
||||
|
@ -130,7 +130,7 @@ Returns:
|
|||
SZ_ERROR_INPUT_EOF - it needs more bytes in input buffer (src)
|
||||
*/
|
||||
|
||||
MY_STDAPI LzmaUncompress(unsigned char *dest, size_t *destLen, const unsigned char *src, SizeT *srcLen,
|
||||
Z7_STDAPI LzmaUncompress(unsigned char *dest, size_t *destLen, const unsigned char *src, SizeT *srcLen,
|
||||
const unsigned char *props, size_t propsSize);
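/* Illustrative sketch (not from the upstream sources): a compress /
   uncompress round trip through the one-call API above. The trailing
   lc/lp/pb/fb/numThreads arguments follow the full LzmaCompress()
   declaration in this header (not visible in this hunk), and the buffer
   sizes and level are arbitrary example values. */
#if 0
static int lzma_roundtrip(const unsigned char *src, size_t srcLen,
    unsigned char *packed, size_t packedCap,
    unsigned char *unpacked, size_t unpackedCap)
{
  unsigned char props[LZMA_PROPS_SIZE];
  size_t propsSize = LZMA_PROPS_SIZE;
  size_t packedLen = packedCap;
  int res = LzmaCompress(packed, &packedLen, src, srcLen,
      props, &propsSize,
      5, 1 << 24, 3, 0, 2, 32, 1);   // level 5, 16 MB dict, defaults, 1 thread
  if (res == SZ_OK)
  {
    size_t unpackedLen = unpackedCap;
    SizeT srcLen2 = packedLen;
    res = LzmaUncompress(unpacked, &unpackedLen, packed, &srcLen2,
        props, propsSize);
  }
  return res;
}
#endif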
|
||||
|
||||
EXTERN_C_END
|
||||
|
|
|
@ -0,0 +1,141 @@
|
|||
/* MtCoder.h -- Multi-thread Coder
|
||||
2023-04-13 : Igor Pavlov : Public domain */
|
||||
|
||||
#ifndef ZIP7_INC_MT_CODER_H
|
||||
#define ZIP7_INC_MT_CODER_H
|
||||
|
||||
#include "MtDec.h"
|
||||
|
||||
EXTERN_C_BEGIN
|
||||
|
||||
/*
|
||||
if ( defined MTCODER_USE_WRITE_THREAD) : main thread writes all data blocks to output stream
|
||||
if (not defined MTCODER_USE_WRITE_THREAD) : any coder thread can write data blocks to output stream
|
||||
*/
|
||||
/* #define MTCODER_USE_WRITE_THREAD */
|
||||
|
||||
#ifndef Z7_ST
|
||||
#define MTCODER_GET_NUM_BLOCKS_FROM_THREADS(numThreads) ((numThreads) + (numThreads) / 8 + 1)
|
||||
#define MTCODER_THREADS_MAX 64
|
||||
#define MTCODER_BLOCKS_MAX (MTCODER_GET_NUM_BLOCKS_FROM_THREADS(MTCODER_THREADS_MAX) + 3)
|
||||
#else
|
||||
#define MTCODER_THREADS_MAX 1
|
||||
#define MTCODER_BLOCKS_MAX 1
|
||||
#endif
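/* Worked example for the limits above: with 8 coder threads,
   MTCODER_GET_NUM_BLOCKS_FROM_THREADS(8) = 8 + 8/8 + 1 = 10 blocks, and the
   fixed tables below are sized for MTCODER_BLOCKS_MAX = (64 + 64/8 + 1) + 3 = 76. */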
|
||||
|
||||
|
||||
#ifndef Z7_ST
|
||||
|
||||
|
||||
typedef struct
|
||||
{
|
||||
ICompressProgress vt;
|
||||
CMtProgress *mtProgress;
|
||||
UInt64 inSize;
|
||||
UInt64 outSize;
|
||||
} CMtProgressThunk;
|
||||
|
||||
void MtProgressThunk_CreateVTable(CMtProgressThunk *p);
|
||||
|
||||
#define MtProgressThunk_INIT(p) { (p)->inSize = 0; (p)->outSize = 0; }
|
||||
|
||||
|
||||
struct CMtCoder_;
|
||||
|
||||
|
||||
typedef struct
|
||||
{
|
||||
struct CMtCoder_ *mtCoder;
|
||||
unsigned index;
|
||||
int stop;
|
||||
Byte *inBuf;
|
||||
|
||||
CAutoResetEvent startEvent;
|
||||
CThread thread;
|
||||
} CMtCoderThread;
|
||||
|
||||
|
||||
typedef struct
|
||||
{
|
||||
SRes (*Code)(void *p, unsigned coderIndex, unsigned outBufIndex,
|
||||
const Byte *src, size_t srcSize, int finished);
|
||||
SRes (*Write)(void *p, unsigned outBufIndex);
|
||||
} IMtCoderCallback2;
|
||||
|
||||
|
||||
typedef struct
|
||||
{
|
||||
SRes res;
|
||||
unsigned bufIndex;
|
||||
BoolInt finished;
|
||||
} CMtCoderBlock;
|
||||
|
||||
|
||||
typedef struct CMtCoder_
|
||||
{
|
||||
/* input variables */
|
||||
|
||||
size_t blockSize; /* size of input block */
|
||||
unsigned numThreadsMax;
|
||||
UInt64 expectedDataSize;
|
||||
|
||||
ISeqInStreamPtr inStream;
|
||||
const Byte *inData;
|
||||
size_t inDataSize;
|
||||
|
||||
ICompressProgressPtr progress;
|
||||
ISzAllocPtr allocBig;
|
||||
|
||||
IMtCoderCallback2 *mtCallback;
|
||||
void *mtCallbackObject;
|
||||
|
||||
|
||||
/* internal variables */
|
||||
|
||||
size_t allocatedBufsSize;
|
||||
|
||||
CAutoResetEvent readEvent;
|
||||
CSemaphore blocksSemaphore;
|
||||
|
||||
BoolInt stopReading;
|
||||
SRes readRes;
|
||||
|
||||
#ifdef MTCODER_USE_WRITE_THREAD
|
||||
CAutoResetEvent writeEvents[MTCODER_BLOCKS_MAX];
|
||||
#else
|
||||
CAutoResetEvent finishedEvent;
|
||||
SRes writeRes;
|
||||
unsigned writeIndex;
|
||||
Byte ReadyBlocks[MTCODER_BLOCKS_MAX];
|
||||
LONG numFinishedThreads;
|
||||
#endif
|
||||
|
||||
unsigned numStartedThreadsLimit;
|
||||
unsigned numStartedThreads;
|
||||
|
||||
unsigned numBlocksMax;
|
||||
unsigned blockIndex;
|
||||
UInt64 readProcessed;
|
||||
|
||||
CCriticalSection cs;
|
||||
|
||||
unsigned freeBlockHead;
|
||||
unsigned freeBlockList[MTCODER_BLOCKS_MAX];
|
||||
|
||||
CMtProgress mtProgress;
|
||||
CMtCoderBlock blocks[MTCODER_BLOCKS_MAX];
|
||||
CMtCoderThread threads[MTCODER_THREADS_MAX];
|
||||
} CMtCoder;
|
||||
|
||||
|
||||
void MtCoder_Construct(CMtCoder *p);
|
||||
void MtCoder_Destruct(CMtCoder *p);
|
||||
SRes MtCoder_Code(CMtCoder *p);
|
||||
|
||||
|
||||
#endif
|
||||
|
||||
|
||||
EXTERN_C_END
|
||||
|
||||
#endif
|
|
@ -0,0 +1,202 @@
|
|||
/* MtDec.h -- Multi-thread Decoder
|
||||
2023-04-02 : Igor Pavlov : Public domain */
|
||||
|
||||
#ifndef ZIP7_INC_MT_DEC_H
|
||||
#define ZIP7_INC_MT_DEC_H
|
||||
|
||||
#include "7zTypes.h"
|
||||
|
||||
#ifndef Z7_ST
|
||||
#include "Threads.h"
|
||||
#endif
|
||||
|
||||
EXTERN_C_BEGIN
|
||||
|
||||
#ifndef Z7_ST
|
||||
|
||||
#ifndef Z7_ST
|
||||
#define MTDEC_THREADS_MAX 32
|
||||
#else
|
||||
#define MTDEC_THREADS_MAX 1
|
||||
#endif
|
||||
|
||||
|
||||
typedef struct
|
||||
{
|
||||
ICompressProgressPtr progress;
|
||||
SRes res;
|
||||
UInt64 totalInSize;
|
||||
UInt64 totalOutSize;
|
||||
CCriticalSection cs;
|
||||
} CMtProgress;
|
||||
|
||||
void MtProgress_Init(CMtProgress *p, ICompressProgressPtr progress);
|
||||
SRes MtProgress_Progress_ST(CMtProgress *p);
|
||||
SRes MtProgress_ProgressAdd(CMtProgress *p, UInt64 inSize, UInt64 outSize);
|
||||
SRes MtProgress_GetError(CMtProgress *p);
|
||||
void MtProgress_SetError(CMtProgress *p, SRes res);
|
||||
|
||||
struct CMtDec;
|
||||
|
||||
typedef struct
|
||||
{
|
||||
struct CMtDec_ *mtDec;
|
||||
unsigned index;
|
||||
void *inBuf;
|
||||
|
||||
size_t inDataSize_Start; // size of input data in start block
|
||||
UInt64 inDataSize; // total size of input data in all blocks
|
||||
|
||||
CThread thread;
|
||||
CAutoResetEvent canRead;
|
||||
CAutoResetEvent canWrite;
|
||||
void *allocaPtr;
|
||||
} CMtDecThread;
|
||||
|
||||
void MtDecThread_FreeInBufs(CMtDecThread *t);
|
||||
|
||||
|
||||
typedef enum
|
||||
{
|
||||
MTDEC_PARSE_CONTINUE, // continue this block with more input data
|
||||
MTDEC_PARSE_OVERFLOW, // MT buffers overflow, need switch to single-thread
|
||||
MTDEC_PARSE_NEW, // new block
|
||||
MTDEC_PARSE_END // end of block threading. But we still can return to threading after Write(&needContinue)
|
||||
} EMtDecParseState;
|
||||
|
||||
typedef struct
|
||||
{
|
||||
// in
|
||||
int startCall;
|
||||
const Byte *src;
|
||||
size_t srcSize;
|
||||
// in : (srcSize == 0) is allowed
|
||||
// out : it's allowed to return less than was actually used ?
|
||||
int srcFinished;
|
||||
|
||||
// out
|
||||
EMtDecParseState state;
|
||||
BoolInt canCreateNewThread;
|
||||
UInt64 outPos; // check it (size_t)
|
||||
} CMtDecCallbackInfo;
|
||||
|
||||
|
||||
typedef struct
|
||||
{
|
||||
void (*Parse)(void *p, unsigned coderIndex, CMtDecCallbackInfo *ci);
|
||||
|
||||
// PreCode() and Code():
|
||||
// (SRes_return_result != SZ_OK) means stop decoding, no need for more blocks
|
||||
SRes (*PreCode)(void *p, unsigned coderIndex);
|
||||
SRes (*Code)(void *p, unsigned coderIndex,
|
||||
const Byte *src, size_t srcSize, int srcFinished,
|
||||
UInt64 *inCodePos, UInt64 *outCodePos, int *stop);
|
||||
// stop - means stop another Code calls
|
||||
|
||||
|
||||
/* Write() must be called, if Parse() was called
|
||||
set (needWrite) if
|
||||
{
|
||||
&& (was not interrupted by progress)
|
||||
&& (was not interrupted in previous block)
|
||||
}
|
||||
|
||||
out:
|
||||
if (*needContinue), the decoder still needs to continue decoding with a new iteration,
even after MTDEC_PARSE_END
if (*canRecode), we didn't flush the current block data, so we can still decode the current block later.
|
||||
*/
|
||||
SRes (*Write)(void *p, unsigned coderIndex,
|
||||
BoolInt needWriteToStream,
|
||||
const Byte *src, size_t srcSize, BoolInt isCross,
|
||||
// int srcFinished,
|
||||
BoolInt *needContinue,
|
||||
BoolInt *canRecode);
|
||||
|
||||
} IMtDecCallback2;
|
||||
|
||||
|
||||
|
||||
typedef struct CMtDec_
|
||||
{
|
||||
/* input variables */
|
||||
|
||||
size_t inBufSize; /* size of input block */
|
||||
unsigned numThreadsMax;
|
||||
// size_t inBlockMax;
|
||||
unsigned numThreadsMax_2;
|
||||
|
||||
ISeqInStreamPtr inStream;
|
||||
// const Byte *inData;
|
||||
// size_t inDataSize;
|
||||
|
||||
ICompressProgressPtr progress;
|
||||
ISzAllocPtr alloc;
|
||||
|
||||
IMtDecCallback2 *mtCallback;
|
||||
void *mtCallbackObject;
|
||||
|
||||
|
||||
/* internal variables */
|
||||
|
||||
size_t allocatedBufsSize;
|
||||
|
||||
BoolInt exitThread;
|
||||
WRes exitThreadWRes;
|
||||
|
||||
UInt64 blockIndex;
|
||||
BoolInt isAllocError;
|
||||
BoolInt overflow;
|
||||
SRes threadingErrorSRes;
|
||||
|
||||
BoolInt needContinue;
|
||||
|
||||
// CAutoResetEvent finishedEvent;
|
||||
|
||||
SRes readRes;
|
||||
SRes codeRes;
|
||||
|
||||
BoolInt wasInterrupted;
|
||||
|
||||
unsigned numStartedThreads_Limit;
|
||||
unsigned numStartedThreads;
|
||||
|
||||
Byte *crossBlock;
|
||||
size_t crossStart;
|
||||
size_t crossEnd;
|
||||
UInt64 readProcessed;
|
||||
BoolInt readWasFinished;
|
||||
UInt64 inProcessed;
|
||||
|
||||
unsigned filledThreadStart;
|
||||
unsigned numFilledThreads;
|
||||
|
||||
#ifndef Z7_ST
|
||||
BoolInt needInterrupt;
|
||||
UInt64 interruptIndex;
|
||||
CMtProgress mtProgress;
|
||||
CMtDecThread threads[MTDEC_THREADS_MAX];
|
||||
#endif
|
||||
} CMtDec;
|
||||
|
||||
|
||||
void MtDec_Construct(CMtDec *p);
|
||||
void MtDec_Destruct(CMtDec *p);
|
||||
|
||||
/*
|
||||
MtDec_Code() returns:
|
||||
SZ_OK - in most cases
|
||||
MY_SRes_HRESULT_FROM_WRes(WRes_error) - in case of unexpected error in threading function
|
||||
*/
|
||||
|
||||
SRes MtDec_Code(CMtDec *p);
|
||||
Byte *MtDec_GetCrossBuff(CMtDec *p);
|
||||
|
||||
int MtDec_PrepareRead(CMtDec *p);
|
||||
const Byte *MtDec_Read(CMtDec *p, size_t *inLim);
|
||||
|
||||
#endif
|
||||
|
||||
EXTERN_C_END
|
||||
|
||||
#endif
|
|
@ -1,9 +1,9 @@
|
|||
/* Ppmd.h -- PPMD codec common code
|
||||
2021-04-13 : Igor Pavlov : Public domain
|
||||
2023-03-05 : Igor Pavlov : Public domain
|
||||
This code is based on PPMd var.H (2001): Dmitry Shkarin : Public domain */
|
||||
|
||||
#ifndef __PPMD_H
|
||||
#define __PPMD_H
|
||||
#ifndef ZIP7_INC_PPMD_H
|
||||
#define ZIP7_INC_PPMD_H
|
||||
|
||||
#include "CpuArch.h"
|
||||
|
||||
|
@ -48,8 +48,10 @@ typedef struct
|
|||
Byte Count; /* Count to next change of Shift */
|
||||
} CPpmd_See;
|
||||
|
||||
#define Ppmd_See_Update(p) if ((p)->Shift < PPMD_PERIOD_BITS && --(p)->Count == 0) \
|
||||
{ (p)->Summ = (UInt16)((p)->Summ << 1); (p)->Count = (Byte)(3 << (p)->Shift++); }
|
||||
#define Ppmd_See_UPDATE(p) \
|
||||
{ if ((p)->Shift < PPMD_PERIOD_BITS && --(p)->Count == 0) \
|
||||
{ (p)->Summ = (UInt16)((p)->Summ << 1); \
|
||||
(p)->Count = (Byte)(3 << (p)->Shift++); }}
|
||||
|
||||
|
||||
typedef struct
|
||||
|
|
|
@ -1,11 +1,11 @@
|
|||
/* Ppmd7.h -- Ppmd7 (PPMdH) compression codec
|
||||
2021-04-13 : Igor Pavlov : Public domain
|
||||
2023-04-02 : Igor Pavlov : Public domain
|
||||
This code is based on:
|
||||
PPMd var.H (2001): Dmitry Shkarin : Public domain */
|
||||
|
||||
|
||||
#ifndef __PPMD7_H
|
||||
#define __PPMD7_H
|
||||
#ifndef ZIP7_INC_PPMD7_H
|
||||
#define ZIP7_INC_PPMD7_H
|
||||
|
||||
#include "Ppmd.h"
|
||||
|
||||
|
@ -55,7 +55,7 @@ typedef struct
|
|||
UInt32 Range;
|
||||
UInt32 Code;
|
||||
UInt32 Low;
|
||||
IByteIn *Stream;
|
||||
IByteInPtr Stream;
|
||||
} CPpmd7_RangeDec;
|
||||
|
||||
|
||||
|
@ -66,7 +66,7 @@ typedef struct
|
|||
// Byte _dummy_[3];
|
||||
UInt64 Low;
|
||||
UInt64 CacheSize;
|
||||
IByteOut *Stream;
|
||||
IByteOutPtr Stream;
|
||||
} CPpmd7z_RangeEnc;
|
||||
|
||||
|
||||
|
|
|
@ -1,8 +1,8 @@
|
|||
/* Precomp.h -- StdAfx
|
||||
2013-11-12 : Igor Pavlov : Public domain */
|
||||
2023-04-02 : Igor Pavlov : Public domain */
|
||||
|
||||
#ifndef __7Z_PRECOMP_H
|
||||
#define __7Z_PRECOMP_H
|
||||
#ifndef ZIP7_INC_PRECOMP_H
|
||||
#define ZIP7_INC_PRECOMP_H
|
||||
|
||||
#include "Compiler.h"
|
||||
/* #include "7zTypes.h" */
|
||||
|
|
|
@ -1,14 +1,14 @@
|
|||
/* RotateDefs.h -- Rotate functions
|
||||
2015-03-25 : Igor Pavlov : Public domain */
|
||||
2023-06-18 : Igor Pavlov : Public domain */
|
||||
|
||||
#ifndef __ROTATE_DEFS_H
|
||||
#define __ROTATE_DEFS_H
|
||||
#ifndef ZIP7_INC_ROTATE_DEFS_H
|
||||
#define ZIP7_INC_ROTATE_DEFS_H
|
||||
|
||||
#ifdef _MSC_VER
|
||||
|
||||
#include <stdlib.h>
|
||||
|
||||
/* don't use _rotl with MINGW. It can insert slow call to function. */
|
||||
/* don't use _rotl with old MINGW. It can insert slow call to function. */
|
||||
|
||||
/* #if (_MSC_VER >= 1200) */
|
||||
#pragma intrinsic(_rotl)
|
||||
|
@ -18,12 +18,32 @@
|
|||
#define rotlFixed(x, n) _rotl((x), (n))
|
||||
#define rotrFixed(x, n) _rotr((x), (n))
|
||||
|
||||
#if (_MSC_VER >= 1300)
|
||||
#define Z7_ROTL64(x, n) _rotl64((x), (n))
|
||||
#define Z7_ROTR64(x, n) _rotr64((x), (n))
|
||||
#else
|
||||
#define Z7_ROTL64(x, n) (((x) << (n)) | ((x) >> (64 - (n))))
|
||||
#define Z7_ROTR64(x, n) (((x) >> (n)) | ((x) << (64 - (n))))
|
||||
#endif
|
||||
|
||||
#else
|
||||
|
||||
/* new compilers can translate these macros to fast commands. */
|
||||
|
||||
#if defined(__clang__) && (__clang_major__ >= 4) \
|
||||
|| defined(__GNUC__) && (__GNUC__ >= 5)
|
||||
/* GCC 4.9.0 and clang 3.5 can recognize more correct version: */
|
||||
#define rotlFixed(x, n) (((x) << (n)) | ((x) >> (-(n) & 31)))
|
||||
#define rotrFixed(x, n) (((x) >> (n)) | ((x) << (-(n) & 31)))
|
||||
#define Z7_ROTL64(x, n) (((x) << (n)) | ((x) >> (-(n) & 63)))
|
||||
#define Z7_ROTR64(x, n) (((x) >> (n)) | ((x) << (-(n) & 63)))
|
||||
#else
|
||||
/* for old GCC / clang: */
|
||||
#define rotlFixed(x, n) (((x) << (n)) | ((x) >> (32 - (n))))
|
||||
#define rotrFixed(x, n) (((x) >> (n)) | ((x) << (32 - (n))))
|
||||
#define Z7_ROTL64(x, n) (((x) << (n)) | ((x) >> (64 - (n))))
|
||||
#define Z7_ROTR64(x, n) (((x) >> (n)) | ((x) << (64 - (n))))
|
||||
#endif
|
||||
|
||||
#endif
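/* Illustrative sketch (not from the upstream sources): the rotate helpers
   above in use; the shift amounts are arbitrary. */
#if 0
static UInt32 rotate_mix(UInt32 x)
{
  const UInt32 r32 = rotlFixed(x, 7);
  const UInt64 r64 = Z7_ROTR64((UInt64)x, 13);
  return r32 ^ (UInt32)r64;
}
#endif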
|
||||
|
||||
|
|
|
@ -1,8 +1,8 @@
|
|||
/* Sha256.h -- SHA-256 Hash
|
||||
2021-01-01 : Igor Pavlov : Public domain */
|
||||
2023-04-02 : Igor Pavlov : Public domain */
|
||||
|
||||
#ifndef __7Z_SHA256_H
|
||||
#define __7Z_SHA256_H
|
||||
#ifndef ZIP7_INC_SHA256_H
|
||||
#define ZIP7_INC_SHA256_H
|
||||
|
||||
#include "7zTypes.h"
|
||||
|
||||
|
@ -14,7 +14,7 @@ EXTERN_C_BEGIN
|
|||
#define SHA256_BLOCK_SIZE (SHA256_NUM_BLOCK_WORDS * 4)
|
||||
#define SHA256_DIGEST_SIZE (SHA256_NUM_DIGEST_WORDS * 4)
|
||||
|
||||
typedef void (MY_FAST_CALL *SHA256_FUNC_UPDATE_BLOCKS)(UInt32 state[8], const Byte *data, size_t numBlocks);
|
||||
typedef void (Z7_FASTCALL *SHA256_FUNC_UPDATE_BLOCKS)(UInt32 state[8], const Byte *data, size_t numBlocks);
|
||||
|
||||
/*
|
||||
if (the system supports different SHA256 code implementations)
|
||||
|
@ -34,7 +34,7 @@ typedef struct
|
|||
{
|
||||
SHA256_FUNC_UPDATE_BLOCKS func_UpdateBlocks;
|
||||
UInt64 count;
|
||||
UInt64 __pad_2[2];
|
||||
UInt64 _pad_2[2];
|
||||
UInt32 state[SHA256_NUM_DIGEST_WORDS];
|
||||
|
||||
Byte buffer[SHA256_BLOCK_SIZE];
|
||||
|
@ -62,7 +62,7 @@ void Sha256_Final(CSha256 *p, Byte *digest);
|
|||
|
||||
|
||||
|
||||
// void MY_FAST_CALL Sha256_UpdateBlocks(UInt32 state[8], const Byte *data, size_t numBlocks);
|
||||
// void Z7_FASTCALL Sha256_UpdateBlocks(UInt32 state[8], const Byte *data, size_t numBlocks);
|
||||
|
||||
/*
|
||||
call Sha256Prepare() once at program start.
|
||||
|
|
|
@ -0,0 +1,18 @@
|
|||
/* Sort.h -- Sort functions
|
||||
2023-03-05 : Igor Pavlov : Public domain */
|
||||
|
||||
#ifndef ZIP7_INC_SORT_H
|
||||
#define ZIP7_INC_SORT_H
|
||||
|
||||
#include "7zTypes.h"
|
||||
|
||||
EXTERN_C_BEGIN
|
||||
|
||||
void HeapSort(UInt32 *p, size_t size);
|
||||
void HeapSort64(UInt64 *p, size_t size);
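/* Illustrative sketch (not from the upstream sources): HeapSort() sorts the
   array in place, in ascending order. */
#if 0
static void sort_example(void)
{
  UInt32 v[5] = { 4, 1, 3, 5, 2 };
  HeapSort(v, 5);   // v becomes { 1, 2, 3, 4, 5 }
}
#endif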
|
||||
|
||||
/* void HeapSortRef(UInt32 *p, UInt32 *vals, size_t size); */
|
||||
|
||||
EXTERN_C_END
|
||||
|
||||
#endif
|
|
@ -0,0 +1,17 @@
|
|||
/* SwapBytes.h -- Byte Swap conversion filter
|
||||
2023-04-02 : Igor Pavlov : Public domain */
|
||||
|
||||
#ifndef ZIP7_INC_SWAP_BYTES_H
|
||||
#define ZIP7_INC_SWAP_BYTES_H
|
||||
|
||||
#include "7zTypes.h"
|
||||
|
||||
EXTERN_C_BEGIN
|
||||
|
||||
void z7_SwapBytes2(UInt16 *data, size_t numItems);
|
||||
void z7_SwapBytes4(UInt32 *data, size_t numItems);
|
||||
void z7_SwapBytesPrepare(void);
|
||||
|
||||
EXTERN_C_END
|
||||
|
||||
#endif
|
|
@ -0,0 +1,240 @@
|
|||
/* Threads.h -- multithreading library
|
||||
2023-04-02 : Igor Pavlov : Public domain */
|
||||
|
||||
#ifndef ZIP7_INC_THREADS_H
|
||||
#define ZIP7_INC_THREADS_H
|
||||
|
||||
#ifdef _WIN32
|
||||
#include "7zWindows.h"
|
||||
|
||||
#else
|
||||
|
||||
#if defined(__linux__)
|
||||
#if !defined(__APPLE__) && !defined(_AIX) && !defined(__ANDROID__)
|
||||
#ifndef Z7_AFFINITY_DISABLE
|
||||
#define Z7_AFFINITY_SUPPORTED
|
||||
// #pragma message(" ==== Z7_AFFINITY_SUPPORTED")
|
||||
// #define _GNU_SOURCE
|
||||
#endif
|
||||
#endif
|
||||
#endif
|
||||
|
||||
#include <pthread.h>
|
||||
|
||||
#endif
|
||||
|
||||
#include "7zTypes.h"
|
||||
|
||||
EXTERN_C_BEGIN
|
||||
|
||||
#ifdef _WIN32
|
||||
|
||||
WRes HandlePtr_Close(HANDLE *h);
|
||||
WRes Handle_WaitObject(HANDLE h);
|
||||
|
||||
typedef HANDLE CThread;
|
||||
|
||||
#define Thread_CONSTRUCT(p) { *(p) = NULL; }
|
||||
#define Thread_WasCreated(p) (*(p) != NULL)
|
||||
#define Thread_Close(p) HandlePtr_Close(p)
|
||||
// #define Thread_Wait(p) Handle_WaitObject(*(p))
|
||||
|
||||
#ifdef UNDER_CE
|
||||
// if (USE_THREADS_CreateThread is defined), we use _beginthreadex()
|
||||
// if (USE_THREADS_CreateThread is not defined), we use CreateThread()
|
||||
#define USE_THREADS_CreateThread
|
||||
#endif
|
||||
|
||||
typedef
|
||||
#ifdef USE_THREADS_CreateThread
|
||||
DWORD
|
||||
#else
|
||||
unsigned
|
||||
#endif
|
||||
THREAD_FUNC_RET_TYPE;
|
||||
|
||||
#define THREAD_FUNC_RET_ZERO 0
|
||||
|
||||
typedef DWORD_PTR CAffinityMask;
|
||||
typedef DWORD_PTR CCpuSet;
|
||||
|
||||
#define CpuSet_Zero(p) *(p) = (0)
|
||||
#define CpuSet_Set(p, cpu) *(p) |= ((DWORD_PTR)1 << (cpu))
|
||||
|
||||
#else // _WIN32
|
||||
|
||||
typedef struct
|
||||
{
|
||||
pthread_t _tid;
|
||||
int _created;
|
||||
} CThread;
|
||||
|
||||
#define Thread_CONSTRUCT(p) { (p)->_tid = 0; (p)->_created = 0; }
|
||||
#define Thread_WasCreated(p) ((p)->_created != 0)
|
||||
WRes Thread_Close(CThread *p);
|
||||
// #define Thread_Wait Thread_Wait_Close
|
||||
|
||||
typedef void * THREAD_FUNC_RET_TYPE;
|
||||
#define THREAD_FUNC_RET_ZERO NULL
|
||||
|
||||
|
||||
typedef UInt64 CAffinityMask;
|
||||
|
||||
#ifdef Z7_AFFINITY_SUPPORTED
|
||||
|
||||
typedef cpu_set_t CCpuSet;
|
||||
#define CpuSet_Zero(p) CPU_ZERO(p)
|
||||
#define CpuSet_Set(p, cpu) CPU_SET(cpu, p)
|
||||
#define CpuSet_IsSet(p, cpu) CPU_ISSET(cpu, p)
|
||||
|
||||
#else
|
||||
|
||||
typedef UInt64 CCpuSet;
|
||||
#define CpuSet_Zero(p) *(p) = (0)
|
||||
#define CpuSet_Set(p, cpu) *(p) |= ((UInt64)1 << (cpu))
|
||||
#define CpuSet_IsSet(p, cpu) ((*(p) & ((UInt64)1 << (cpu))) != 0)
|
||||
|
||||
#endif
|
||||
|
||||
|
||||
#endif // _WIN32
|
||||
|
||||
|
||||
#define THREAD_FUNC_CALL_TYPE Z7_STDCALL
|
||||
|
||||
#if defined(_WIN32) && defined(__GNUC__)
|
||||
/* The GCC compiler for x86 32-bit uses the rule:
the stack is 16-byte aligned before a CALL instruction for function calling.
But only the root function main() contains instructions that
set 16-byte alignment for the stack pointer. Other functions
just keep that alignment, if it was set in some parent function.

The problem:
if we create a new thread in MinGW (GCC) 32-bit x86 via _beginthreadex() or CreateThread(),
the root function of the thread doesn't set 16-byte alignment.
And stack frames in all child functions also will be unaligned in that case.

Here we set the (force_align_arg_pointer) attribute for the root function of a new thread.
Do we need (force_align_arg_pointer) also for other systems? */
|
||||
|
||||
#define THREAD_FUNC_ATTRIB_ALIGN_ARG __attribute__((force_align_arg_pointer))
|
||||
// #define THREAD_FUNC_ATTRIB_ALIGN_ARG // for debug : bad alignment in SSE functions
|
||||
#else
|
||||
#define THREAD_FUNC_ATTRIB_ALIGN_ARG
|
||||
#endif
|
||||
|
||||
#define THREAD_FUNC_DECL THREAD_FUNC_ATTRIB_ALIGN_ARG THREAD_FUNC_RET_TYPE THREAD_FUNC_CALL_TYPE
|
||||
|
||||
typedef THREAD_FUNC_RET_TYPE (THREAD_FUNC_CALL_TYPE * THREAD_FUNC_TYPE)(void *);
|
||||
WRes Thread_Create(CThread *p, THREAD_FUNC_TYPE func, LPVOID param);
|
||||
WRes Thread_Create_With_Affinity(CThread *p, THREAD_FUNC_TYPE func, LPVOID param, CAffinityMask affinity);
|
||||
WRes Thread_Wait_Close(CThread *p);
|
||||
|
||||
#ifdef _WIN32
|
||||
#define Thread_Create_With_CpuSet(p, func, param, cs) \
|
||||
Thread_Create_With_Affinity(p, func, param, *cs)
|
||||
#else
|
||||
WRes Thread_Create_With_CpuSet(CThread *p, THREAD_FUNC_TYPE func, LPVOID param, const CCpuSet *cpuSet);
|
||||
#endif
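/* Illustrative sketch (not from the upstream sources): creating a worker
   thread pinned to CPU 0 with the API above; the worker body is a
   placeholder and error handling is omitted. */
#if 0
static THREAD_FUNC_DECL worker(void *param)
{
  (void)param;                 // real code would do the per-thread work here
  return THREAD_FUNC_RET_ZERO;
}

static WRes start_pinned_worker(CThread *t)
{
  Thread_CONSTRUCT(t)
  return Thread_Create_With_Affinity(t, worker, NULL, (CAffinityMask)1);
}
#endif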
|
||||
|
||||
|
||||
#ifdef _WIN32
|
||||
|
||||
typedef HANDLE CEvent;
|
||||
typedef CEvent CAutoResetEvent;
|
||||
typedef CEvent CManualResetEvent;
|
||||
#define Event_Construct(p) *(p) = NULL
|
||||
#define Event_IsCreated(p) (*(p) != NULL)
|
||||
#define Event_Close(p) HandlePtr_Close(p)
|
||||
#define Event_Wait(p) Handle_WaitObject(*(p))
|
||||
WRes Event_Set(CEvent *p);
|
||||
WRes Event_Reset(CEvent *p);
|
||||
WRes ManualResetEvent_Create(CManualResetEvent *p, int signaled);
|
||||
WRes ManualResetEvent_CreateNotSignaled(CManualResetEvent *p);
|
||||
WRes AutoResetEvent_Create(CAutoResetEvent *p, int signaled);
|
||||
WRes AutoResetEvent_CreateNotSignaled(CAutoResetEvent *p);
|
||||
|
||||
typedef HANDLE CSemaphore;
|
||||
#define Semaphore_Construct(p) *(p) = NULL
|
||||
#define Semaphore_IsCreated(p) (*(p) != NULL)
|
||||
#define Semaphore_Close(p) HandlePtr_Close(p)
|
||||
#define Semaphore_Wait(p) Handle_WaitObject(*(p))
|
||||
WRes Semaphore_Create(CSemaphore *p, UInt32 initCount, UInt32 maxCount);
|
||||
WRes Semaphore_OptCreateInit(CSemaphore *p, UInt32 initCount, UInt32 maxCount);
|
||||
WRes Semaphore_ReleaseN(CSemaphore *p, UInt32 num);
|
||||
WRes Semaphore_Release1(CSemaphore *p);
|
||||
|
||||
typedef CRITICAL_SECTION CCriticalSection;
|
||||
WRes CriticalSection_Init(CCriticalSection *p);
|
||||
#define CriticalSection_Delete(p) DeleteCriticalSection(p)
|
||||
#define CriticalSection_Enter(p) EnterCriticalSection(p)
|
||||
#define CriticalSection_Leave(p) LeaveCriticalSection(p)
|
||||
|
||||
|
||||
#else // _WIN32
|
||||
|
||||
typedef struct _CEvent
|
||||
{
|
||||
int _created;
|
||||
int _manual_reset;
|
||||
int _state;
|
||||
pthread_mutex_t _mutex;
|
||||
pthread_cond_t _cond;
|
||||
} CEvent;
|
||||
|
||||
typedef CEvent CAutoResetEvent;
|
||||
typedef CEvent CManualResetEvent;
|
||||
|
||||
#define Event_Construct(p) (p)->_created = 0
|
||||
#define Event_IsCreated(p) ((p)->_created)
|
||||
|
||||
WRes ManualResetEvent_Create(CManualResetEvent *p, int signaled);
|
||||
WRes ManualResetEvent_CreateNotSignaled(CManualResetEvent *p);
|
||||
WRes AutoResetEvent_Create(CAutoResetEvent *p, int signaled);
|
||||
WRes AutoResetEvent_CreateNotSignaled(CAutoResetEvent *p);
|
||||
|
||||
WRes Event_Set(CEvent *p);
|
||||
WRes Event_Reset(CEvent *p);
|
||||
WRes Event_Wait(CEvent *p);
|
||||
WRes Event_Close(CEvent *p);
|
||||
|
||||
|
||||
typedef struct _CSemaphore
|
||||
{
|
||||
int _created;
|
||||
UInt32 _count;
|
||||
UInt32 _maxCount;
|
||||
pthread_mutex_t _mutex;
|
||||
pthread_cond_t _cond;
|
||||
} CSemaphore;
|
||||
|
||||
#define Semaphore_Construct(p) (p)->_created = 0
|
||||
#define Semaphore_IsCreated(p) ((p)->_created)
|
||||
|
||||
WRes Semaphore_Create(CSemaphore *p, UInt32 initCount, UInt32 maxCount);
|
||||
WRes Semaphore_OptCreateInit(CSemaphore *p, UInt32 initCount, UInt32 maxCount);
|
||||
WRes Semaphore_ReleaseN(CSemaphore *p, UInt32 num);
|
||||
#define Semaphore_Release1(p) Semaphore_ReleaseN(p, 1)
|
||||
WRes Semaphore_Wait(CSemaphore *p);
|
||||
WRes Semaphore_Close(CSemaphore *p);
|
||||
|
||||
|
||||
typedef struct _CCriticalSection
|
||||
{
|
||||
pthread_mutex_t _mutex;
|
||||
} CCriticalSection;
|
||||
|
||||
WRes CriticalSection_Init(CCriticalSection *p);
|
||||
void CriticalSection_Delete(CCriticalSection *cs);
|
||||
void CriticalSection_Enter(CCriticalSection *cs);
|
||||
void CriticalSection_Leave(CCriticalSection *cs);
|
||||
|
||||
LONG InterlockedIncrement(LONG volatile *addend);
|
||||
|
||||
#endif // _WIN32
|
||||
|
||||
WRes AutoResetEvent_OptCreate_And_Reset(CAutoResetEvent *p);
|
||||
|
||||
EXTERN_C_END
|
||||
|
||||
#endif
|
|
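For orientation, a minimal usage sketch of the Threads.h API shown above (an illustration, not part of the committed sources): WorkerFunc and its int payload are hypothetical, and returning 0 from the worker is assumed to be an acceptable THREAD_FUNC_RET_TYPE value.

#include "Threads.h"

/* Hypothetical worker; THREAD_FUNC_DECL supplies the return type, calling
   convention and the 32-bit GCC stack-alignment attribute discussed above. */
static THREAD_FUNC_DECL WorkerFunc(void *param)
{
  int *value = (int *)param;   /* illustrative payload */
  *value += 1;                 /* stand-in for real work */
  return 0;                    /* assumed "no error" result */
}

static WRes RunWorker(void)
{
  CThread thread;
  int value = 41;
  const WRes wres = Thread_Create(&thread, WorkerFunc, &value);
  if (wres != 0)
    return wres;
  return Thread_Wait_Close(&thread);  /* wait for the worker, then release the handle */
}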
@ -1,21 +1,23 @@
/* Xz.h - Xz interface
2021-04-01 : Igor Pavlov : Public domain */
2023-04-13 : Igor Pavlov : Public domain */

#ifndef __XZ_H
#define __XZ_H
#ifndef ZIP7_INC_XZ_H
#define ZIP7_INC_XZ_H

#include "Sha256.h"
#include "Delta.h"

EXTERN_C_BEGIN

#define XZ_ID_Subblock 1
#define XZ_ID_Delta 3
#define XZ_ID_X86 4
#define XZ_ID_PPC 5
#define XZ_ID_IA64 6
#define XZ_ID_ARM 7
#define XZ_ID_ARMT 8
#define XZ_ID_X86 4
#define XZ_ID_PPC 5
#define XZ_ID_IA64 6
#define XZ_ID_ARM 7
#define XZ_ID_ARMT 8
#define XZ_ID_SPARC 9
#define XZ_ID_ARM64 0xa
#define XZ_ID_LZMA2 0x21

unsigned Xz_ReadVarInt(const Byte *p, size_t maxSize, UInt64 *value);

@ -53,7 +55,7 @@ typedef struct
#define XzBlock_HasUnsupportedFlags(p) (((p)->flags & ~(XZ_BF_NUM_FILTERS_MASK | XZ_BF_PACK_SIZE | XZ_BF_UNPACK_SIZE)) != 0)

SRes XzBlock_Parse(CXzBlock *p, const Byte *header);
SRes XzBlock_ReadHeader(CXzBlock *p, ISeqInStream *inStream, BoolInt *isIndex, UInt32 *headerSizeRes);
SRes XzBlock_ReadHeader(CXzBlock *p, ISeqInStreamPtr inStream, BoolInt *isIndex, UInt32 *headerSizeRes);

/* ---------- xz stream ---------- */

@ -101,7 +103,7 @@ typedef UInt16 CXzStreamFlags;
unsigned XzFlags_GetCheckSize(CXzStreamFlags f);

SRes Xz_ParseHeader(CXzStreamFlags *p, const Byte *buf);
SRes Xz_ReadHeader(CXzStreamFlags *p, ISeqInStream *inStream);
SRes Xz_ReadHeader(CXzStreamFlags *p, ISeqInStreamPtr inStream);

typedef struct
{

@ -112,6 +114,7 @@ typedef struct
typedef struct
{
  CXzStreamFlags flags;
  // Byte _pad[6];
  size_t numBlocks;
  CXzBlockSizes *blocks;
  UInt64 startOffset;

@ -134,7 +137,7 @@ typedef struct

void Xzs_Construct(CXzs *p);
void Xzs_Free(CXzs *p, ISzAllocPtr alloc);
SRes Xzs_ReadBackward(CXzs *p, ILookInStream *inStream, Int64 *startOffset, ICompressProgress *progress, ISzAllocPtr alloc);
SRes Xzs_ReadBackward(CXzs *p, ILookInStreamPtr inStream, Int64 *startOffset, ICompressProgressPtr progress, ISzAllocPtr alloc);

UInt64 Xzs_GetNumBlocks(const CXzs *p);
UInt64 Xzs_GetUnpackSize(const CXzs *p);

@ -160,9 +163,9 @@ typedef enum
} ECoderFinishMode;


typedef struct _IStateCoder
typedef struct
{
  void *p;
  void *p; // state object;
  void (*Free)(void *p, ISzAllocPtr alloc);
  SRes (*SetProps)(void *p, const Byte *props, size_t propSize, ISzAllocPtr alloc);
  void (*Init)(void *p);

@ -174,6 +177,20 @@ typedef struct _IStateCoder
} IStateCoder;


typedef struct
{
  UInt32 methodId;
  UInt32 delta;
  UInt32 ip;
  UInt32 X86_State;
  Byte delta_State[DELTA_STATE_SIZE];
} CXzBcFilterStateBase;

typedef SizeT (*Xz_Func_BcFilterStateBase_Filter)(CXzBcFilterStateBase *p, Byte *data, SizeT size);

SRes Xz_StateCoder_Bc_SetFromMethod_Func(IStateCoder *p, UInt64 id,
    Xz_Func_BcFilterStateBase_Filter func, ISzAllocPtr alloc);


#define MIXCODER_NUM_FILTERS_MAX 4

@ -422,7 +439,7 @@ typedef struct
  size_t outStep_ST; // size of output buffer for Single-Thread decoding
  BoolInt ignoreErrors; // if set to 1, the decoder can ignore some errors and it skips broken parts of data.

#ifndef _7ZIP_ST
#ifndef Z7_ST
  unsigned numThreads; // the number of threads for Multi-Thread decoding. if (umThreads == 1) it will use Single-thread decoding
  size_t inBufSize_MT; // size of small input data buffers for Multi-Thread decoding. Big number of such small buffers can be created
  size_t memUseMax; // the limit of total memory usage for Multi-Thread decoding.

@ -432,8 +449,9 @@ typedef struct

void XzDecMtProps_Init(CXzDecMtProps *p);


typedef void * CXzDecMtHandle;
typedef struct CXzDecMt CXzDecMt;
typedef CXzDecMt * CXzDecMtHandle;
// Z7_DECLARE_HANDLE(CXzDecMtHandle)

/*
alloc : XzDecMt uses CAlignOffsetAlloc internally for addresses allocated by (alloc).

@ -503,14 +521,14 @@ SRes XzDecMt_Decode(CXzDecMtHandle p,
    const CXzDecMtProps *props,
    const UInt64 *outDataSize, // NULL means undefined
    int finishMode, // 0 - partial unpacking is allowed, 1 - xz stream(s) must be finished
    ISeqOutStream *outStream,
    ISeqOutStreamPtr outStream,
    // Byte *outBuf, size_t *outBufSize,
    ISeqInStream *inStream,
    ISeqInStreamPtr inStream,
    // const Byte *inData, size_t inDataSize,
    CXzStatInfo *stat, // out: decoding results and statistics
    int *isMT, // out: 0 means that ST (Single-Thread) version was used
    // 1 means that MT (Multi-Thread) version was used
    ICompressProgress *progress);
    ICompressProgressPtr progress);

EXTERN_C_END

@ -1,8 +1,8 @@
/* XzCrc64.h -- CRC64 calculation
2013-01-18 : Igor Pavlov : Public domain */
2023-04-02 : Igor Pavlov : Public domain */

#ifndef __XZ_CRC64_H
#define __XZ_CRC64_H
#ifndef ZIP7_INC_XZ_CRC64_H
#define ZIP7_INC_XZ_CRC64_H

#include <stddef.h>

@ -12,14 +12,14 @@ EXTERN_C_BEGIN

extern UInt64 g_Crc64Table[];

void MY_FAST_CALL Crc64GenerateTable(void);
void Z7_FASTCALL Crc64GenerateTable(void);

#define CRC64_INIT_VAL UINT64_CONST(0xFFFFFFFFFFFFFFFF)
#define CRC64_GET_DIGEST(crc) ((crc) ^ CRC64_INIT_VAL)
#define CRC64_UPDATE_BYTE(crc, b) (g_Crc64Table[((crc) ^ (b)) & 0xFF] ^ ((crc) >> 8))

UInt64 MY_FAST_CALL Crc64Update(UInt64 crc, const void *data, size_t size);
UInt64 MY_FAST_CALL Crc64Calc(const void *data, size_t size);
UInt64 Z7_FASTCALL Crc64Update(UInt64 crc, const void *data, size_t size);
UInt64 Z7_FASTCALL Crc64Calc(const void *data, size_t size);

EXTERN_C_END

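A brief usage sketch for the CRC64 declarations above (illustration only, not part of the commit); HashBuffer and HashTwoChunks are hypothetical helpers, and the table is assumed not to have been generated elsewhere, so it is filled once up front.

#include "XzCrc64.h"

/* One-shot digest: generate the table once, then hash a buffer. */
static UInt64 HashBuffer(const void *data, size_t size)
{
  Crc64GenerateTable();          /* fills g_Crc64Table; needed once per process */
  return Crc64Calc(data, size);
}

/* Streaming digest over two chunks, using the macros declared above.
   Assumes the table has already been generated. */
static UInt64 HashTwoChunks(const void *a, size_t aSize, const void *b, size_t bSize)
{
  UInt64 crc = CRC64_INIT_VAL;
  crc = Crc64Update(crc, a, aSize);
  crc = Crc64Update(crc, b, bSize);
  return CRC64_GET_DIGEST(crc);
}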
@ -1,8 +1,8 @@
/* XzEnc.h -- Xz Encode
2017-06-27 : Igor Pavlov : Public domain */
2023-04-13 : Igor Pavlov : Public domain */

#ifndef __XZ_ENC_H
#define __XZ_ENC_H
#ifndef ZIP7_INC_XZ_ENC_H
#define ZIP7_INC_XZ_ENC_H

#include "Lzma2Enc.h"

@ -11,8 +11,8 @@
EXTERN_C_BEGIN


#define XZ_PROPS__BLOCK_SIZE__AUTO LZMA2_ENC_PROPS__BLOCK_SIZE__AUTO
#define XZ_PROPS__BLOCK_SIZE__SOLID LZMA2_ENC_PROPS__BLOCK_SIZE__SOLID
#define XZ_PROPS_BLOCK_SIZE_AUTO LZMA2_ENC_PROPS_BLOCK_SIZE_AUTO
#define XZ_PROPS_BLOCK_SIZE_SOLID LZMA2_ENC_PROPS_BLOCK_SIZE_SOLID


typedef struct

@ -41,19 +41,20 @@ typedef struct

void XzProps_Init(CXzProps *p);


typedef void * CXzEncHandle;
typedef struct CXzEnc CXzEnc;
typedef CXzEnc * CXzEncHandle;
// Z7_DECLARE_HANDLE(CXzEncHandle)

CXzEncHandle XzEnc_Create(ISzAllocPtr alloc, ISzAllocPtr allocBig);
void XzEnc_Destroy(CXzEncHandle p);
SRes XzEnc_SetProps(CXzEncHandle p, const CXzProps *props);
void XzEnc_SetDataSize(CXzEncHandle p, UInt64 expectedDataSiize);
SRes XzEnc_Encode(CXzEncHandle p, ISeqOutStream *outStream, ISeqInStream *inStream, ICompressProgress *progress);
SRes XzEnc_Encode(CXzEncHandle p, ISeqOutStreamPtr outStream, ISeqInStreamPtr inStream, ICompressProgressPtr progress);

SRes Xz_Encode(ISeqOutStream *outStream, ISeqInStream *inStream,
    const CXzProps *props, ICompressProgress *progress);
SRes Xz_Encode(ISeqOutStreamPtr outStream, ISeqInStreamPtr inStream,
    const CXzProps *props, ICompressProgressPtr progress);

SRes Xz_EncodeEmpty(ISeqOutStream *outStream);
SRes Xz_EncodeEmpty(ISeqOutStreamPtr outStream);

EXTERN_C_END

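For context, a sketch of how the handle-based encoder declared above is typically driven (illustration only, not part of the commit). It assumes the standard g_Alloc / g_BigAlloc allocators from Alloc.h, already-initialized ISeqInStreamPtr / ISeqOutStreamPtr objects supplied by the caller, and that a NULL progress callback is accepted; CompressToXz is a hypothetical helper.

#include "Alloc.h"
#include "XzEnc.h"

/* Compress inStream to outStream as a single .xz stream with default props. */
static SRes CompressToXz(ISeqOutStreamPtr outStream, ISeqInStreamPtr inStream)
{
  SRes res;
  CXzProps props;
  CXzEncHandle enc = XzEnc_Create(&g_Alloc, &g_BigAlloc);
  if (!enc)
    return SZ_ERROR_MEM;
  XzProps_Init(&props);                    /* library defaults */
  res = XzEnc_SetProps(enc, &props);
  if (res == SZ_OK)
    res = XzEnc_Encode(enc, outStream, inStream, NULL);  /* NULL progress: assumption */
  XzEnc_Destroy(enc);
  return res;
}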
@ -39,6 +39,8 @@
|
|||
<ClCompile Include="src\7zDec.c" />
|
||||
<ClCompile Include="src\7zFile.c" />
|
||||
<ClCompile Include="src\7zStream.c" />
|
||||
<ClCompile Include="src/Aes.c" />
|
||||
<ClCompile Include="src/AesOpt.c" />
|
||||
<ClCompile Include="src\Alloc.c" />
|
||||
<ClCompile Include="src\Bcj2.c" />
|
||||
<ClCompile Include="src\Bcj2Enc.c" />
|
||||
|
@ -46,8 +48,10 @@
|
|||
<ClCompile Include="src\Bra86.c" />
|
||||
<ClCompile Include="src\BraIA64.c" />
|
||||
<ClCompile Include="src\CpuArch.c" />
|
||||
<ClCompile Include="src/DllSecur.c" />
|
||||
<ClCompile Include="src\Delta.c" />
|
||||
<ClCompile Include="src\LzFind.c" />
|
||||
<ClCompile Include="src/LzFindMt.c" />
|
||||
<ClCompile Include="src\LzFindOpt.c" />
|
||||
<ClCompile Include="src\Lzma2Dec.c" />
|
||||
<ClCompile Include="src\Lzma2DecMt.c" />
|
||||
|
@ -57,11 +61,16 @@
|
|||
<ClCompile Include="src\LzmaDec.c" />
|
||||
<ClCompile Include="src\LzmaEnc.c" />
|
||||
<ClCompile Include="src\LzmaLib.c" />
|
||||
<ClCompile Include="src/MtCoder.c" />
|
||||
<ClCompile Include="src/MtDec.c" />
|
||||
<ClCompile Include="src\Ppmd7.c" />
|
||||
<ClCompile Include="src\Ppmd7Dec.c" />
|
||||
<ClCompile Include="src\Ppmd7Enc.c" />
|
||||
<ClCompile Include="src\Sha256.c" />
|
||||
<ClCompile Include="src\Sha256Opt.c" />
|
||||
<ClCompile Include="src/Sort.c" />
|
||||
<ClCompile Include="src/SwapBytes.c" />
|
||||
<ClCompile Include="src/Threads.c" />
|
||||
<ClCompile Include="src\Xz.c" />
|
||||
<ClCompile Include="src\XzCrc64.c" />
|
||||
<ClCompile Include="src\XzCrc64Opt.c" />
|
||||
|
@ -77,13 +86,17 @@
|
|||
<ClInclude Include="include\7zFile.h" />
|
||||
<ClInclude Include="include\7zTypes.h" />
|
||||
<ClInclude Include="include\7zVersion.h" />
|
||||
<ClInclude Include="include/7zWindows.h" />
|
||||
<ClInclude Include="include\Alloc.h" />
|
||||
<ClInclude Include="include/Aes.h" />
|
||||
<ClInclude Include="include\Bcj2.h" />
|
||||
<ClInclude Include="include\Bra.h" />
|
||||
<ClInclude Include="include\Compiler.h" />
|
||||
<ClInclude Include="include\CpuArch.h" />
|
||||
<ClInclude Include="include\Delta.h" />
|
||||
<ClInclude Include="include/DllSecur.h" />
|
||||
<ClInclude Include="include\LzFind.h" />
|
||||
<ClInclude Include="include/LzFindMt.h" />
|
||||
<ClInclude Include="include\LzHash.h" />
|
||||
<ClInclude Include="include\Lzma2Dec.h" />
|
||||
<ClInclude Include="include\Lzma2DecMt.h" />
|
||||
|
@ -92,11 +105,16 @@
|
|||
<ClInclude Include="include\LzmaDec.h" />
|
||||
<ClInclude Include="include\LzmaEnc.h" />
|
||||
<ClInclude Include="include\LzmaLib.h" />
|
||||
<ClInclude Include="include/MtCoder.h" />
|
||||
<ClInclude Include="include/MtDec.h" />
|
||||
<ClInclude Include="include\Ppmd.h" />
|
||||
<ClInclude Include="include\Ppmd7.h" />
|
||||
<ClInclude Include="include\Precomp.h" />
|
||||
<ClInclude Include="include\RotateDefs.h" />
|
||||
<ClInclude Include="include\Sha256.h" />
|
||||
<ClInclude Include="include/Sort.h" />
|
||||
<ClInclude Include="include/SwapBytes.h" />
|
||||
<ClInclude Include="include/Threads.h" />
|
||||
<ClInclude Include="include\Xz.h" />
|
||||
<ClInclude Include="include\XzCrc64.h" />
|
||||
<ClInclude Include="include\XzEnc.h" />
|
||||
|
|
|
@ -1,5 +1,5 @@
|
|||
/* 7zAlloc.c -- Allocation functions
|
||||
2017-04-03 : Igor Pavlov : Public domain */
|
||||
/* 7zAlloc.c -- Allocation functions for 7z processing
|
||||
2023-03-04 : Igor Pavlov : Public domain */
|
||||
|
||||
#include "Precomp.h"
|
||||
|
||||
|
@ -7,74 +7,83 @@
|
|||
|
||||
#include "7zAlloc.h"
|
||||
|
||||
/* #define _SZ_ALLOC_DEBUG */
|
||||
/* use _SZ_ALLOC_DEBUG to debug alloc/free operations */
|
||||
/* #define SZ_ALLOC_DEBUG */
|
||||
/* use SZ_ALLOC_DEBUG to debug alloc/free operations */
|
||||
|
||||
#ifdef _SZ_ALLOC_DEBUG
|
||||
#ifdef SZ_ALLOC_DEBUG
|
||||
|
||||
/*
|
||||
#ifdef _WIN32
|
||||
#include <windows.h>
|
||||
#include "7zWindows.h"
|
||||
#endif
|
||||
*/
|
||||
|
||||
#include <stdio.h>
|
||||
int g_allocCount = 0;
|
||||
int g_allocCountTemp = 0;
|
||||
static int g_allocCount = 0;
|
||||
static int g_allocCountTemp = 0;
|
||||
|
||||
static void Print_Alloc(const char *s, size_t size, int *counter)
|
||||
{
|
||||
const unsigned size2 = (unsigned)size;
|
||||
fprintf(stderr, "\n%s count = %10d : %10u bytes; ", s, *counter, size2);
|
||||
(*counter)++;
|
||||
}
|
||||
static void Print_Free(const char *s, int *counter)
|
||||
{
|
||||
(*counter)--;
|
||||
fprintf(stderr, "\n%s count = %10d", s, *counter);
|
||||
}
|
||||
#endif
|
||||
|
||||
void *SzAlloc(ISzAllocPtr p, size_t size)
|
||||
{
|
||||
UNUSED_VAR(p);
|
||||
UNUSED_VAR(p)
|
||||
if (size == 0)
|
||||
return 0;
|
||||
#ifdef _SZ_ALLOC_DEBUG
|
||||
fprintf(stderr, "\nAlloc %10u bytes; count = %10d", (unsigned)size, g_allocCount);
|
||||
g_allocCount++;
|
||||
#ifdef SZ_ALLOC_DEBUG
|
||||
Print_Alloc("Alloc", size, &g_allocCount);
|
||||
#endif
|
||||
return malloc(size);
|
||||
}
|
||||
|
||||
void SzFree(ISzAllocPtr p, void *address)
|
||||
{
|
||||
UNUSED_VAR(p);
|
||||
#ifdef _SZ_ALLOC_DEBUG
|
||||
if (address != 0)
|
||||
{
|
||||
g_allocCount--;
|
||||
fprintf(stderr, "\nFree; count = %10d", g_allocCount);
|
||||
}
|
||||
UNUSED_VAR(p)
|
||||
#ifdef SZ_ALLOC_DEBUG
|
||||
if (address)
|
||||
Print_Free("Free ", &g_allocCount);
|
||||
#endif
|
||||
free(address);
|
||||
}
|
||||
|
||||
void *SzAllocTemp(ISzAllocPtr p, size_t size)
|
||||
{
|
||||
UNUSED_VAR(p);
|
||||
UNUSED_VAR(p)
|
||||
if (size == 0)
|
||||
return 0;
|
||||
#ifdef _SZ_ALLOC_DEBUG
|
||||
fprintf(stderr, "\nAlloc_temp %10u bytes; count = %10d", (unsigned)size, g_allocCountTemp);
|
||||
g_allocCountTemp++;
|
||||
#ifdef SZ_ALLOC_DEBUG
|
||||
Print_Alloc("Alloc_temp", size, &g_allocCountTemp);
|
||||
/*
|
||||
#ifdef _WIN32
|
||||
return HeapAlloc(GetProcessHeap(), 0, size);
|
||||
#endif
|
||||
*/
|
||||
#endif
|
||||
return malloc(size);
|
||||
}
|
||||
|
||||
void SzFreeTemp(ISzAllocPtr p, void *address)
|
||||
{
|
||||
UNUSED_VAR(p);
|
||||
#ifdef _SZ_ALLOC_DEBUG
|
||||
if (address != 0)
|
||||
{
|
||||
g_allocCountTemp--;
|
||||
fprintf(stderr, "\nFree_temp; count = %10d", g_allocCountTemp);
|
||||
}
|
||||
UNUSED_VAR(p)
|
||||
#ifdef SZ_ALLOC_DEBUG
|
||||
if (address)
|
||||
Print_Free("Free_temp ", &g_allocCountTemp);
|
||||
/*
|
||||
#ifdef _WIN32
|
||||
HeapFree(GetProcessHeap(), 0, address);
|
||||
return;
|
||||
#endif
|
||||
*/
|
||||
#endif
|
||||
free(address);
|
||||
}
|
||||
|
|
File diff suppressed because it is too large
|
@ -1,5 +1,5 @@
|
|||
/* 7zCrc.c -- CRC32 init
|
||||
2021-04-01 : Igor Pavlov : Public domain */
|
||||
/* 7zCrc.c -- CRC32 calculation and init
|
||||
2023-04-02 : Igor Pavlov : Public domain */
|
||||
|
||||
#include "Precomp.h"
|
||||
|
||||
|
@ -13,22 +13,20 @@
|
|||
#else
|
||||
#define CRC_NUM_TABLES 9
|
||||
|
||||
#define CRC_UINT32_SWAP(v) ((v >> 24) | ((v >> 8) & 0xFF00) | ((v << 8) & 0xFF0000) | (v << 24))
|
||||
|
||||
UInt32 MY_FAST_CALL CrcUpdateT1_BeT4(UInt32 v, const void *data, size_t size, const UInt32 *table);
|
||||
UInt32 MY_FAST_CALL CrcUpdateT1_BeT8(UInt32 v, const void *data, size_t size, const UInt32 *table);
|
||||
UInt32 Z7_FASTCALL CrcUpdateT1_BeT4(UInt32 v, const void *data, size_t size, const UInt32 *table);
|
||||
UInt32 Z7_FASTCALL CrcUpdateT1_BeT8(UInt32 v, const void *data, size_t size, const UInt32 *table);
|
||||
#endif
|
||||
|
||||
#ifndef MY_CPU_BE
|
||||
UInt32 MY_FAST_CALL CrcUpdateT4(UInt32 v, const void *data, size_t size, const UInt32 *table);
|
||||
UInt32 MY_FAST_CALL CrcUpdateT8(UInt32 v, const void *data, size_t size, const UInt32 *table);
|
||||
UInt32 Z7_FASTCALL CrcUpdateT4(UInt32 v, const void *data, size_t size, const UInt32 *table);
|
||||
UInt32 Z7_FASTCALL CrcUpdateT8(UInt32 v, const void *data, size_t size, const UInt32 *table);
|
||||
#endif
|
||||
|
||||
typedef UInt32 (MY_FAST_CALL *CRC_FUNC)(UInt32 v, const void *data, size_t size, const UInt32 *table);
|
||||
|
||||
/*
|
||||
extern
|
||||
CRC_FUNC g_CrcUpdateT4;
|
||||
CRC_FUNC g_CrcUpdateT4;
|
||||
*/
|
||||
extern
|
||||
CRC_FUNC g_CrcUpdateT8;
|
||||
CRC_FUNC g_CrcUpdateT8;
|
||||
|
@ -44,20 +42,22 @@ CRC_FUNC g_CrcUpdate;
|
|||
|
||||
UInt32 g_CrcTable[256 * CRC_NUM_TABLES];
|
||||
|
||||
UInt32 MY_FAST_CALL CrcUpdate(UInt32 v, const void *data, size_t size)
|
||||
UInt32 Z7_FASTCALL CrcUpdate(UInt32 v, const void *data, size_t size)
|
||||
{
|
||||
return g_CrcUpdate(v, data, size, g_CrcTable);
|
||||
}
|
||||
|
||||
UInt32 MY_FAST_CALL CrcCalc(const void *data, size_t size)
|
||||
UInt32 Z7_FASTCALL CrcCalc(const void *data, size_t size)
|
||||
{
|
||||
return g_CrcUpdate(CRC_INIT_VAL, data, size, g_CrcTable) ^ CRC_INIT_VAL;
|
||||
}
|
||||
|
||||
#if CRC_NUM_TABLES < 4 \
|
||||
|| (CRC_NUM_TABLES == 4 && defined(MY_CPU_BE)) \
|
||||
|| (!defined(MY_CPU_LE) && !defined(MY_CPU_BE))
|
||||
#define CRC_UPDATE_BYTE_2(crc, b) (table[((crc) ^ (b)) & 0xFF] ^ ((crc) >> 8))
|
||||
|
||||
UInt32 MY_FAST_CALL CrcUpdateT1(UInt32 v, const void *data, size_t size, const UInt32 *table);
|
||||
UInt32 MY_FAST_CALL CrcUpdateT1(UInt32 v, const void *data, size_t size, const UInt32 *table)
|
||||
UInt32 Z7_FASTCALL CrcUpdateT1(UInt32 v, const void *data, size_t size, const UInt32 *table);
|
||||
UInt32 Z7_FASTCALL CrcUpdateT1(UInt32 v, const void *data, size_t size, const UInt32 *table)
|
||||
{
|
||||
const Byte *p = (const Byte *)data;
|
||||
const Byte *pEnd = p + size;
|
||||
|
@ -65,7 +65,7 @@ UInt32 MY_FAST_CALL CrcUpdateT1(UInt32 v, const void *data, size_t size, const U
|
|||
v = CRC_UPDATE_BYTE_2(v, *p);
|
||||
return v;
|
||||
}
|
||||
|
||||
#endif
|
||||
|
||||
/* ---------- hardware CRC ---------- */
|
||||
|
||||
|
@ -78,16 +78,29 @@ UInt32 MY_FAST_CALL CrcUpdateT1(UInt32 v, const void *data, size_t size, const U
|
|||
#if defined(_MSC_VER)
|
||||
#if defined(MY_CPU_ARM64)
|
||||
#if (_MSC_VER >= 1910)
|
||||
#ifndef __clang__
|
||||
#define USE_ARM64_CRC
|
||||
#include <intrin.h>
|
||||
#endif
|
||||
#endif
|
||||
#endif
|
||||
#elif (defined(__clang__) && (__clang_major__ >= 3)) \
|
||||
|| (defined(__GNUC__) && (__GNUC__ > 4))
|
||||
#if !defined(__ARM_FEATURE_CRC32)
|
||||
#define __ARM_FEATURE_CRC32 1
|
||||
#if (!defined(__clang__) || (__clang_major__ > 3)) // fix these numbers
|
||||
#if defined(__clang__)
|
||||
#if defined(MY_CPU_ARM64)
|
||||
#define ATTRIB_CRC __attribute__((__target__("crc")))
|
||||
#else
|
||||
#define ATTRIB_CRC __attribute__((__target__("armv8-a,crc")))
|
||||
#endif
|
||||
#else
|
||||
#if defined(MY_CPU_ARM64)
|
||||
#define ATTRIB_CRC __attribute__((__target__("+crc")))
|
||||
#else
|
||||
#define ATTRIB_CRC __attribute__((__target__("arch=armv8-a+crc")))
|
||||
#endif
|
||||
#endif
|
||||
#endif
|
||||
#if defined(__ARM_FEATURE_CRC32)
|
||||
#define USE_ARM64_CRC
|
||||
|
@ -105,7 +118,7 @@ UInt32 MY_FAST_CALL CrcUpdateT1(UInt32 v, const void *data, size_t size, const U
|
|||
|
||||
#pragma message("ARM64 CRC emulation")
|
||||
|
||||
MY_FORCE_INLINE
|
||||
Z7_FORCE_INLINE
|
||||
UInt32 __crc32b(UInt32 v, UInt32 data)
|
||||
{
|
||||
const UInt32 *table = g_CrcTable;
|
||||
|
@ -113,7 +126,7 @@ UInt32 __crc32b(UInt32 v, UInt32 data)
|
|||
return v;
|
||||
}
|
||||
|
||||
MY_FORCE_INLINE
|
||||
Z7_FORCE_INLINE
|
||||
UInt32 __crc32w(UInt32 v, UInt32 data)
|
||||
{
|
||||
const UInt32 *table = g_CrcTable;
|
||||
|
@ -124,7 +137,7 @@ UInt32 __crc32w(UInt32 v, UInt32 data)
|
|||
return v;
|
||||
}
|
||||
|
||||
MY_FORCE_INLINE
|
||||
Z7_FORCE_INLINE
|
||||
UInt32 __crc32d(UInt32 v, UInt64 data)
|
||||
{
|
||||
const UInt32 *table = g_CrcTable;
|
||||
|
@ -156,9 +169,9 @@ UInt32 __crc32d(UInt32 v, UInt64 data)
|
|||
// #pragma message("USE ARM HW CRC")
|
||||
|
||||
ATTRIB_CRC
|
||||
UInt32 MY_FAST_CALL CrcUpdateT0_32(UInt32 v, const void *data, size_t size, const UInt32 *table);
|
||||
UInt32 Z7_FASTCALL CrcUpdateT0_32(UInt32 v, const void *data, size_t size, const UInt32 *table);
|
||||
ATTRIB_CRC
|
||||
UInt32 MY_FAST_CALL CrcUpdateT0_32(UInt32 v, const void *data, size_t size, const UInt32 *table)
|
||||
UInt32 Z7_FASTCALL CrcUpdateT0_32(UInt32 v, const void *data, size_t size, const UInt32 *table)
|
||||
{
|
||||
const Byte *p = (const Byte *)data;
|
||||
UNUSED_VAR(table);
|
||||
|
@ -188,9 +201,9 @@ UInt32 MY_FAST_CALL CrcUpdateT0_32(UInt32 v, const void *data, size_t size, cons
|
|||
}
|
||||
|
||||
ATTRIB_CRC
|
||||
UInt32 MY_FAST_CALL CrcUpdateT0_64(UInt32 v, const void *data, size_t size, const UInt32 *table);
|
||||
UInt32 Z7_FASTCALL CrcUpdateT0_64(UInt32 v, const void *data, size_t size, const UInt32 *table);
|
||||
ATTRIB_CRC
|
||||
UInt32 MY_FAST_CALL CrcUpdateT0_64(UInt32 v, const void *data, size_t size, const UInt32 *table)
|
||||
UInt32 Z7_FASTCALL CrcUpdateT0_64(UInt32 v, const void *data, size_t size, const UInt32 *table)
|
||||
{
|
||||
const Byte *p = (const Byte *)data;
|
||||
UNUSED_VAR(table);
|
||||
|
@ -219,6 +232,9 @@ UInt32 MY_FAST_CALL CrcUpdateT0_64(UInt32 v, const void *data, size_t size, cons
|
|||
return v;
|
||||
}
|
||||
|
||||
#undef T0_32_UNROLL_BYTES
|
||||
#undef T0_64_UNROLL_BYTES
|
||||
|
||||
#endif // defined(USE_ARM64_CRC) || defined(USE_CRC_EMU)
|
||||
|
||||
#endif // MY_CPU_LE
|
||||
|
@ -226,7 +242,7 @@ UInt32 MY_FAST_CALL CrcUpdateT0_64(UInt32 v, const void *data, size_t size, cons
|
|||
|
||||
|
||||
|
||||
void MY_FAST_CALL CrcGenerateTable()
|
||||
void Z7_FASTCALL CrcGenerateTable(void)
|
||||
{
|
||||
UInt32 i;
|
||||
for (i = 0; i < 256; i++)
|
||||
|
@ -239,64 +255,62 @@ void MY_FAST_CALL CrcGenerateTable()
|
|||
}
|
||||
for (i = 256; i < 256 * CRC_NUM_TABLES; i++)
|
||||
{
|
||||
UInt32 r = g_CrcTable[(size_t)i - 256];
|
||||
const UInt32 r = g_CrcTable[(size_t)i - 256];
|
||||
g_CrcTable[i] = g_CrcTable[r & 0xFF] ^ (r >> 8);
|
||||
}
|
||||
|
||||
#if CRC_NUM_TABLES < 4
|
||||
|
||||
g_CrcUpdate = CrcUpdateT1;
|
||||
|
||||
#else
|
||||
|
||||
#ifdef MY_CPU_LE
|
||||
|
||||
g_CrcUpdateT4 = CrcUpdateT4;
|
||||
g_CrcUpdate = CrcUpdateT4;
|
||||
|
||||
#if CRC_NUM_TABLES >= 8
|
||||
g_CrcUpdate = CrcUpdateT1;
|
||||
#elif defined(MY_CPU_LE)
|
||||
// g_CrcUpdateT4 = CrcUpdateT4;
|
||||
#if CRC_NUM_TABLES < 8
|
||||
g_CrcUpdate = CrcUpdateT4;
|
||||
#else // CRC_NUM_TABLES >= 8
|
||||
g_CrcUpdateT8 = CrcUpdateT8;
|
||||
|
||||
/*
|
||||
#ifdef MY_CPU_X86_OR_AMD64
|
||||
if (!CPU_Is_InOrder())
|
||||
#endif
|
||||
g_CrcUpdate = CrcUpdateT8;
|
||||
*/
|
||||
g_CrcUpdate = CrcUpdateT8;
|
||||
#endif
|
||||
|
||||
#else
|
||||
{
|
||||
#ifndef MY_CPU_BE
|
||||
#ifndef MY_CPU_BE
|
||||
UInt32 k = 0x01020304;
|
||||
const Byte *p = (const Byte *)&k;
|
||||
if (p[0] == 4 && p[1] == 3)
|
||||
{
|
||||
g_CrcUpdateT4 = CrcUpdateT4;
|
||||
g_CrcUpdate = CrcUpdateT4;
|
||||
#if CRC_NUM_TABLES >= 8
|
||||
g_CrcUpdateT8 = CrcUpdateT8;
|
||||
g_CrcUpdate = CrcUpdateT8;
|
||||
#if CRC_NUM_TABLES < 8
|
||||
// g_CrcUpdateT4 = CrcUpdateT4;
|
||||
g_CrcUpdate = CrcUpdateT4;
|
||||
#else // CRC_NUM_TABLES >= 8
|
||||
g_CrcUpdateT8 = CrcUpdateT8;
|
||||
g_CrcUpdate = CrcUpdateT8;
|
||||
#endif
|
||||
}
|
||||
else if (p[0] != 1 || p[1] != 2)
|
||||
g_CrcUpdate = CrcUpdateT1;
|
||||
else
|
||||
#endif
|
||||
#endif // MY_CPU_BE
|
||||
{
|
||||
for (i = 256 * CRC_NUM_TABLES - 1; i >= 256; i--)
|
||||
{
|
||||
UInt32 x = g_CrcTable[(size_t)i - 256];
|
||||
g_CrcTable[i] = CRC_UINT32_SWAP(x);
|
||||
const UInt32 x = g_CrcTable[(size_t)i - 256];
|
||||
g_CrcTable[i] = Z7_BSWAP32(x);
|
||||
}
|
||||
g_CrcUpdateT4 = CrcUpdateT1_BeT4;
|
||||
g_CrcUpdate = CrcUpdateT1_BeT4;
|
||||
#if CRC_NUM_TABLES >= 8
|
||||
g_CrcUpdateT8 = CrcUpdateT1_BeT8;
|
||||
g_CrcUpdate = CrcUpdateT1_BeT8;
|
||||
#if CRC_NUM_TABLES <= 4
|
||||
g_CrcUpdate = CrcUpdateT1;
|
||||
#elif CRC_NUM_TABLES <= 8
|
||||
// g_CrcUpdateT4 = CrcUpdateT1_BeT4;
|
||||
g_CrcUpdate = CrcUpdateT1_BeT4;
|
||||
#else // CRC_NUM_TABLES > 8
|
||||
g_CrcUpdateT8 = CrcUpdateT1_BeT8;
|
||||
g_CrcUpdate = CrcUpdateT1_BeT8;
|
||||
#endif
|
||||
}
|
||||
}
|
||||
#endif
|
||||
#endif
|
||||
#endif // CRC_NUM_TABLES < 4
|
||||
|
||||
#ifdef MY_CPU_LE
|
||||
#ifdef USE_ARM64_CRC
|
||||
|
@ -320,3 +334,7 @@ void MY_FAST_CALL CrcGenerateTable()
|
|||
#endif
|
||||
#endif
|
||||
}
|
||||
|
||||
#undef kCrcPoly
|
||||
#undef CRC64_NUM_TABLES
|
||||
#undef CRC_UPDATE_BYTE_2
|
||||
|
|
|
@ -1,5 +1,5 @@
|
|||
/* 7zCrcOpt.c -- CRC32 calculation
|
||||
2021-02-09 : Igor Pavlov : Public domain */
|
||||
2023-04-02 : Igor Pavlov : Public domain */
|
||||
|
||||
#include "Precomp.h"
|
||||
|
||||
|
@ -9,8 +9,8 @@
|
|||
|
||||
#define CRC_UPDATE_BYTE_2(crc, b) (table[((crc) ^ (b)) & 0xFF] ^ ((crc) >> 8))
|
||||
|
||||
UInt32 MY_FAST_CALL CrcUpdateT4(UInt32 v, const void *data, size_t size, const UInt32 *table);
|
||||
UInt32 MY_FAST_CALL CrcUpdateT4(UInt32 v, const void *data, size_t size, const UInt32 *table)
|
||||
UInt32 Z7_FASTCALL CrcUpdateT4(UInt32 v, const void *data, size_t size, const UInt32 *table);
|
||||
UInt32 Z7_FASTCALL CrcUpdateT4(UInt32 v, const void *data, size_t size, const UInt32 *table)
|
||||
{
|
||||
const Byte *p = (const Byte *)data;
|
||||
for (; size > 0 && ((unsigned)(ptrdiff_t)p & 3) != 0; size--, p++)
|
||||
|
@ -29,8 +29,8 @@ UInt32 MY_FAST_CALL CrcUpdateT4(UInt32 v, const void *data, size_t size, const U
|
|||
return v;
|
||||
}
|
||||
|
||||
UInt32 MY_FAST_CALL CrcUpdateT8(UInt32 v, const void *data, size_t size, const UInt32 *table);
|
||||
UInt32 MY_FAST_CALL CrcUpdateT8(UInt32 v, const void *data, size_t size, const UInt32 *table)
|
||||
UInt32 Z7_FASTCALL CrcUpdateT8(UInt32 v, const void *data, size_t size, const UInt32 *table);
|
||||
UInt32 Z7_FASTCALL CrcUpdateT8(UInt32 v, const void *data, size_t size, const UInt32 *table)
|
||||
{
|
||||
const Byte *p = (const Byte *)data;
|
||||
for (; size > 0 && ((unsigned)(ptrdiff_t)p & 7) != 0; size--, p++)
|
||||
|
@ -61,11 +61,11 @@ UInt32 MY_FAST_CALL CrcUpdateT8(UInt32 v, const void *data, size_t size, const U
|
|||
|
||||
#ifndef MY_CPU_LE
|
||||
|
||||
#define CRC_UINT32_SWAP(v) ((v >> 24) | ((v >> 8) & 0xFF00) | ((v << 8) & 0xFF0000) | (v << 24))
|
||||
#define CRC_UINT32_SWAP(v) Z7_BSWAP32(v)
|
||||
|
||||
#define CRC_UPDATE_BYTE_2_BE(crc, b) (table[(((crc) >> 24) ^ (b))] ^ ((crc) << 8))
|
||||
|
||||
UInt32 MY_FAST_CALL CrcUpdateT1_BeT4(UInt32 v, const void *data, size_t size, const UInt32 *table)
|
||||
UInt32 Z7_FASTCALL CrcUpdateT1_BeT4(UInt32 v, const void *data, size_t size, const UInt32 *table)
|
||||
{
|
||||
const Byte *p = (const Byte *)data;
|
||||
table += 0x100;
|
||||
|
@ -86,7 +86,7 @@ UInt32 MY_FAST_CALL CrcUpdateT1_BeT4(UInt32 v, const void *data, size_t size, co
|
|||
return CRC_UINT32_SWAP(v);
|
||||
}
|
||||
|
||||
UInt32 MY_FAST_CALL CrcUpdateT1_BeT8(UInt32 v, const void *data, size_t size, const UInt32 *table)
|
||||
UInt32 Z7_FASTCALL CrcUpdateT1_BeT8(UInt32 v, const void *data, size_t size, const UInt32 *table)
|
||||
{
|
||||
const Byte *p = (const Byte *)data;
|
||||
table += 0x100;
|
||||
|
|
|
@ -1,11 +1,11 @@
|
|||
/* 7zDec.c -- Decoding from 7z folder
|
||||
2021-02-09 : Igor Pavlov : Public domain */
|
||||
2023-04-02 : Igor Pavlov : Public domain */
|
||||
|
||||
#include "Precomp.h"
|
||||
|
||||
#include <string.h>
|
||||
|
||||
/* #define _7ZIP_PPMD_SUPPPORT */
|
||||
/* #define Z7_PPMD_SUPPORT */
|
||||
|
||||
#include "7z.h"
|
||||
#include "7zCrc.h"
|
||||
|
@ -16,27 +16,49 @@
|
|||
#include "Delta.h"
|
||||
#include "LzmaDec.h"
|
||||
#include "Lzma2Dec.h"
|
||||
#ifdef _7ZIP_PPMD_SUPPPORT
|
||||
#ifdef Z7_PPMD_SUPPORT
|
||||
#include "Ppmd7.h"
|
||||
#endif
|
||||
|
||||
#define k_Copy 0
|
||||
#ifndef _7Z_NO_METHOD_LZMA2
|
||||
#ifndef Z7_NO_METHOD_LZMA2
|
||||
#define k_LZMA2 0x21
|
||||
#endif
|
||||
#define k_LZMA 0x30101
|
||||
#define k_BCJ2 0x303011B
|
||||
#ifndef _7Z_NO_METHODS_FILTERS
|
||||
|
||||
#if !defined(Z7_NO_METHODS_FILTERS)
|
||||
#define Z7_USE_BRANCH_FILTER
|
||||
#endif
|
||||
|
||||
#if !defined(Z7_NO_METHODS_FILTERS) || \
|
||||
defined(Z7_USE_NATIVE_BRANCH_FILTER) && defined(MY_CPU_ARM64)
|
||||
#define Z7_USE_FILTER_ARM64
|
||||
#ifndef Z7_USE_BRANCH_FILTER
|
||||
#define Z7_USE_BRANCH_FILTER
|
||||
#endif
|
||||
#define k_ARM64 0xa
|
||||
#endif
|
||||
|
||||
#if !defined(Z7_NO_METHODS_FILTERS) || \
|
||||
defined(Z7_USE_NATIVE_BRANCH_FILTER) && defined(MY_CPU_ARMT)
|
||||
#define Z7_USE_FILTER_ARMT
|
||||
#ifndef Z7_USE_BRANCH_FILTER
|
||||
#define Z7_USE_BRANCH_FILTER
|
||||
#endif
|
||||
#define k_ARMT 0x3030701
|
||||
#endif
|
||||
|
||||
#ifndef Z7_NO_METHODS_FILTERS
|
||||
#define k_Delta 3
|
||||
#define k_BCJ 0x3030103
|
||||
#define k_PPC 0x3030205
|
||||
#define k_IA64 0x3030401
|
||||
#define k_ARM 0x3030501
|
||||
#define k_ARMT 0x3030701
|
||||
#define k_SPARC 0x3030805
|
||||
#endif
|
||||
|
||||
#ifdef _7ZIP_PPMD_SUPPPORT
|
||||
#ifdef Z7_PPMD_SUPPORT
|
||||
|
||||
#define k_PPMD 0x30401
|
||||
|
||||
|
@ -49,12 +71,12 @@ typedef struct
|
|||
UInt64 processed;
|
||||
BoolInt extra;
|
||||
SRes res;
|
||||
const ILookInStream *inStream;
|
||||
ILookInStreamPtr inStream;
|
||||
} CByteInToLook;
|
||||
|
||||
static Byte ReadByte(const IByteIn *pp)
|
||||
static Byte ReadByte(IByteInPtr pp)
|
||||
{
|
||||
CByteInToLook *p = CONTAINER_FROM_VTBL(pp, CByteInToLook, vt);
|
||||
Z7_CONTAINER_FROM_VTBL_TO_DECL_VAR_pp_vt_p(CByteInToLook)
|
||||
if (p->cur != p->end)
|
||||
return *p->cur++;
|
||||
if (p->res == SZ_OK)
|
||||
|
@ -67,13 +89,13 @@ static Byte ReadByte(const IByteIn *pp)
|
|||
p->cur = p->begin;
|
||||
p->end = p->begin + size;
|
||||
if (size != 0)
|
||||
return *p->cur++;;
|
||||
return *p->cur++;
|
||||
}
|
||||
p->extra = True;
|
||||
return 0;
|
||||
}
|
||||
|
||||
static SRes SzDecodePpmd(const Byte *props, unsigned propsSize, UInt64 inSize, const ILookInStream *inStream,
|
||||
static SRes SzDecodePpmd(const Byte *props, unsigned propsSize, UInt64 inSize, ILookInStreamPtr inStream,
|
||||
Byte *outBuffer, SizeT outSize, ISzAllocPtr allocMain)
|
||||
{
|
||||
CPpmd7 ppmd;
|
||||
|
@ -138,14 +160,14 @@ static SRes SzDecodePpmd(const Byte *props, unsigned propsSize, UInt64 inSize, c
|
|||
#endif
|
||||
|
||||
|
||||
static SRes SzDecodeLzma(const Byte *props, unsigned propsSize, UInt64 inSize, ILookInStream *inStream,
|
||||
static SRes SzDecodeLzma(const Byte *props, unsigned propsSize, UInt64 inSize, ILookInStreamPtr inStream,
|
||||
Byte *outBuffer, SizeT outSize, ISzAllocPtr allocMain)
|
||||
{
|
||||
CLzmaDec state;
|
||||
SRes res = SZ_OK;
|
||||
|
||||
LzmaDec_Construct(&state);
|
||||
RINOK(LzmaDec_AllocateProbs(&state, props, propsSize, allocMain));
|
||||
LzmaDec_CONSTRUCT(&state)
|
||||
RINOK(LzmaDec_AllocateProbs(&state, props, propsSize, allocMain))
|
||||
state.dic = outBuffer;
|
||||
state.dicBufSize = outSize;
|
||||
LzmaDec_Init(&state);
|
||||
|
@ -196,18 +218,18 @@ static SRes SzDecodeLzma(const Byte *props, unsigned propsSize, UInt64 inSize, I
|
|||
}
|
||||
|
||||
|
||||
#ifndef _7Z_NO_METHOD_LZMA2
|
||||
#ifndef Z7_NO_METHOD_LZMA2
|
||||
|
||||
static SRes SzDecodeLzma2(const Byte *props, unsigned propsSize, UInt64 inSize, ILookInStream *inStream,
|
||||
static SRes SzDecodeLzma2(const Byte *props, unsigned propsSize, UInt64 inSize, ILookInStreamPtr inStream,
|
||||
Byte *outBuffer, SizeT outSize, ISzAllocPtr allocMain)
|
||||
{
|
||||
CLzma2Dec state;
|
||||
SRes res = SZ_OK;
|
||||
|
||||
Lzma2Dec_Construct(&state);
|
||||
Lzma2Dec_CONSTRUCT(&state)
|
||||
if (propsSize != 1)
|
||||
return SZ_ERROR_DATA;
|
||||
RINOK(Lzma2Dec_AllocateProbs(&state, props[0], allocMain));
|
||||
RINOK(Lzma2Dec_AllocateProbs(&state, props[0], allocMain))
|
||||
state.decoder.dic = outBuffer;
|
||||
state.decoder.dicBufSize = outSize;
|
||||
Lzma2Dec_Init(&state);
|
||||
|
@ -257,7 +279,7 @@ static SRes SzDecodeLzma2(const Byte *props, unsigned propsSize, UInt64 inSize,
|
|||
#endif
|
||||
|
||||
|
||||
static SRes SzDecodeCopy(UInt64 inSize, ILookInStream *inStream, Byte *outBuffer)
|
||||
static SRes SzDecodeCopy(UInt64 inSize, ILookInStreamPtr inStream, Byte *outBuffer)
|
||||
{
|
||||
while (inSize > 0)
|
||||
{
|
||||
|
@ -265,13 +287,13 @@ static SRes SzDecodeCopy(UInt64 inSize, ILookInStream *inStream, Byte *outBuffer
|
|||
size_t curSize = (1 << 18);
|
||||
if (curSize > inSize)
|
||||
curSize = (size_t)inSize;
|
||||
RINOK(ILookInStream_Look(inStream, &inBuf, &curSize));
|
||||
RINOK(ILookInStream_Look(inStream, &inBuf, &curSize))
|
||||
if (curSize == 0)
|
||||
return SZ_ERROR_INPUT_EOF;
|
||||
memcpy(outBuffer, inBuf, curSize);
|
||||
outBuffer += curSize;
|
||||
inSize -= curSize;
|
||||
RINOK(ILookInStream_Skip(inStream, curSize));
|
||||
RINOK(ILookInStream_Skip(inStream, curSize))
|
||||
}
|
||||
return SZ_OK;
|
||||
}
|
||||
|
@ -282,12 +304,12 @@ static BoolInt IS_MAIN_METHOD(UInt32 m)
|
|||
{
|
||||
case k_Copy:
|
||||
case k_LZMA:
|
||||
#ifndef _7Z_NO_METHOD_LZMA2
|
||||
#ifndef Z7_NO_METHOD_LZMA2
|
||||
case k_LZMA2:
|
||||
#endif
|
||||
#ifdef _7ZIP_PPMD_SUPPPORT
|
||||
#endif
|
||||
#ifdef Z7_PPMD_SUPPORT
|
||||
case k_PPMD:
|
||||
#endif
|
||||
#endif
|
||||
return True;
|
||||
}
|
||||
return False;
|
||||
|
@ -317,7 +339,7 @@ static SRes CheckSupportedFolder(const CSzFolder *f)
|
|||
}
|
||||
|
||||
|
||||
#ifndef _7Z_NO_METHODS_FILTERS
|
||||
#if defined(Z7_USE_BRANCH_FILTER)
|
||||
|
||||
if (f->NumCoders == 2)
|
||||
{
|
||||
|
@ -333,13 +355,20 @@ static SRes CheckSupportedFolder(const CSzFolder *f)
|
|||
return SZ_ERROR_UNSUPPORTED;
|
||||
switch ((UInt32)c->MethodID)
|
||||
{
|
||||
#if !defined(Z7_NO_METHODS_FILTERS)
|
||||
case k_Delta:
|
||||
case k_BCJ:
|
||||
case k_PPC:
|
||||
case k_IA64:
|
||||
case k_SPARC:
|
||||
case k_ARM:
|
||||
#endif
|
||||
#ifdef Z7_USE_FILTER_ARM64
|
||||
case k_ARM64:
|
||||
#endif
|
||||
#ifdef Z7_USE_FILTER_ARMT
|
||||
case k_ARMT:
|
||||
#endif
|
||||
break;
|
||||
default:
|
||||
return SZ_ERROR_UNSUPPORTED;
|
||||
|
@ -372,15 +401,16 @@ static SRes CheckSupportedFolder(const CSzFolder *f)
|
|||
return SZ_ERROR_UNSUPPORTED;
|
||||
}
|
||||
|
||||
#ifndef _7Z_NO_METHODS_FILTERS
|
||||
#define CASE_BRA_CONV(isa) case k_ ## isa: isa ## _Convert(outBuffer, outSize, 0, 0); break;
|
||||
#endif
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
static SRes SzFolder_Decode2(const CSzFolder *folder,
|
||||
const Byte *propsData,
|
||||
const UInt64 *unpackSizes,
|
||||
const UInt64 *packPositions,
|
||||
ILookInStream *inStream, UInt64 startPos,
|
||||
ILookInStreamPtr inStream, UInt64 startPos,
|
||||
Byte *outBuffer, SizeT outSize, ISzAllocPtr allocMain,
|
||||
Byte *tempBuf[])
|
||||
{
|
||||
|
@ -389,7 +419,7 @@ static SRes SzFolder_Decode2(const CSzFolder *folder,
|
|||
SizeT tempSize3 = 0;
|
||||
Byte *tempBuf3 = 0;
|
||||
|
||||
RINOK(CheckSupportedFolder(folder));
|
||||
RINOK(CheckSupportedFolder(folder))
|
||||
|
||||
for (ci = 0; ci < folder->NumCoders; ci++)
|
||||
{
|
||||
|
@ -404,8 +434,8 @@ static SRes SzFolder_Decode2(const CSzFolder *folder,
|
|||
SizeT outSizeCur = outSize;
|
||||
if (folder->NumCoders == 4)
|
||||
{
|
||||
UInt32 indices[] = { 3, 2, 0 };
|
||||
UInt64 unpackSize = unpackSizes[ci];
|
||||
const UInt32 indices[] = { 3, 2, 0 };
|
||||
const UInt64 unpackSize = unpackSizes[ci];
|
||||
si = indices[ci];
|
||||
if (ci < 2)
|
||||
{
|
||||
|
@ -431,37 +461,37 @@ static SRes SzFolder_Decode2(const CSzFolder *folder,
|
|||
}
|
||||
offset = packPositions[si];
|
||||
inSize = packPositions[(size_t)si + 1] - offset;
|
||||
RINOK(LookInStream_SeekTo(inStream, startPos + offset));
|
||||
RINOK(LookInStream_SeekTo(inStream, startPos + offset))
|
||||
|
||||
if (coder->MethodID == k_Copy)
|
||||
{
|
||||
if (inSize != outSizeCur) /* check it */
|
||||
return SZ_ERROR_DATA;
|
||||
RINOK(SzDecodeCopy(inSize, inStream, outBufCur));
|
||||
RINOK(SzDecodeCopy(inSize, inStream, outBufCur))
|
||||
}
|
||||
else if (coder->MethodID == k_LZMA)
|
||||
{
|
||||
RINOK(SzDecodeLzma(propsData + coder->PropsOffset, coder->PropsSize, inSize, inStream, outBufCur, outSizeCur, allocMain));
|
||||
RINOK(SzDecodeLzma(propsData + coder->PropsOffset, coder->PropsSize, inSize, inStream, outBufCur, outSizeCur, allocMain))
|
||||
}
|
||||
#ifndef _7Z_NO_METHOD_LZMA2
|
||||
#ifndef Z7_NO_METHOD_LZMA2
|
||||
else if (coder->MethodID == k_LZMA2)
|
||||
{
|
||||
RINOK(SzDecodeLzma2(propsData + coder->PropsOffset, coder->PropsSize, inSize, inStream, outBufCur, outSizeCur, allocMain));
|
||||
RINOK(SzDecodeLzma2(propsData + coder->PropsOffset, coder->PropsSize, inSize, inStream, outBufCur, outSizeCur, allocMain))
|
||||
}
|
||||
#endif
|
||||
#ifdef _7ZIP_PPMD_SUPPPORT
|
||||
#endif
|
||||
#ifdef Z7_PPMD_SUPPORT
|
||||
else if (coder->MethodID == k_PPMD)
|
||||
{
|
||||
RINOK(SzDecodePpmd(propsData + coder->PropsOffset, coder->PropsSize, inSize, inStream, outBufCur, outSizeCur, allocMain));
|
||||
RINOK(SzDecodePpmd(propsData + coder->PropsOffset, coder->PropsSize, inSize, inStream, outBufCur, outSizeCur, allocMain))
|
||||
}
|
||||
#endif
|
||||
#endif
|
||||
else
|
||||
return SZ_ERROR_UNSUPPORTED;
|
||||
}
|
||||
else if (coder->MethodID == k_BCJ2)
|
||||
{
|
||||
UInt64 offset = packPositions[1];
|
||||
UInt64 s3Size = packPositions[2] - offset;
|
||||
const UInt64 offset = packPositions[1];
|
||||
const UInt64 s3Size = packPositions[2] - offset;
|
||||
|
||||
if (ci != 3)
|
||||
return SZ_ERROR_UNSUPPORTED;
|
||||
|
@ -473,8 +503,8 @@ static SRes SzFolder_Decode2(const CSzFolder *folder,
|
|||
if (!tempBuf[2] && tempSizes[2] != 0)
|
||||
return SZ_ERROR_MEM;
|
||||
|
||||
RINOK(LookInStream_SeekTo(inStream, startPos + offset));
|
||||
RINOK(SzDecodeCopy(s3Size, inStream, tempBuf[2]));
|
||||
RINOK(LookInStream_SeekTo(inStream, startPos + offset))
|
||||
RINOK(SzDecodeCopy(s3Size, inStream, tempBuf[2]))
|
||||
|
||||
if ((tempSizes[0] & 3) != 0 ||
|
||||
(tempSizes[1] & 3) != 0 ||
|
||||
|
@ -493,26 +523,22 @@ static SRes SzFolder_Decode2(const CSzFolder *folder,
|
|||
p.destLim = outBuffer + outSize;
|
||||
|
||||
Bcj2Dec_Init(&p);
|
||||
RINOK(Bcj2Dec_Decode(&p));
|
||||
RINOK(Bcj2Dec_Decode(&p))
|
||||
|
||||
{
|
||||
unsigned i;
|
||||
for (i = 0; i < 4; i++)
|
||||
if (p.bufs[i] != p.lims[i])
|
||||
return SZ_ERROR_DATA;
|
||||
|
||||
if (!Bcj2Dec_IsFinished(&p))
|
||||
return SZ_ERROR_DATA;
|
||||
|
||||
if (p.dest != p.destLim
|
||||
|| p.state != BCJ2_STREAM_MAIN)
|
||||
if (p.dest != p.destLim || !Bcj2Dec_IsMaybeFinished(&p))
|
||||
return SZ_ERROR_DATA;
|
||||
}
|
||||
}
|
||||
}
|
||||
#ifndef _7Z_NO_METHODS_FILTERS
|
||||
#if defined(Z7_USE_BRANCH_FILTER)
|
||||
else if (ci == 1)
|
||||
{
|
||||
#if !defined(Z7_NO_METHODS_FILTERS)
|
||||
if (coder->MethodID == k_Delta)
|
||||
{
|
||||
if (coder->PropsSize != 1)
|
||||
|
@ -522,31 +548,53 @@ static SRes SzFolder_Decode2(const CSzFolder *folder,
|
|||
Delta_Init(state);
|
||||
Delta_Decode(state, (unsigned)(propsData[coder->PropsOffset]) + 1, outBuffer, outSize);
|
||||
}
|
||||
continue;
|
||||
}
|
||||
else
|
||||
#endif
|
||||
|
||||
#ifdef Z7_USE_FILTER_ARM64
|
||||
if (coder->MethodID == k_ARM64)
|
||||
{
|
||||
UInt32 pc = 0;
|
||||
if (coder->PropsSize == 4)
|
||||
pc = GetUi32(propsData + coder->PropsOffset);
|
||||
else if (coder->PropsSize != 0)
|
||||
return SZ_ERROR_UNSUPPORTED;
|
||||
z7_BranchConv_ARM64_Dec(outBuffer, outSize, pc);
|
||||
continue;
|
||||
}
|
||||
#endif
|
||||
|
||||
#if !defined(Z7_NO_METHODS_FILTERS) || defined(Z7_USE_FILTER_ARMT)
|
||||
{
|
||||
if (coder->PropsSize != 0)
|
||||
return SZ_ERROR_UNSUPPORTED;
|
||||
#define CASE_BRA_CONV(isa) case k_ ## isa: Z7_BRANCH_CONV_DEC(isa)(outBuffer, outSize, 0); break; // pc = 0;
|
||||
switch (coder->MethodID)
|
||||
{
|
||||
#if !defined(Z7_NO_METHODS_FILTERS)
|
||||
case k_BCJ:
|
||||
{
|
||||
UInt32 state;
|
||||
x86_Convert_Init(state);
|
||||
x86_Convert(outBuffer, outSize, 0, &state, 0);
|
||||
UInt32 state = Z7_BRANCH_CONV_ST_X86_STATE_INIT_VAL;
|
||||
z7_BranchConvSt_X86_Dec(outBuffer, outSize, 0, &state); // pc = 0
|
||||
break;
|
||||
}
|
||||
CASE_BRA_CONV(PPC)
|
||||
CASE_BRA_CONV(IA64)
|
||||
CASE_BRA_CONV(SPARC)
|
||||
CASE_BRA_CONV(ARM)
|
||||
#endif
|
||||
#if !defined(Z7_NO_METHODS_FILTERS) || defined(Z7_USE_FILTER_ARMT)
|
||||
CASE_BRA_CONV(ARMT)
|
||||
#endif
|
||||
default:
|
||||
return SZ_ERROR_UNSUPPORTED;
|
||||
}
|
||||
continue;
|
||||
}
|
||||
}
|
||||
#endif
|
||||
#endif
|
||||
} // (c == 1)
|
||||
#endif
|
||||
else
|
||||
return SZ_ERROR_UNSUPPORTED;
|
||||
}
|
||||
|
@ -556,7 +604,7 @@ static SRes SzFolder_Decode2(const CSzFolder *folder,
|
|||
|
||||
|
||||
SRes SzAr_DecodeFolder(const CSzAr *p, UInt32 folderIndex,
|
||||
ILookInStream *inStream, UInt64 startPos,
|
||||
ILookInStreamPtr inStream, UInt64 startPos,
|
||||
Byte *outBuffer, size_t outSize,
|
||||
ISzAllocPtr allocMain)
|
||||
{
|
||||
|
|
|
@ -1,5 +1,5 @@
|
|||
/* 7zFile.c -- File IO
|
||||
2021-04-29 : Igor Pavlov : Public domain */
|
||||
2023-04-02 : Igor Pavlov : Public domain */
|
||||
|
||||
#include "Precomp.h"
|
||||
|
||||
|
@ -268,7 +268,7 @@ WRes File_Write(CSzFile *p, const void *data, size_t *size)
|
|||
return errno;
|
||||
if (processed == 0)
|
||||
break;
|
||||
data = (void *)((Byte *)data + (size_t)processed);
|
||||
data = (const void *)((const Byte *)data + (size_t)processed);
|
||||
originalSize -= (size_t)processed;
|
||||
*size += (size_t)processed;
|
||||
}
|
||||
|
@ -287,7 +287,8 @@ WRes File_Seek(CSzFile *p, Int64 *pos, ESzSeek origin)
|
|||
DWORD moveMethod;
|
||||
UInt32 low = (UInt32)*pos;
|
||||
LONG high = (LONG)((UInt64)*pos >> 16 >> 16); /* for case when UInt64 is 32-bit only */
|
||||
switch (origin)
|
||||
// (int) to eliminate clang warning
|
||||
switch ((int)origin)
|
||||
{
|
||||
case SZ_SEEK_SET: moveMethod = FILE_BEGIN; break;
|
||||
case SZ_SEEK_CUR: moveMethod = FILE_CURRENT; break;
|
||||
|
@ -308,7 +309,7 @@ WRes File_Seek(CSzFile *p, Int64 *pos, ESzSeek origin)
|
|||
|
||||
int moveMethod; // = origin;
|
||||
|
||||
switch (origin)
|
||||
switch ((int)origin)
|
||||
{
|
||||
case SZ_SEEK_SET: moveMethod = SEEK_SET; break;
|
||||
case SZ_SEEK_CUR: moveMethod = SEEK_CUR; break;
|
||||
|
@ -387,10 +388,10 @@ WRes File_GetLength(CSzFile *p, UInt64 *length)
|
|||
|
||||
/* ---------- FileSeqInStream ---------- */
|
||||
|
||||
static SRes FileSeqInStream_Read(const ISeqInStream *pp, void *buf, size_t *size)
|
||||
static SRes FileSeqInStream_Read(ISeqInStreamPtr pp, void *buf, size_t *size)
|
||||
{
|
||||
CFileSeqInStream *p = CONTAINER_FROM_VTBL(pp, CFileSeqInStream, vt);
|
||||
WRes wres = File_Read(&p->file, buf, size);
|
||||
Z7_CONTAINER_FROM_VTBL_TO_DECL_VAR_pp_vt_p(CFileSeqInStream)
|
||||
const WRes wres = File_Read(&p->file, buf, size);
|
||||
p->wres = wres;
|
||||
return (wres == 0) ? SZ_OK : SZ_ERROR_READ;
|
||||
}
|
||||
|
@ -403,18 +404,18 @@ void FileSeqInStream_CreateVTable(CFileSeqInStream *p)
|
|||
|
||||
/* ---------- FileInStream ---------- */
|
||||
|
||||
static SRes FileInStream_Read(const ISeekInStream *pp, void *buf, size_t *size)
|
||||
static SRes FileInStream_Read(ISeekInStreamPtr pp, void *buf, size_t *size)
|
||||
{
|
||||
CFileInStream *p = CONTAINER_FROM_VTBL(pp, CFileInStream, vt);
|
||||
WRes wres = File_Read(&p->file, buf, size);
|
||||
Z7_CONTAINER_FROM_VTBL_TO_DECL_VAR_pp_vt_p(CFileInStream)
|
||||
const WRes wres = File_Read(&p->file, buf, size);
|
||||
p->wres = wres;
|
||||
return (wres == 0) ? SZ_OK : SZ_ERROR_READ;
|
||||
}
|
||||
|
||||
static SRes FileInStream_Seek(const ISeekInStream *pp, Int64 *pos, ESzSeek origin)
|
||||
static SRes FileInStream_Seek(ISeekInStreamPtr pp, Int64 *pos, ESzSeek origin)
|
||||
{
|
||||
CFileInStream *p = CONTAINER_FROM_VTBL(pp, CFileInStream, vt);
|
||||
WRes wres = File_Seek(&p->file, pos, origin);
|
||||
Z7_CONTAINER_FROM_VTBL_TO_DECL_VAR_pp_vt_p(CFileInStream)
|
||||
const WRes wres = File_Seek(&p->file, pos, origin);
|
||||
p->wres = wres;
|
||||
return (wres == 0) ? SZ_OK : SZ_ERROR_READ;
|
||||
}
|
||||
|
@ -428,10 +429,10 @@ void FileInStream_CreateVTable(CFileInStream *p)
|
|||
|
||||
/* ---------- FileOutStream ---------- */
|
||||
|
||||
static size_t FileOutStream_Write(const ISeqOutStream *pp, const void *data, size_t size)
|
||||
static size_t FileOutStream_Write(ISeqOutStreamPtr pp, const void *data, size_t size)
|
||||
{
|
||||
CFileOutStream *p = CONTAINER_FROM_VTBL(pp, CFileOutStream, vt);
|
||||
WRes wres = File_Write(&p->file, data, &size);
|
||||
Z7_CONTAINER_FROM_VTBL_TO_DECL_VAR_pp_vt_p(CFileOutStream)
|
||||
const WRes wres = File_Write(&p->file, data, &size);
|
||||
p->wres = wres;
|
||||
return size;
|
||||
}
|
||||
|
|
|
@ -1,5 +1,5 @@
|
|||
/* 7zStream.c -- 7z Stream functions
|
||||
2021-02-09 : Igor Pavlov : Public domain */
|
||||
2023-04-02 : Igor Pavlov : Public domain */
|
||||
|
||||
#include "Precomp.h"
|
||||
|
||||
|
@ -7,12 +7,33 @@
|
|||
|
||||
#include "7zTypes.h"
|
||||
|
||||
SRes SeqInStream_Read2(const ISeqInStream *stream, void *buf, size_t size, SRes errorType)
|
||||
|
||||
SRes SeqInStream_ReadMax(ISeqInStreamPtr stream, void *buf, size_t *processedSize)
|
||||
{
|
||||
size_t size = *processedSize;
|
||||
*processedSize = 0;
|
||||
while (size != 0)
|
||||
{
|
||||
size_t cur = size;
|
||||
const SRes res = ISeqInStream_Read(stream, buf, &cur);
|
||||
*processedSize += cur;
|
||||
buf = (void *)((Byte *)buf + cur);
|
||||
size -= cur;
|
||||
if (res != SZ_OK)
|
||||
return res;
|
||||
if (cur == 0)
|
||||
return SZ_OK;
|
||||
}
|
||||
return SZ_OK;
|
||||
}
|
||||
|
||||
/*
|
||||
SRes SeqInStream_Read2(ISeqInStreamPtr stream, void *buf, size_t size, SRes errorType)
|
||||
{
|
||||
while (size != 0)
|
||||
{
|
||||
size_t processed = size;
|
||||
RINOK(ISeqInStream_Read(stream, buf, &processed));
|
||||
RINOK(ISeqInStream_Read(stream, buf, &processed))
|
||||
if (processed == 0)
|
||||
return errorType;
|
||||
buf = (void *)((Byte *)buf + processed);
|
||||
|
@ -21,42 +42,44 @@ SRes SeqInStream_Read2(const ISeqInStream *stream, void *buf, size_t size, SRes
|
|||
return SZ_OK;
|
||||
}
|
||||
|
||||
SRes SeqInStream_Read(const ISeqInStream *stream, void *buf, size_t size)
|
||||
SRes SeqInStream_Read(ISeqInStreamPtr stream, void *buf, size_t size)
|
||||
{
|
||||
return SeqInStream_Read2(stream, buf, size, SZ_ERROR_INPUT_EOF);
|
||||
}
|
||||
*/
|
||||
|
||||
SRes SeqInStream_ReadByte(const ISeqInStream *stream, Byte *buf)
|
||||
|
||||
SRes SeqInStream_ReadByte(ISeqInStreamPtr stream, Byte *buf)
|
||||
{
|
||||
size_t processed = 1;
|
||||
RINOK(ISeqInStream_Read(stream, buf, &processed));
|
||||
RINOK(ISeqInStream_Read(stream, buf, &processed))
|
||||
return (processed == 1) ? SZ_OK : SZ_ERROR_INPUT_EOF;
|
||||
}
|
||||
|
||||
|
||||
|
||||
SRes LookInStream_SeekTo(const ILookInStream *stream, UInt64 offset)
|
||||
SRes LookInStream_SeekTo(ILookInStreamPtr stream, UInt64 offset)
|
||||
{
|
||||
Int64 t = (Int64)offset;
|
||||
return ILookInStream_Seek(stream, &t, SZ_SEEK_SET);
|
||||
}
|
||||
|
||||
SRes LookInStream_LookRead(const ILookInStream *stream, void *buf, size_t *size)
|
||||
SRes LookInStream_LookRead(ILookInStreamPtr stream, void *buf, size_t *size)
|
||||
{
|
||||
const void *lookBuf;
|
||||
if (*size == 0)
|
||||
return SZ_OK;
|
||||
RINOK(ILookInStream_Look(stream, &lookBuf, size));
|
||||
RINOK(ILookInStream_Look(stream, &lookBuf, size))
|
||||
memcpy(buf, lookBuf, *size);
|
||||
return ILookInStream_Skip(stream, *size);
|
||||
}
|
||||
|
||||
SRes LookInStream_Read2(const ILookInStream *stream, void *buf, size_t size, SRes errorType)
|
||||
SRes LookInStream_Read2(ILookInStreamPtr stream, void *buf, size_t size, SRes errorType)
|
||||
{
|
||||
while (size != 0)
|
||||
{
|
||||
size_t processed = size;
|
||||
RINOK(ILookInStream_Read(stream, buf, &processed));
|
||||
RINOK(ILookInStream_Read(stream, buf, &processed))
|
||||
if (processed == 0)
|
||||
return errorType;
|
||||
buf = (void *)((Byte *)buf + processed);
|
||||
|
@ -65,16 +88,16 @@ SRes LookInStream_Read2(const ILookInStream *stream, void *buf, size_t size, SRe
|
|||
return SZ_OK;
|
||||
}
|
||||
|
||||
SRes LookInStream_Read(const ILookInStream *stream, void *buf, size_t size)
|
||||
SRes LookInStream_Read(ILookInStreamPtr stream, void *buf, size_t size)
|
||||
{
|
||||
return LookInStream_Read2(stream, buf, size, SZ_ERROR_INPUT_EOF);
|
||||
}
|
||||
|
||||
|
||||
|
||||
#define GET_LookToRead2 CLookToRead2 *p = CONTAINER_FROM_VTBL(pp, CLookToRead2, vt);
|
||||
#define GET_LookToRead2 Z7_CONTAINER_FROM_VTBL_TO_DECL_VAR_pp_vt_p(CLookToRead2)
|
||||
|
||||
static SRes LookToRead2_Look_Lookahead(const ILookInStream *pp, const void **buf, size_t *size)
|
||||
static SRes LookToRead2_Look_Lookahead(ILookInStreamPtr pp, const void **buf, size_t *size)
|
||||
{
|
||||
SRes res = SZ_OK;
|
||||
GET_LookToRead2
|
||||
|
@ -93,7 +116,7 @@ static SRes LookToRead2_Look_Lookahead(const ILookInStream *pp, const void **buf
|
|||
return res;
|
||||
}
|
||||
|
||||
static SRes LookToRead2_Look_Exact(const ILookInStream *pp, const void **buf, size_t *size)
|
||||
static SRes LookToRead2_Look_Exact(ILookInStreamPtr pp, const void **buf, size_t *size)
|
||||
{
|
||||
SRes res = SZ_OK;
|
||||
GET_LookToRead2
|
||||
|
@ -113,14 +136,14 @@ static SRes LookToRead2_Look_Exact(const ILookInStream *pp, const void **buf, si
|
|||
return res;
|
||||
}
|
||||
|
||||
static SRes LookToRead2_Skip(const ILookInStream *pp, size_t offset)
|
||||
static SRes LookToRead2_Skip(ILookInStreamPtr pp, size_t offset)
|
||||
{
|
||||
GET_LookToRead2
|
||||
p->pos += offset;
|
||||
return SZ_OK;
|
||||
}
|
||||
|
||||
static SRes LookToRead2_Read(const ILookInStream *pp, void *buf, size_t *size)
|
||||
static SRes LookToRead2_Read(ILookInStreamPtr pp, void *buf, size_t *size)
|
||||
{
|
||||
GET_LookToRead2
|
||||
size_t rem = p->size - p->pos;
|
||||
|
@ -134,7 +157,7 @@ static SRes LookToRead2_Read(const ILookInStream *pp, void *buf, size_t *size)
|
|||
return SZ_OK;
|
||||
}
|
||||
|
||||
static SRes LookToRead2_Seek(const ILookInStream *pp, Int64 *pos, ESzSeek origin)
|
||||
static SRes LookToRead2_Seek(ILookInStreamPtr pp, Int64 *pos, ESzSeek origin)
|
||||
{
|
||||
GET_LookToRead2
|
||||
p->pos = p->size = 0;
|
||||
|
@ -153,9 +176,9 @@ void LookToRead2_CreateVTable(CLookToRead2 *p, int lookahead)
|
|||
|
||||
|
||||
|
||||
static SRes SecToLook_Read(const ISeqInStream *pp, void *buf, size_t *size)
|
||||
static SRes SecToLook_Read(ISeqInStreamPtr pp, void *buf, size_t *size)
|
||||
{
|
||||
CSecToLook *p = CONTAINER_FROM_VTBL(pp, CSecToLook, vt);
|
||||
Z7_CONTAINER_FROM_VTBL_TO_DECL_VAR_pp_vt_p(CSecToLook)
|
||||
return LookInStream_LookRead(p->realStream, buf, size);
|
||||
}
|
||||
|
||||
|
@ -164,9 +187,9 @@ void SecToLook_CreateVTable(CSecToLook *p)
|
|||
p->vt.Read = SecToLook_Read;
|
||||
}
|
||||
|
||||
static SRes SecToRead_Read(const ISeqInStream *pp, void *buf, size_t *size)
|
||||
static SRes SecToRead_Read(ISeqInStreamPtr pp, void *buf, size_t *size)
|
||||
{
|
||||
CSecToRead *p = CONTAINER_FROM_VTBL(pp, CSecToRead, vt);
|
||||
Z7_CONTAINER_FROM_VTBL_TO_DECL_VAR_pp_vt_p(CSecToRead)
|
||||
return ILookInStream_Read(p->realStream, buf, size);
|
||||
}
|
||||
|
||||
|
|
|
@ -0,0 +1,393 @@
|
|||
/* Aes.c -- AES encryption / decryption
|
||||
2023-04-02 : Igor Pavlov : Public domain */
|
||||
|
||||
#include "Precomp.h"
|
||||
|
||||
#include "CpuArch.h"
|
||||
#include "Aes.h"
|
||||
|
||||
AES_CODE_FUNC g_AesCbc_Decode;
|
||||
#ifndef Z7_SFX
|
||||
AES_CODE_FUNC g_AesCbc_Encode;
|
||||
AES_CODE_FUNC g_AesCtr_Code;
|
||||
UInt32 g_Aes_SupportedFunctions_Flags;
|
||||
#endif
|
||||
|
||||
static UInt32 T[256 * 4];
|
||||
static const Byte Sbox[256] = {
|
||||
0x63, 0x7c, 0x77, 0x7b, 0xf2, 0x6b, 0x6f, 0xc5, 0x30, 0x01, 0x67, 0x2b, 0xfe, 0xd7, 0xab, 0x76,
|
||||
0xca, 0x82, 0xc9, 0x7d, 0xfa, 0x59, 0x47, 0xf0, 0xad, 0xd4, 0xa2, 0xaf, 0x9c, 0xa4, 0x72, 0xc0,
|
||||
0xb7, 0xfd, 0x93, 0x26, 0x36, 0x3f, 0xf7, 0xcc, 0x34, 0xa5, 0xe5, 0xf1, 0x71, 0xd8, 0x31, 0x15,
|
||||
0x04, 0xc7, 0x23, 0xc3, 0x18, 0x96, 0x05, 0x9a, 0x07, 0x12, 0x80, 0xe2, 0xeb, 0x27, 0xb2, 0x75,
|
||||
0x09, 0x83, 0x2c, 0x1a, 0x1b, 0x6e, 0x5a, 0xa0, 0x52, 0x3b, 0xd6, 0xb3, 0x29, 0xe3, 0x2f, 0x84,
|
||||
0x53, 0xd1, 0x00, 0xed, 0x20, 0xfc, 0xb1, 0x5b, 0x6a, 0xcb, 0xbe, 0x39, 0x4a, 0x4c, 0x58, 0xcf,
|
||||
0xd0, 0xef, 0xaa, 0xfb, 0x43, 0x4d, 0x33, 0x85, 0x45, 0xf9, 0x02, 0x7f, 0x50, 0x3c, 0x9f, 0xa8,
|
||||
0x51, 0xa3, 0x40, 0x8f, 0x92, 0x9d, 0x38, 0xf5, 0xbc, 0xb6, 0xda, 0x21, 0x10, 0xff, 0xf3, 0xd2,
|
||||
0xcd, 0x0c, 0x13, 0xec, 0x5f, 0x97, 0x44, 0x17, 0xc4, 0xa7, 0x7e, 0x3d, 0x64, 0x5d, 0x19, 0x73,
|
||||
0x60, 0x81, 0x4f, 0xdc, 0x22, 0x2a, 0x90, 0x88, 0x46, 0xee, 0xb8, 0x14, 0xde, 0x5e, 0x0b, 0xdb,
|
||||
0xe0, 0x32, 0x3a, 0x0a, 0x49, 0x06, 0x24, 0x5c, 0xc2, 0xd3, 0xac, 0x62, 0x91, 0x95, 0xe4, 0x79,
|
||||
0xe7, 0xc8, 0x37, 0x6d, 0x8d, 0xd5, 0x4e, 0xa9, 0x6c, 0x56, 0xf4, 0xea, 0x65, 0x7a, 0xae, 0x08,
|
||||
0xba, 0x78, 0x25, 0x2e, 0x1c, 0xa6, 0xb4, 0xc6, 0xe8, 0xdd, 0x74, 0x1f, 0x4b, 0xbd, 0x8b, 0x8a,
|
||||
0x70, 0x3e, 0xb5, 0x66, 0x48, 0x03, 0xf6, 0x0e, 0x61, 0x35, 0x57, 0xb9, 0x86, 0xc1, 0x1d, 0x9e,
|
||||
0xe1, 0xf8, 0x98, 0x11, 0x69, 0xd9, 0x8e, 0x94, 0x9b, 0x1e, 0x87, 0xe9, 0xce, 0x55, 0x28, 0xdf,
|
||||
0x8c, 0xa1, 0x89, 0x0d, 0xbf, 0xe6, 0x42, 0x68, 0x41, 0x99, 0x2d, 0x0f, 0xb0, 0x54, 0xbb, 0x16};
|
||||
|
||||
|
||||
static UInt32 D[256 * 4];
|
||||
static Byte InvS[256];
|
||||
|
||||
#define xtime(x) ((((x) << 1) ^ (((x) & 0x80) != 0 ? 0x1B : 0)) & 0xFF)
|
||||
|
||||
#define Ui32(a0, a1, a2, a3) ((UInt32)(a0) | ((UInt32)(a1) << 8) | ((UInt32)(a2) << 16) | ((UInt32)(a3) << 24))
|
||||
|
||||
#define gb0(x) ( (x) & 0xFF)
|
||||
#define gb1(x) (((x) >> ( 8)) & 0xFF)
|
||||
#define gb2(x) (((x) >> (16)) & 0xFF)
|
||||
#define gb3(x) (((x) >> (24)))
|
||||
|
||||
#define gb(n, x) gb ## n(x)
|
||||
|
||||
#define TT(x) (T + (x << 8))
|
||||
#define DD(x) (D + (x << 8))
|
||||
|
||||
|
||||
// #define Z7_SHOW_AES_STATUS
|
||||
|
||||
#ifdef MY_CPU_X86_OR_AMD64
|
||||
#define USE_HW_AES
|
||||
#elif defined(MY_CPU_ARM_OR_ARM64) && defined(MY_CPU_LE)
|
||||
#if defined(__clang__)
|
||||
#if (__clang_major__ >= 8) // fix that check
|
||||
#define USE_HW_AES
|
||||
#endif
|
||||
#elif defined(__GNUC__)
|
||||
#if (__GNUC__ >= 6) // fix that check
|
||||
#define USE_HW_AES
|
||||
#endif
|
||||
#elif defined(_MSC_VER)
|
||||
#if _MSC_VER >= 1910
|
||||
#define USE_HW_AES
|
||||
#endif
|
||||
#endif
|
||||
#endif
|
||||
|
||||
#ifdef USE_HW_AES
|
||||
#ifdef Z7_SHOW_AES_STATUS
|
||||
#include <stdio.h>
|
||||
#define PRF(x) x
|
||||
#else
|
||||
#define PRF(x)
|
||||
#endif
|
||||
#endif
|
||||
|
||||
|
||||
void AesGenTables(void)
|
||||
{
|
||||
unsigned i;
|
||||
for (i = 0; i < 256; i++)
|
||||
InvS[Sbox[i]] = (Byte)i;
|
||||
|
||||
for (i = 0; i < 256; i++)
|
||||
{
|
||||
{
|
||||
const UInt32 a1 = Sbox[i];
|
||||
const UInt32 a2 = xtime(a1);
|
||||
const UInt32 a3 = a2 ^ a1;
|
||||
TT(0)[i] = Ui32(a2, a1, a1, a3);
|
||||
TT(1)[i] = Ui32(a3, a2, a1, a1);
|
||||
TT(2)[i] = Ui32(a1, a3, a2, a1);
|
||||
TT(3)[i] = Ui32(a1, a1, a3, a2);
|
||||
}
|
||||
{
|
||||
const UInt32 a1 = InvS[i];
|
||||
const UInt32 a2 = xtime(a1);
|
||||
const UInt32 a4 = xtime(a2);
|
||||
const UInt32 a8 = xtime(a4);
|
||||
const UInt32 a9 = a8 ^ a1;
|
||||
const UInt32 aB = a8 ^ a2 ^ a1;
|
||||
const UInt32 aD = a8 ^ a4 ^ a1;
|
||||
const UInt32 aE = a8 ^ a4 ^ a2;
|
||||
DD(0)[i] = Ui32(aE, a9, aD, aB);
|
||||
DD(1)[i] = Ui32(aB, aE, a9, aD);
|
||||
DD(2)[i] = Ui32(aD, aB, aE, a9);
|
||||
DD(3)[i] = Ui32(a9, aD, aB, aE);
|
||||
}
|
||||
}
|
||||
|
||||
{
|
||||
AES_CODE_FUNC d = AesCbc_Decode;
|
||||
#ifndef Z7_SFX
|
||||
AES_CODE_FUNC e = AesCbc_Encode;
|
||||
AES_CODE_FUNC c = AesCtr_Code;
|
||||
UInt32 flags = 0;
|
||||
#endif
|
||||
|
||||
#ifdef USE_HW_AES
|
||||
if (CPU_IsSupported_AES())
|
||||
{
|
||||
// #pragma message ("AES HW")
|
||||
PRF(printf("\n===AES HW\n"));
|
||||
d = AesCbc_Decode_HW;
|
||||
|
||||
#ifndef Z7_SFX
|
||||
e = AesCbc_Encode_HW;
|
||||
c = AesCtr_Code_HW;
|
||||
flags = k_Aes_SupportedFunctions_HW;
|
||||
#endif
|
||||
|
||||
#ifdef MY_CPU_X86_OR_AMD64
|
||||
if (CPU_IsSupported_VAES_AVX2())
|
||||
{
|
||||
PRF(printf("\n===vaes avx2\n"));
|
||||
d = AesCbc_Decode_HW_256;
|
||||
#ifndef Z7_SFX
|
||||
c = AesCtr_Code_HW_256;
|
||||
flags |= k_Aes_SupportedFunctions_HW_256;
|
||||
#endif
|
||||
}
|
||||
#endif
|
||||
}
|
||||
#endif
|
||||
|
||||
g_AesCbc_Decode = d;
|
||||
#ifndef Z7_SFX
|
||||
g_AesCbc_Encode = e;
|
||||
g_AesCtr_Code = c;
|
||||
g_Aes_SupportedFunctions_Flags = flags;
|
||||
#endif
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
#define HT(i, x, s) TT(x)[gb(x, s[(i + x) & 3])]
|
||||
|
||||
#define HT4(m, i, s, p) m[i] = \
|
||||
HT(i, 0, s) ^ \
|
||||
HT(i, 1, s) ^ \
|
||||
HT(i, 2, s) ^ \
|
||||
HT(i, 3, s) ^ w[p + i]
|
||||
|
||||
#define HT16(m, s, p) \
|
||||
HT4(m, 0, s, p); \
|
||||
HT4(m, 1, s, p); \
|
||||
HT4(m, 2, s, p); \
|
||||
HT4(m, 3, s, p); \
|
||||
|
||||
#define FT(i, x) Sbox[gb(x, m[(i + x) & 3])]
|
||||
#define FT4(i) dest[i] = Ui32(FT(i, 0), FT(i, 1), FT(i, 2), FT(i, 3)) ^ w[i];
|
||||
|
||||
|
||||
#define HD(i, x, s) DD(x)[gb(x, s[(i - x) & 3])]
|
||||
|
||||
#define HD4(m, i, s, p) m[i] = \
|
||||
HD(i, 0, s) ^ \
|
||||
HD(i, 1, s) ^ \
|
||||
HD(i, 2, s) ^ \
|
||||
HD(i, 3, s) ^ w[p + i];
|
||||
|
||||
#define HD16(m, s, p) \
|
||||
HD4(m, 0, s, p); \
|
||||
HD4(m, 1, s, p); \
|
||||
HD4(m, 2, s, p); \
|
||||
HD4(m, 3, s, p); \
|
||||
|
||||
#define FD(i, x) InvS[gb(x, m[(i - x) & 3])]
|
||||
#define FD4(i) dest[i] = Ui32(FD(i, 0), FD(i, 1), FD(i, 2), FD(i, 3)) ^ w[i];
|
||||
|
||||
void Z7_FASTCALL Aes_SetKey_Enc(UInt32 *w, const Byte *key, unsigned keySize)
|
||||
{
|
||||
unsigned i, m;
|
||||
const UInt32 *wLim;
|
||||
UInt32 t;
|
||||
UInt32 rcon = 1;
|
||||
|
||||
keySize /= 4;
|
||||
w[0] = ((UInt32)keySize / 2) + 3;
|
||||
w += 4;
|
||||
|
||||
for (i = 0; i < keySize; i++, key += 4)
|
||||
w[i] = GetUi32(key);
|
||||
|
||||
t = w[(size_t)keySize - 1];
|
||||
wLim = w + (size_t)keySize * 3 + 28;
|
||||
m = 0;
|
||||
do
|
||||
{
|
||||
if (m == 0)
|
||||
{
|
||||
t = Ui32(Sbox[gb1(t)] ^ rcon, Sbox[gb2(t)], Sbox[gb3(t)], Sbox[gb0(t)]);
|
||||
rcon <<= 1;
|
||||
if (rcon & 0x100)
|
||||
rcon = 0x1b;
|
||||
m = keySize;
|
||||
}
|
||||
else if (m == 4 && keySize > 6)
|
||||
t = Ui32(Sbox[gb0(t)], Sbox[gb1(t)], Sbox[gb2(t)], Sbox[gb3(t)]);
|
||||
m--;
|
||||
t ^= w[0];
|
||||
w[keySize] = t;
|
||||
}
|
||||
while (++w != wLim);
|
||||
}
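/* Editor's note (worked example, not in the original source): after the division above,
   keySize is the key length in 32-bit words, so
     w[0] = keySize / 2 + 3  ->  5 / 6 / 7 round-key pairs for AES-128/192/256, and
   the loop appends (keySize * 3 + 28) words after the initial keySize key words:
     128-bit key: 4 + (4*3 + 28) = 44 words = 4 * (10 + 1) round keys,
     192-bit key: 6 + (6*3 + 28) = 52 words = 4 * (12 + 1) round keys,
     256-bit key: 8 + (8*3 + 28) = 60 words = 4 * (14 + 1) round keys. */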
|
||||
|
||||
void Z7_FASTCALL Aes_SetKey_Dec(UInt32 *w, const Byte *key, unsigned keySize)
|
||||
{
|
||||
unsigned i, num;
|
||||
Aes_SetKey_Enc(w, key, keySize);
|
||||
num = keySize + 20;
|
||||
w += 8;
|
||||
for (i = 0; i < num; i++)
|
||||
{
|
||||
UInt32 r = w[i];
|
||||
w[i] =
|
||||
DD(0)[Sbox[gb0(r)]] ^
|
||||
DD(1)[Sbox[gb1(r)]] ^
|
||||
DD(2)[Sbox[gb2(r)]] ^
|
||||
DD(3)[Sbox[gb3(r)]];
|
||||
}
|
||||
}
|
||||
|
||||
/* Aes_Encode and Aes_Decode functions work with little-endian words.
|
||||
src and dest are pointers to 4 UInt32 words.
|
||||
src and dest can point to same block */
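/* Editor's note: an illustration, not part of the original source. The "little-endian
   words" convention above means a 16-byte block maps to four UInt32 words through
   GetUi32 from CpuArch.h, so byte 0 becomes the least significant byte of word 0: */
static void Example_BlockToWords(const Byte *block16, UInt32 *w4)
{
  /* w4[0] == block16[0] | (block16[1] << 8) | (block16[2] << 16) | (block16[3] << 24) */
  w4[0] = GetUi32(block16);
  w4[1] = GetUi32(block16 + 4);
  w4[2] = GetUi32(block16 + 8);
  w4[3] = GetUi32(block16 + 12);
}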
|
||||
|
||||
// Z7_FORCE_INLINE
|
||||
static void Aes_Encode(const UInt32 *w, UInt32 *dest, const UInt32 *src)
|
||||
{
|
||||
UInt32 s[4];
|
||||
UInt32 m[4];
|
||||
UInt32 numRounds2 = w[0];
|
||||
w += 4;
|
||||
s[0] = src[0] ^ w[0];
|
||||
s[1] = src[1] ^ w[1];
|
||||
s[2] = src[2] ^ w[2];
|
||||
s[3] = src[3] ^ w[3];
|
||||
w += 4;
|
||||
for (;;)
|
||||
{
|
||||
HT16(m, s, 0)
|
||||
if (--numRounds2 == 0)
|
||||
break;
|
||||
HT16(s, m, 4)
|
||||
w += 8;
|
||||
}
|
||||
w += 4;
|
||||
FT4(0)
|
||||
FT4(1)
|
||||
FT4(2)
|
||||
FT4(3)
|
||||
}
|
||||
|
||||
Z7_FORCE_INLINE
|
||||
static void Aes_Decode(const UInt32 *w, UInt32 *dest, const UInt32 *src)
|
||||
{
|
||||
UInt32 s[4];
|
||||
UInt32 m[4];
|
||||
UInt32 numRounds2 = w[0];
|
||||
w += 4 + numRounds2 * 8;
|
||||
s[0] = src[0] ^ w[0];
|
||||
s[1] = src[1] ^ w[1];
|
||||
s[2] = src[2] ^ w[2];
|
||||
s[3] = src[3] ^ w[3];
|
||||
for (;;)
|
||||
{
|
||||
w -= 8;
|
||||
HD16(m, s, 4)
|
||||
if (--numRounds2 == 0)
|
||||
break;
|
||||
HD16(s, m, 0)
|
||||
}
|
||||
FD4(0)
|
||||
FD4(1)
|
||||
FD4(2)
|
||||
FD4(3)
|
||||
}
|
||||
|
||||
void AesCbc_Init(UInt32 *p, const Byte *iv)
|
||||
{
|
||||
unsigned i;
|
||||
for (i = 0; i < 4; i++)
|
||||
p[i] = GetUi32(iv + i * 4);
|
||||
}
|
||||
|
||||
void Z7_FASTCALL AesCbc_Encode(UInt32 *p, Byte *data, size_t numBlocks)
|
||||
{
|
||||
for (; numBlocks != 0; numBlocks--, data += AES_BLOCK_SIZE)
|
||||
{
|
||||
p[0] ^= GetUi32(data);
|
||||
p[1] ^= GetUi32(data + 4);
|
||||
p[2] ^= GetUi32(data + 8);
|
||||
p[3] ^= GetUi32(data + 12);
|
||||
|
||||
Aes_Encode(p + 4, p, p);
|
||||
|
||||
SetUi32(data, p[0])
|
||||
SetUi32(data + 4, p[1])
|
||||
SetUi32(data + 8, p[2])
|
||||
SetUi32(data + 12, p[3])
|
||||
}
|
||||
}
|
||||
|
||||
void Z7_FASTCALL AesCbc_Decode(UInt32 *p, Byte *data, size_t numBlocks)
|
||||
{
|
||||
UInt32 in[4], out[4];
|
||||
for (; numBlocks != 0; numBlocks--, data += AES_BLOCK_SIZE)
|
||||
{
|
||||
in[0] = GetUi32(data);
|
||||
in[1] = GetUi32(data + 4);
|
||||
in[2] = GetUi32(data + 8);
|
||||
in[3] = GetUi32(data + 12);
|
||||
|
||||
Aes_Decode(p + 4, out, in);
|
||||
|
||||
SetUi32(data, p[0] ^ out[0])
|
||||
SetUi32(data + 4, p[1] ^ out[1])
|
||||
SetUi32(data + 8, p[2] ^ out[2])
|
||||
SetUi32(data + 12, p[3] ^ out[3])
|
||||
|
||||
p[0] = in[0];
|
||||
p[1] = in[1];
|
||||
p[2] = in[2];
|
||||
p[3] = in[3];
|
||||
}
|
||||
}
|
||||
|
||||
void Z7_FASTCALL AesCtr_Code(UInt32 *p, Byte *data, size_t numBlocks)
|
||||
{
|
||||
for (; numBlocks != 0; numBlocks--)
|
||||
{
|
||||
UInt32 temp[4];
|
||||
unsigned i;
|
||||
|
||||
if (++p[0] == 0)
|
||||
p[1]++;
|
||||
|
||||
Aes_Encode(p + 4, temp, p);
|
||||
|
||||
for (i = 0; i < 4; i++, data += 4)
|
||||
{
|
||||
const UInt32 t = temp[i];
|
||||
|
||||
#ifdef MY_CPU_LE_UNALIGN
|
||||
*((UInt32 *)(void *)data) ^= t;
|
||||
#else
|
||||
data[0] = (Byte)(data[0] ^ (t & 0xFF));
|
||||
data[1] = (Byte)(data[1] ^ ((t >> 8) & 0xFF));
|
||||
data[2] = (Byte)(data[2] ^ ((t >> 16) & 0xFF));
|
||||
data[3] = (Byte)(data[3] ^ ((t >> 24)));
|
||||
#endif
|
||||
}
|
||||
}
|
||||
}
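/* Editor's note: a minimal usage sketch for the software CBC path above, not part of the
   original source. It assumes AES_NUM_IVMRK_WORDS from Aes.h sizes the combined IV plus
   key-schedule buffer; the hardware code paths additionally require 16-byte alignment. */
static void Example_AesCbcEncrypt(Byte *buf, size_t numBlocks,
    const Byte *key256, const Byte *iv16)
{
  UInt32 aes[AES_NUM_IVMRK_WORDS];     /* aes[0..3] = IV, aes[4..] = key schedule */
  AesGenTables();                      /* build the T[] / D[] tables (once per process) */
  Aes_SetKey_Enc(aes + 4, key256, 32); /* 32-byte key -> AES-256 round keys */
  AesCbc_Init(aes, iv16);              /* load the IV into aes[0..3] */
  AesCbc_Encode(aes, buf, numBlocks);  /* encrypt numBlocks * 16 bytes in place */
}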
|
||||
|
||||
#undef xtime
|
||||
#undef Ui32
|
||||
#undef gb0
|
||||
#undef gb1
|
||||
#undef gb2
|
||||
#undef gb3
|
||||
#undef gb
|
||||
#undef TT
|
||||
#undef DD
|
||||
#undef USE_HW_AES
|
||||
#undef PRF
|
|
@ -0,0 +1,840 @@
|
|||
/* AesOpt.c -- AES optimized code for x86 AES hardware instructions
|
||||
2023-04-02 : Igor Pavlov : Public domain */
|
||||
|
||||
#include "Precomp.h"
|
||||
|
||||
#include "Aes.h"
|
||||
#include "CpuArch.h"
|
||||
|
||||
#ifdef MY_CPU_X86_OR_AMD64
|
||||
|
||||
#if defined(__INTEL_COMPILER)
|
||||
#if (__INTEL_COMPILER >= 1110)
|
||||
#define USE_INTEL_AES
|
||||
#if (__INTEL_COMPILER >= 1900)
|
||||
#define USE_INTEL_VAES
|
||||
#endif
|
||||
#endif
|
||||
#elif defined(__clang__) && (__clang_major__ > 3 || __clang_major__ == 3 && __clang_minor__ >= 8) \
|
||||
|| defined(__GNUC__) && (__GNUC__ > 4 || __GNUC__ == 4 && __GNUC_MINOR__ >= 4)
|
||||
#define USE_INTEL_AES
|
||||
#if !defined(__AES__)
|
||||
#define ATTRIB_AES __attribute__((__target__("aes")))
|
||||
#endif
|
||||
#if defined(__clang__) && (__clang_major__ >= 8) \
|
||||
|| defined(__GNUC__) && (__GNUC__ >= 8)
|
||||
#define USE_INTEL_VAES
|
||||
#if !defined(__AES__) || !defined(__VAES__) || !defined(__AVX__) || !defined(__AVX2__)
|
||||
#define ATTRIB_VAES __attribute__((__target__("aes,vaes,avx,avx2")))
|
||||
#endif
|
||||
#endif
|
||||
#elif defined(_MSC_VER)
|
||||
#if (_MSC_VER > 1500) || (_MSC_FULL_VER >= 150030729)
|
||||
#define USE_INTEL_AES
|
||||
#if (_MSC_VER >= 1910)
|
||||
#define USE_INTEL_VAES
|
||||
#endif
|
||||
#endif
|
||||
#endif
|
||||
|
||||
#ifndef ATTRIB_AES
|
||||
#define ATTRIB_AES
|
||||
#endif
|
||||
#ifndef ATTRIB_VAES
|
||||
#define ATTRIB_VAES
|
||||
#endif
|
||||
|
||||
|
||||
#ifdef USE_INTEL_AES
|
||||
|
||||
#include <wmmintrin.h>
|
||||
|
||||
#ifndef USE_INTEL_VAES
|
||||
#define AES_TYPE_keys UInt32
|
||||
#define AES_TYPE_data Byte
|
||||
// #define AES_TYPE_keys __m128i
|
||||
// #define AES_TYPE_data __m128i
|
||||
#endif
|
||||
|
||||
#define AES_FUNC_START(name) \
|
||||
void Z7_FASTCALL name(UInt32 *ivAes, Byte *data8, size_t numBlocks)
|
||||
// void Z7_FASTCALL name(__m128i *p, __m128i *data, size_t numBlocks)
|
||||
|
||||
#define AES_FUNC_START2(name) \
|
||||
AES_FUNC_START (name); \
|
||||
ATTRIB_AES \
|
||||
AES_FUNC_START (name)
|
||||
|
||||
#define MM_OP(op, dest, src) dest = op(dest, src);
|
||||
#define MM_OP_m(op, src) MM_OP(op, m, src)
|
||||
|
||||
#define MM_XOR( dest, src) MM_OP(_mm_xor_si128, dest, src)
|
||||
#define AVX_XOR(dest, src) MM_OP(_mm256_xor_si256, dest, src)
|
||||
|
||||
|
||||
AES_FUNC_START2 (AesCbc_Encode_HW)
|
||||
{
|
||||
__m128i *p = (__m128i *)(void *)ivAes;
|
||||
__m128i *data = (__m128i *)(void *)data8;
|
||||
__m128i m = *p;
|
||||
const __m128i k0 = p[2];
|
||||
const __m128i k1 = p[3];
|
||||
const UInt32 numRounds2 = *(const UInt32 *)(p + 1) - 1;
|
||||
for (; numBlocks != 0; numBlocks--, data++)
|
||||
{
|
||||
UInt32 r = numRounds2;
|
||||
const __m128i *w = p + 4;
|
||||
__m128i temp = *data;
|
||||
MM_XOR (temp, k0)
|
||||
MM_XOR (m, temp)
|
||||
MM_OP_m (_mm_aesenc_si128, k1)
|
||||
do
|
||||
{
|
||||
MM_OP_m (_mm_aesenc_si128, w[0])
|
||||
MM_OP_m (_mm_aesenc_si128, w[1])
|
||||
w += 2;
|
||||
}
|
||||
while (--r);
|
||||
MM_OP_m (_mm_aesenclast_si128, w[0])
|
||||
*data = m;
|
||||
}
|
||||
*p = m;
|
||||
}
|
||||
|
||||
|
||||
#define WOP_1(op)
|
||||
#define WOP_2(op) WOP_1 (op) op (m1, 1)
|
||||
#define WOP_3(op) WOP_2 (op) op (m2, 2)
|
||||
#define WOP_4(op) WOP_3 (op) op (m3, 3)
|
||||
#ifdef MY_CPU_AMD64
|
||||
#define WOP_5(op) WOP_4 (op) op (m4, 4)
|
||||
#define WOP_6(op) WOP_5 (op) op (m5, 5)
|
||||
#define WOP_7(op) WOP_6 (op) op (m6, 6)
|
||||
#define WOP_8(op) WOP_7 (op) op (m7, 7)
|
||||
#endif
|
||||
/*
|
||||
#define WOP_9(op) WOP_8 (op) op (m8, 8);
|
||||
#define WOP_10(op) WOP_9 (op) op (m9, 9);
|
||||
#define WOP_11(op) WOP_10(op) op (m10, 10);
|
||||
#define WOP_12(op) WOP_11(op) op (m11, 11);
|
||||
#define WOP_13(op) WOP_12(op) op (m12, 12);
|
||||
#define WOP_14(op) WOP_13(op) op (m13, 13);
|
||||
*/
|
||||
|
||||
#ifdef MY_CPU_AMD64
|
||||
#define NUM_WAYS 8
|
||||
#define WOP_M1 WOP_8
|
||||
#else
|
||||
#define NUM_WAYS 4
|
||||
#define WOP_M1 WOP_4
|
||||
#endif
|
||||
|
||||
#define WOP(op) op (m0, 0) WOP_M1(op)
|
||||
|
||||
|
||||
#define DECLARE_VAR(reg, ii) __m128i reg;
|
||||
#define LOAD_data( reg, ii) reg = data[ii];
|
||||
#define STORE_data( reg, ii) data[ii] = reg;
|
||||
#if (NUM_WAYS > 1)
|
||||
#define XOR_data_M1(reg, ii) MM_XOR (reg, data[ii- 1])
|
||||
#endif
|
||||
|
||||
#define AVX_DECLARE_VAR(reg, ii) __m256i reg;
|
||||
#define AVX_LOAD_data( reg, ii) reg = ((const __m256i *)(const void *)data)[ii];
|
||||
#define AVX_STORE_data( reg, ii) ((__m256i *)(void *)data)[ii] = reg;
|
||||
#define AVX_XOR_data_M1(reg, ii) AVX_XOR (reg, (((const __m256i *)(const void *)(data - 1))[ii]))
|
||||
|
||||
#define MM_OP_key(op, reg) MM_OP(op, reg, key);
|
||||
|
||||
#define AES_DEC( reg, ii) MM_OP_key (_mm_aesdec_si128, reg)
|
||||
#define AES_DEC_LAST( reg, ii) MM_OP_key (_mm_aesdeclast_si128, reg)
|
||||
#define AES_ENC( reg, ii) MM_OP_key (_mm_aesenc_si128, reg)
|
||||
#define AES_ENC_LAST( reg, ii) MM_OP_key (_mm_aesenclast_si128, reg)
|
||||
#define AES_XOR( reg, ii) MM_OP_key (_mm_xor_si128, reg)
|
||||
|
||||
|
||||
#define AVX_AES_DEC( reg, ii) MM_OP_key (_mm256_aesdec_epi128, reg)
|
||||
#define AVX_AES_DEC_LAST( reg, ii) MM_OP_key (_mm256_aesdeclast_epi128, reg)
|
||||
#define AVX_AES_ENC( reg, ii) MM_OP_key (_mm256_aesenc_epi128, reg)
|
||||
#define AVX_AES_ENC_LAST( reg, ii) MM_OP_key (_mm256_aesenclast_epi128, reg)
|
||||
#define AVX_AES_XOR( reg, ii) MM_OP_key (_mm256_xor_si256, reg)
|
||||
|
||||
#define CTR_START(reg, ii) MM_OP (_mm_add_epi64, ctr, one) reg = ctr;
|
||||
#define CTR_END( reg, ii) MM_XOR (data[ii], reg)
|
||||
|
||||
#define AVX_CTR_START(reg, ii) MM_OP (_mm256_add_epi64, ctr2, two) reg = _mm256_xor_si256(ctr2, key);
|
||||
#define AVX_CTR_END( reg, ii) AVX_XOR (((__m256i *)(void *)data)[ii], reg)
|
||||
|
||||
#define WOP_KEY(op, n) { \
|
||||
const __m128i key = w[n]; \
|
||||
WOP(op); }
|
||||
|
||||
#define AVX_WOP_KEY(op, n) { \
|
||||
const __m256i key = w[n]; \
|
||||
WOP(op); }
|
||||
|
||||
|
||||
#define WIDE_LOOP_START \
|
||||
dataEnd = data + numBlocks; \
|
||||
if (numBlocks >= NUM_WAYS) \
|
||||
{ dataEnd -= NUM_WAYS; do { \
|
||||
|
||||
|
||||
#define WIDE_LOOP_END \
|
||||
data += NUM_WAYS; \
|
||||
} while (data <= dataEnd); \
|
||||
dataEnd += NUM_WAYS; } \
|
||||
|
||||
|
||||
#define SINGLE_LOOP \
|
||||
for (; data < dataEnd; data++)
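/* Editor's note: an illustration, not part of the original source. The WIDE_LOOP_START /
   WIDE_LOOP_END / SINGLE_LOOP macros above expand to a simple tiling scheme: process
   NUM_WAYS blocks per iteration while enough input remains, then finish the tail one
   block at a time. A generic sketch of the same control flow: */
static void Example_TiledLoop(__m128i *data, size_t numBlocks)
{
  const __m128i *dataEnd = data + numBlocks;
  if (numBlocks >= NUM_WAYS)
  {
    dataEnd -= NUM_WAYS;
    do
    {
      /* ... process data[0 .. NUM_WAYS - 1] with interleaved AES rounds ... */
      data += NUM_WAYS;
    }
    while (data <= dataEnd);
    dataEnd += NUM_WAYS;
  }
  for (; data < dataEnd; data++)
  {
    /* ... process a single block ... */
  }
}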
|
||||
|
||||
|
||||
#define NUM_AES_KEYS_MAX 15
|
||||
|
||||
#define WIDE_LOOP_START_AVX(OP) \
|
||||
dataEnd = data + numBlocks; \
|
||||
if (numBlocks >= NUM_WAYS * 2) \
|
||||
{ __m256i keys[NUM_AES_KEYS_MAX]; \
|
||||
UInt32 ii; \
|
||||
OP \
|
||||
for (ii = 0; ii < numRounds; ii++) \
|
||||
keys[ii] = _mm256_broadcastsi128_si256(p[ii]); \
|
||||
dataEnd -= NUM_WAYS * 2; do { \
|
||||
|
||||
|
||||
#define WIDE_LOOP_END_AVX(OP) \
|
||||
data += NUM_WAYS * 2; \
|
||||
} while (data <= dataEnd); \
|
||||
dataEnd += NUM_WAYS * 2; \
|
||||
OP \
|
||||
_mm256_zeroupper(); \
|
||||
} \
|
||||
|
||||
/* MSVC for x86: If we don't call _mm256_zeroupper(), and -arch:IA32 is not specified,
|
||||
MSVC can still insert a vzeroupper instruction. */
|
||||
|
||||
|
||||
AES_FUNC_START2 (AesCbc_Decode_HW)
|
||||
{
|
||||
__m128i *p = (__m128i *)(void *)ivAes;
|
||||
__m128i *data = (__m128i *)(void *)data8;
|
||||
__m128i iv = *p;
|
||||
const __m128i *wStart = p + *(const UInt32 *)(p + 1) * 2 + 2 - 1;
|
||||
const __m128i *dataEnd;
|
||||
p += 2;
|
||||
|
||||
WIDE_LOOP_START
|
||||
{
|
||||
const __m128i *w = wStart;
|
||||
|
||||
WOP (DECLARE_VAR)
|
||||
WOP (LOAD_data)
|
||||
WOP_KEY (AES_XOR, 1)
|
||||
|
||||
do
|
||||
{
|
||||
WOP_KEY (AES_DEC, 0)
|
||||
w--;
|
||||
}
|
||||
while (w != p);
|
||||
WOP_KEY (AES_DEC_LAST, 0)
|
||||
|
||||
MM_XOR (m0, iv)
|
||||
WOP_M1 (XOR_data_M1)
|
||||
iv = data[NUM_WAYS - 1];
|
||||
WOP (STORE_data)
|
||||
}
|
||||
WIDE_LOOP_END
|
||||
|
||||
SINGLE_LOOP
|
||||
{
|
||||
const __m128i *w = wStart - 1;
|
||||
__m128i m = _mm_xor_si128 (w[2], *data);
|
||||
do
|
||||
{
|
||||
MM_OP_m (_mm_aesdec_si128, w[1])
|
||||
MM_OP_m (_mm_aesdec_si128, w[0])
|
||||
w -= 2;
|
||||
}
|
||||
while (w != p);
|
||||
MM_OP_m (_mm_aesdec_si128, w[1])
|
||||
MM_OP_m (_mm_aesdeclast_si128, w[0])
|
||||
|
||||
MM_XOR (m, iv)
|
||||
iv = *data;
|
||||
*data = m;
|
||||
}
|
||||
|
||||
p[-2] = iv;
|
||||
}
|
||||
|
||||
|
||||
AES_FUNC_START2 (AesCtr_Code_HW)
|
||||
{
|
||||
__m128i *p = (__m128i *)(void *)ivAes;
|
||||
__m128i *data = (__m128i *)(void *)data8;
|
||||
__m128i ctr = *p;
|
||||
UInt32 numRoundsMinus2 = *(const UInt32 *)(p + 1) * 2 - 1;
|
||||
const __m128i *dataEnd;
|
||||
__m128i one = _mm_cvtsi32_si128(1);
|
||||
|
||||
p += 2;
|
||||
|
||||
WIDE_LOOP_START
|
||||
{
|
||||
const __m128i *w = p;
|
||||
UInt32 r = numRoundsMinus2;
|
||||
WOP (DECLARE_VAR)
|
||||
WOP (CTR_START)
|
||||
WOP_KEY (AES_XOR, 0)
|
||||
w += 1;
|
||||
do
|
||||
{
|
||||
WOP_KEY (AES_ENC, 0)
|
||||
w += 1;
|
||||
}
|
||||
while (--r);
|
||||
WOP_KEY (AES_ENC_LAST, 0)
|
||||
|
||||
WOP (CTR_END)
|
||||
}
|
||||
WIDE_LOOP_END
|
||||
|
||||
SINGLE_LOOP
|
||||
{
|
||||
UInt32 numRounds2 = *(const UInt32 *)(p - 2 + 1) - 1;
|
||||
const __m128i *w = p;
|
||||
__m128i m;
|
||||
MM_OP (_mm_add_epi64, ctr, one)
|
||||
m = _mm_xor_si128 (ctr, p[0]);
|
||||
w += 1;
|
||||
do
|
||||
{
|
||||
MM_OP_m (_mm_aesenc_si128, w[0])
|
||||
MM_OP_m (_mm_aesenc_si128, w[1])
|
||||
w += 2;
|
||||
}
|
||||
while (--numRounds2);
|
||||
MM_OP_m (_mm_aesenc_si128, w[0])
|
||||
MM_OP_m (_mm_aesenclast_si128, w[1])
|
||||
MM_XOR (*data, m)
|
||||
}
|
||||
|
||||
p[-2] = ctr;
|
||||
}
|
||||
|
||||
|
||||
|
||||
#ifdef USE_INTEL_VAES
|
||||
|
||||
/*
|
||||
GCC before 2013-Jun:
|
||||
<immintrin.h>:
|
||||
#ifdef __AVX__
|
||||
#include <avxintrin.h>
|
||||
#endif
|
||||
GCC after 2013-Jun:
|
||||
<immintrin.h>:
|
||||
#include <avxintrin.h>
|
||||
CLANG 3.8+:
|
||||
{
|
||||
<immintrin.h>:
|
||||
#if !defined(_MSC_VER) || defined(__AVX__)
|
||||
#include <avxintrin.h>
|
||||
#endif
|
||||
|
||||
if (the compiler is clang for Windows and the global arch does not define __AVX__)
|
||||
[ if (defined(_MSC_VER) && !defined(__AVX__)) ]
|
||||
{
|
||||
<immintrin.h> doesn't include <avxintrin.h>
|
||||
and we have 2 ways to fix it:
|
||||
1) we can define required __AVX__ before <immintrin.h>
|
||||
or
|
||||
2) we can include <avxintrin.h> after <immintrin.h>
|
||||
}
|
||||
}
|
||||
|
||||
If we include <avxintrin.h> manually for GCC/CLANG,
|
||||
<immintrin.h> must be included before <avxintrin.h>.
|
||||
*/
|
||||
|
||||
/*
|
||||
#if defined(__clang__) && defined(_MSC_VER)
|
||||
#define __AVX__
|
||||
#define __AVX2__
|
||||
#define __VAES__
|
||||
#endif
|
||||
*/
|
||||
|
||||
#include <immintrin.h>
|
||||
#if defined(__clang__) && defined(_MSC_VER)
|
||||
#if !defined(__AVX__)
|
||||
#include <avxintrin.h>
|
||||
#endif
|
||||
#if !defined(__AVX2__)
|
||||
#include <avx2intrin.h>
|
||||
#endif
|
||||
#if !defined(__VAES__)
|
||||
#include <vaesintrin.h>
|
||||
#endif
|
||||
#endif // __clang__ && _MSC_VER
|
||||
|
||||
|
||||
#define VAES_FUNC_START2(name) \
|
||||
AES_FUNC_START (name); \
|
||||
ATTRIB_VAES \
|
||||
AES_FUNC_START (name)
|
||||
|
||||
VAES_FUNC_START2 (AesCbc_Decode_HW_256)
|
||||
{
|
||||
__m128i *p = (__m128i *)(void *)ivAes;
|
||||
__m128i *data = (__m128i *)(void *)data8;
|
||||
__m128i iv = *p;
|
||||
const __m128i *dataEnd;
|
||||
UInt32 numRounds = *(const UInt32 *)(p + 1) * 2 + 1;
|
||||
p += 2;
|
||||
|
||||
WIDE_LOOP_START_AVX(;)
|
||||
{
|
||||
const __m256i *w = keys + numRounds - 2;
|
||||
|
||||
WOP (AVX_DECLARE_VAR)
|
||||
WOP (AVX_LOAD_data)
|
||||
AVX_WOP_KEY (AVX_AES_XOR, 1)
|
||||
|
||||
do
|
||||
{
|
||||
AVX_WOP_KEY (AVX_AES_DEC, 0)
|
||||
w--;
|
||||
}
|
||||
while (w != keys);
|
||||
AVX_WOP_KEY (AVX_AES_DEC_LAST, 0)
|
||||
|
||||
AVX_XOR (m0, _mm256_setr_m128i(iv, data[0]))
|
||||
WOP_M1 (AVX_XOR_data_M1)
|
||||
iv = data[NUM_WAYS * 2 - 1];
|
||||
WOP (AVX_STORE_data)
|
||||
}
|
||||
WIDE_LOOP_END_AVX(;)
|
||||
|
||||
SINGLE_LOOP
|
||||
{
|
||||
const __m128i *w = p + *(const UInt32 *)(p + 1 - 2) * 2 + 1 - 3;
|
||||
__m128i m = _mm_xor_si128 (w[2], *data);
|
||||
do
|
||||
{
|
||||
MM_OP_m (_mm_aesdec_si128, w[1])
|
||||
MM_OP_m (_mm_aesdec_si128, w[0])
|
||||
w -= 2;
|
||||
}
|
||||
while (w != p);
|
||||
MM_OP_m (_mm_aesdec_si128, w[1])
|
||||
MM_OP_m (_mm_aesdeclast_si128, w[0])
|
||||
|
||||
MM_XOR (m, iv)
|
||||
iv = *data;
|
||||
*data = m;
|
||||
}
|
||||
|
||||
p[-2] = iv;
|
||||
}
|
||||
|
||||
|
||||
/*
|
||||
SSE2: _mm_cvtsi32_si128 : movd
|
||||
AVX: _mm256_setr_m128i : vinsertf128
|
||||
AVX2: _mm256_add_epi64 : vpaddq ymm, ymm, ymm
|
||||
_mm256_extracti128_si256 : vextracti128
|
||||
_mm256_broadcastsi128_si256 : vbroadcasti128
|
||||
*/
|
||||
|
||||
#define AVX_CTR_LOOP_START \
|
||||
ctr2 = _mm256_setr_m128i(_mm_sub_epi64(ctr, one), ctr); \
|
||||
two = _mm256_setr_m128i(one, one); \
|
||||
two = _mm256_add_epi64(two, two); \
|
||||
|
||||
// two = _mm256_setr_epi64x(2, 0, 2, 0);
|
||||
|
||||
#define AVX_CTR_LOOP_ENC \
|
||||
ctr = _mm256_extracti128_si256 (ctr2, 1); \
|
||||
|
||||
VAES_FUNC_START2 (AesCtr_Code_HW_256)
|
||||
{
|
||||
__m128i *p = (__m128i *)(void *)ivAes;
|
||||
__m128i *data = (__m128i *)(void *)data8;
|
||||
__m128i ctr = *p;
|
||||
UInt32 numRounds = *(const UInt32 *)(p + 1) * 2 + 1;
|
||||
const __m128i *dataEnd;
|
||||
__m128i one = _mm_cvtsi32_si128(1);
|
||||
__m256i ctr2, two;
|
||||
p += 2;
|
||||
|
||||
WIDE_LOOP_START_AVX (AVX_CTR_LOOP_START)
|
||||
{
|
||||
const __m256i *w = keys;
|
||||
UInt32 r = numRounds - 2;
|
||||
WOP (AVX_DECLARE_VAR)
|
||||
AVX_WOP_KEY (AVX_CTR_START, 0)
|
||||
|
||||
w += 1;
|
||||
do
|
||||
{
|
||||
AVX_WOP_KEY (AVX_AES_ENC, 0)
|
||||
w += 1;
|
||||
}
|
||||
while (--r);
|
||||
AVX_WOP_KEY (AVX_AES_ENC_LAST, 0)
|
||||
|
||||
WOP (AVX_CTR_END)
|
||||
}
|
||||
WIDE_LOOP_END_AVX (AVX_CTR_LOOP_ENC)
|
||||
|
||||
SINGLE_LOOP
|
||||
{
|
||||
UInt32 numRounds2 = *(const UInt32 *)(p - 2 + 1) - 1;
|
||||
const __m128i *w = p;
|
||||
__m128i m;
|
||||
MM_OP (_mm_add_epi64, ctr, one)
|
||||
m = _mm_xor_si128 (ctr, p[0]);
|
||||
w += 1;
|
||||
do
|
||||
{
|
||||
MM_OP_m (_mm_aesenc_si128, w[0])
|
||||
MM_OP_m (_mm_aesenc_si128, w[1])
|
||||
w += 2;
|
||||
}
|
||||
while (--numRounds2);
|
||||
MM_OP_m (_mm_aesenc_si128, w[0])
|
||||
MM_OP_m (_mm_aesenclast_si128, w[1])
|
||||
MM_XOR (*data, m)
|
||||
}
|
||||
|
||||
p[-2] = ctr;
|
||||
}
|
||||
|
||||
#endif // USE_INTEL_VAES
|
||||
|
||||
#else // USE_INTEL_AES
|
||||
|
||||
/* no USE_INTEL_AES */
|
||||
|
||||
#pragma message("AES HW_SW stub was used")
|
||||
|
||||
#define AES_TYPE_keys UInt32
|
||||
#define AES_TYPE_data Byte
|
||||
|
||||
#define AES_FUNC_START(name) \
|
||||
void Z7_FASTCALL name(UInt32 *p, Byte *data, size_t numBlocks) \
|
||||
|
||||
#define AES_COMPAT_STUB(name) \
|
||||
AES_FUNC_START(name); \
|
||||
AES_FUNC_START(name ## _HW) \
|
||||
{ name(p, data, numBlocks); }
|
||||
|
||||
AES_COMPAT_STUB (AesCbc_Encode)
|
||||
AES_COMPAT_STUB (AesCbc_Decode)
|
||||
AES_COMPAT_STUB (AesCtr_Code)
|
||||
|
||||
#endif // USE_INTEL_AES
|
||||
|
||||
|
||||
#ifndef USE_INTEL_VAES
|
||||
|
||||
#pragma message("VAES HW_SW stub was used")
|
||||
|
||||
#define VAES_COMPAT_STUB(name) \
|
||||
void Z7_FASTCALL name ## _256(UInt32 *p, Byte *data, size_t numBlocks); \
|
||||
void Z7_FASTCALL name ## _256(UInt32 *p, Byte *data, size_t numBlocks) \
|
||||
{ name((AES_TYPE_keys *)(void *)p, (AES_TYPE_data *)(void *)data, numBlocks); }
|
||||
|
||||
VAES_COMPAT_STUB (AesCbc_Decode_HW)
|
||||
VAES_COMPAT_STUB (AesCtr_Code_HW)
|
||||
|
||||
#endif // ! USE_INTEL_VAES
|
||||
|
||||
|
||||
#elif defined(MY_CPU_ARM_OR_ARM64) && defined(MY_CPU_LE)
|
||||
|
||||
#if defined(__clang__)
|
||||
#if (__clang_major__ >= 8) // fix that check
|
||||
#define USE_HW_AES
|
||||
#endif
|
||||
#elif defined(__GNUC__)
|
||||
#if (__GNUC__ >= 6) // fix that check
|
||||
#define USE_HW_AES
|
||||
#endif
|
||||
#elif defined(_MSC_VER)
|
||||
#if _MSC_VER >= 1910
|
||||
#define USE_HW_AES
|
||||
#endif
|
||||
#endif
|
||||
|
||||
#ifdef USE_HW_AES
|
||||
|
||||
// #pragma message("=== AES HW === ")
|
||||
|
||||
#if defined(__clang__) || defined(__GNUC__)
|
||||
#ifdef MY_CPU_ARM64
|
||||
#define ATTRIB_AES __attribute__((__target__("+crypto")))
|
||||
#else
|
||||
#define ATTRIB_AES __attribute__((__target__("fpu=crypto-neon-fp-armv8")))
|
||||
#endif
|
||||
#else
|
||||
// _MSC_VER
|
||||
// for arm32
|
||||
#define _ARM_USE_NEW_NEON_INTRINSICS
|
||||
#endif
|
||||
|
||||
#ifndef ATTRIB_AES
|
||||
#define ATTRIB_AES
|
||||
#endif
|
||||
|
||||
#if defined(_MSC_VER) && defined(MY_CPU_ARM64)
|
||||
#include <arm64_neon.h>
|
||||
#else
|
||||
#include <arm_neon.h>
|
||||
#endif
|
||||
|
||||
typedef uint8x16_t v128;
|
||||
|
||||
#define AES_FUNC_START(name) \
|
||||
void Z7_FASTCALL name(UInt32 *ivAes, Byte *data8, size_t numBlocks)
|
||||
// void Z7_FASTCALL name(v128 *p, v128 *data, size_t numBlocks)
|
||||
|
||||
#define AES_FUNC_START2(name) \
|
||||
AES_FUNC_START (name); \
|
||||
ATTRIB_AES \
|
||||
AES_FUNC_START (name)
|
||||
|
||||
#define MM_OP(op, dest, src) dest = op(dest, src);
|
||||
#define MM_OP_m(op, src) MM_OP(op, m, src)
|
||||
#define MM_OP1_m(op) m = op(m);
|
||||
|
||||
#define MM_XOR( dest, src) MM_OP(veorq_u8, dest, src)
|
||||
#define MM_XOR_m( src) MM_XOR(m, src)
|
||||
|
||||
#define AES_E_m(k) MM_OP_m (vaeseq_u8, k)
|
||||
#define AES_E_MC_m(k) AES_E_m (k) MM_OP1_m(vaesmcq_u8)
|
||||
|
||||
|
||||
AES_FUNC_START2 (AesCbc_Encode_HW)
|
||||
{
|
||||
v128 *p = (v128*)(void*)ivAes;
|
||||
v128 *data = (v128*)(void*)data8;
|
||||
v128 m = *p;
|
||||
const v128 k0 = p[2];
|
||||
const v128 k1 = p[3];
|
||||
const v128 k2 = p[4];
|
||||
const v128 k3 = p[5];
|
||||
const v128 k4 = p[6];
|
||||
const v128 k5 = p[7];
|
||||
const v128 k6 = p[8];
|
||||
const v128 k7 = p[9];
|
||||
const v128 k8 = p[10];
|
||||
const v128 k9 = p[11];
|
||||
const UInt32 numRounds2 = *(const UInt32 *)(p + 1);
|
||||
const v128 *w = p + ((size_t)numRounds2 * 2);
|
||||
const v128 k_z1 = w[1];
|
||||
const v128 k_z0 = w[2];
|
||||
for (; numBlocks != 0; numBlocks--, data++)
|
||||
{
|
||||
MM_XOR_m (*data);
|
||||
AES_E_MC_m (k0)
|
||||
AES_E_MC_m (k1)
|
||||
AES_E_MC_m (k2)
|
||||
AES_E_MC_m (k3)
|
||||
AES_E_MC_m (k4)
|
||||
AES_E_MC_m (k5)
|
||||
AES_E_MC_m (k6)
|
||||
AES_E_MC_m (k7)
|
||||
AES_E_MC_m (k8)
|
||||
if (numRounds2 >= 6)
|
||||
{
|
||||
AES_E_MC_m (k9)
|
||||
AES_E_MC_m (p[12])
|
||||
if (numRounds2 != 6)
|
||||
{
|
||||
AES_E_MC_m (p[13])
|
||||
AES_E_MC_m (p[14])
|
||||
}
|
||||
}
|
||||
AES_E_m (k_z1)
|
||||
MM_XOR_m (k_z0);
|
||||
*data = m;
|
||||
}
|
||||
*p = m;
|
||||
}
|
||||
|
||||
|
||||
#define WOP_1(op)
|
||||
#define WOP_2(op) WOP_1 (op) op (m1, 1)
|
||||
#define WOP_3(op) WOP_2 (op) op (m2, 2)
|
||||
#define WOP_4(op) WOP_3 (op) op (m3, 3)
|
||||
#define WOP_5(op) WOP_4 (op) op (m4, 4)
|
||||
#define WOP_6(op) WOP_5 (op) op (m5, 5)
|
||||
#define WOP_7(op) WOP_6 (op) op (m6, 6)
|
||||
#define WOP_8(op) WOP_7 (op) op (m7, 7)
|
||||
|
||||
#define NUM_WAYS 8
|
||||
#define WOP_M1 WOP_8
|
||||
|
||||
#define WOP(op) op (m0, 0) WOP_M1(op)
|
||||
|
||||
#define DECLARE_VAR(reg, ii) v128 reg;
|
||||
#define LOAD_data( reg, ii) reg = data[ii];
|
||||
#define STORE_data( reg, ii) data[ii] = reg;
|
||||
#if (NUM_WAYS > 1)
|
||||
#define XOR_data_M1(reg, ii) MM_XOR (reg, data[ii- 1])
|
||||
#endif
|
||||
|
||||
#define MM_OP_key(op, reg) MM_OP (op, reg, key)
|
||||
|
||||
#define AES_D_m(k) MM_OP_m (vaesdq_u8, k)
|
||||
#define AES_D_IMC_m(k) AES_D_m (k) MM_OP1_m (vaesimcq_u8)
|
||||
|
||||
#define AES_XOR( reg, ii) MM_OP_key (veorq_u8, reg)
|
||||
#define AES_D( reg, ii) MM_OP_key (vaesdq_u8, reg)
|
||||
#define AES_E( reg, ii) MM_OP_key (vaeseq_u8, reg)
|
||||
|
||||
#define AES_D_IMC( reg, ii) AES_D (reg, ii) reg = vaesimcq_u8(reg);
|
||||
#define AES_E_MC( reg, ii) AES_E (reg, ii) reg = vaesmcq_u8(reg);
|
||||
|
||||
#define CTR_START(reg, ii) MM_OP (vaddq_u64, ctr, one) reg = vreinterpretq_u8_u64(ctr);
|
||||
#define CTR_END( reg, ii) MM_XOR (data[ii], reg)
|
||||
|
||||
#define WOP_KEY(op, n) { \
|
||||
const v128 key = w[n]; \
|
||||
WOP(op) }
|
||||
|
||||
#define WIDE_LOOP_START \
|
||||
dataEnd = data + numBlocks; \
|
||||
if (numBlocks >= NUM_WAYS) \
|
||||
{ dataEnd -= NUM_WAYS; do { \
|
||||
|
||||
#define WIDE_LOOP_END \
|
||||
data += NUM_WAYS; \
|
||||
} while (data <= dataEnd); \
|
||||
dataEnd += NUM_WAYS; } \
|
||||
|
||||
#define SINGLE_LOOP \
|
||||
for (; data < dataEnd; data++)
|
||||
|
||||
|
||||
AES_FUNC_START2 (AesCbc_Decode_HW)
|
||||
{
|
||||
v128 *p = (v128*)(void*)ivAes;
|
||||
v128 *data = (v128*)(void*)data8;
|
||||
v128 iv = *p;
|
||||
const v128 *wStart = p + ((size_t)*(const UInt32 *)(p + 1)) * 2;
|
||||
const v128 *dataEnd;
|
||||
p += 2;
|
||||
|
||||
WIDE_LOOP_START
|
||||
{
|
||||
const v128 *w = wStart;
|
||||
WOP (DECLARE_VAR)
|
||||
WOP (LOAD_data)
|
||||
WOP_KEY (AES_D_IMC, 2)
|
||||
do
|
||||
{
|
||||
WOP_KEY (AES_D_IMC, 1)
|
||||
WOP_KEY (AES_D_IMC, 0)
|
||||
w -= 2;
|
||||
}
|
||||
while (w != p);
|
||||
WOP_KEY (AES_D, 1)
|
||||
WOP_KEY (AES_XOR, 0)
|
||||
MM_XOR (m0, iv);
|
||||
WOP_M1 (XOR_data_M1)
|
||||
iv = data[NUM_WAYS - 1];
|
||||
WOP (STORE_data)
|
||||
}
|
||||
WIDE_LOOP_END
|
||||
|
||||
SINGLE_LOOP
|
||||
{
|
||||
const v128 *w = wStart;
|
||||
v128 m = *data;
|
||||
AES_D_IMC_m (w[2])
|
||||
do
|
||||
{
|
||||
AES_D_IMC_m (w[1]);
|
||||
AES_D_IMC_m (w[0]);
|
||||
w -= 2;
|
||||
}
|
||||
while (w != p);
|
||||
AES_D_m (w[1]);
|
||||
MM_XOR_m (w[0]);
|
||||
MM_XOR_m (iv);
|
||||
iv = *data;
|
||||
*data = m;
|
||||
}
|
||||
|
||||
p[-2] = iv;
|
||||
}
|
||||
|
||||
|
||||
AES_FUNC_START2 (AesCtr_Code_HW)
|
||||
{
|
||||
v128 *p = (v128*)(void*)ivAes;
|
||||
v128 *data = (v128*)(void*)data8;
|
||||
uint64x2_t ctr = vreinterpretq_u64_u8(*p);
|
||||
const v128 *wEnd = p + ((size_t)*(const UInt32 *)(p + 1)) * 2;
|
||||
const v128 *dataEnd;
|
||||
uint64x2_t one = vdupq_n_u64(0);
|
||||
one = vsetq_lane_u64(1, one, 0);
|
||||
p += 2;
|
||||
|
||||
WIDE_LOOP_START
|
||||
{
|
||||
const v128 *w = p;
|
||||
WOP (DECLARE_VAR)
|
||||
WOP (CTR_START)
|
||||
do
|
||||
{
|
||||
WOP_KEY (AES_E_MC, 0)
|
||||
WOP_KEY (AES_E_MC, 1)
|
||||
w += 2;
|
||||
}
|
||||
while (w != wEnd);
|
||||
WOP_KEY (AES_E_MC, 0)
|
||||
WOP_KEY (AES_E, 1)
|
||||
WOP_KEY (AES_XOR, 2)
|
||||
WOP (CTR_END)
|
||||
}
|
||||
WIDE_LOOP_END
|
||||
|
||||
SINGLE_LOOP
|
||||
{
|
||||
const v128 *w = p;
|
||||
v128 m;
|
||||
CTR_START (m, 0);
|
||||
do
|
||||
{
|
||||
AES_E_MC_m (w[0]);
|
||||
AES_E_MC_m (w[1]);
|
||||
w += 2;
|
||||
}
|
||||
while (w != wEnd);
|
||||
AES_E_MC_m (w[0])
|
||||
AES_E_m (w[1])
|
||||
MM_XOR_m (w[2])
|
||||
CTR_END (m, 0)
|
||||
}
|
||||
|
||||
p[-2] = vreinterpretq_u8_u64(ctr);
|
||||
}
|
||||
|
||||
#endif // USE_HW_AES
|
||||
|
||||
#endif // MY_CPU_ARM_OR_ARM64
|
||||
|
||||
#undef NUM_WAYS
|
||||
#undef WOP_M1
|
||||
#undef WOP
|
||||
#undef DECLARE_VAR
|
||||
#undef LOAD_data
|
||||
#undef STORE_data
|
||||
#undef USE_INTEL_AES
|
||||
#undef USE_HW_AES
|
|
@ -1,38 +1,54 @@
|
|||
/* Alloc.c -- Memory allocation functions
|
||||
2021-07-13 : Igor Pavlov : Public domain */
|
||||
2023-04-02 : Igor Pavlov : Public domain */
|
||||
|
||||
#include "Precomp.h"
|
||||
|
||||
#include <stdio.h>
|
||||
|
||||
#ifdef _WIN32
|
||||
#include <Windows.h>
|
||||
#include "7zWindows.h"
|
||||
#endif
|
||||
#include <stdlib.h>
|
||||
|
||||
#include "Alloc.h"
|
||||
|
||||
/* #define _SZ_ALLOC_DEBUG */
|
||||
#ifdef _WIN32
|
||||
#ifdef Z7_LARGE_PAGES
|
||||
#if defined(__clang__) || defined(__GNUC__)
|
||||
typedef void (*Z7_voidFunction)(void);
|
||||
#define MY_CAST_FUNC (Z7_voidFunction)
|
||||
#elif defined(_MSC_VER) && _MSC_VER > 1920
|
||||
#define MY_CAST_FUNC (void *)
|
||||
// #pragma warning(disable : 4191) // 'type cast': unsafe conversion from 'FARPROC' to 'void (__cdecl *)()'
|
||||
#else
|
||||
#define MY_CAST_FUNC
|
||||
#endif
|
||||
#endif // Z7_LARGE_PAGES
|
||||
#endif // _WIN32
|
||||
|
||||
/* use _SZ_ALLOC_DEBUG to debug alloc/free operations */
|
||||
#ifdef _SZ_ALLOC_DEBUG
|
||||
// #define SZ_ALLOC_DEBUG
|
||||
/* #define SZ_ALLOC_DEBUG */
|
||||
|
||||
/* use SZ_ALLOC_DEBUG to debug alloc/free operations */
|
||||
#ifdef SZ_ALLOC_DEBUG
|
||||
|
||||
#include <string.h>
|
||||
#include <stdio.h>
|
||||
int g_allocCount = 0;
|
||||
int g_allocCountMid = 0;
|
||||
int g_allocCountBig = 0;
|
||||
static int g_allocCount = 0;
|
||||
#ifdef _WIN32
|
||||
static int g_allocCountMid = 0;
|
||||
static int g_allocCountBig = 0;
|
||||
#endif
|
||||
|
||||
|
||||
#define CONVERT_INT_TO_STR(charType, tempSize) \
|
||||
unsigned char temp[tempSize]; unsigned i = 0; \
|
||||
while (val >= 10) { temp[i++] = (unsigned char)('0' + (unsigned)(val % 10)); val /= 10; } \
|
||||
char temp[tempSize]; unsigned i = 0; \
|
||||
while (val >= 10) { temp[i++] = (char)('0' + (unsigned)(val % 10)); val /= 10; } \
|
||||
*s++ = (charType)('0' + (unsigned)val); \
|
||||
while (i != 0) { i--; *s++ = temp[i]; } \
|
||||
*s = 0;
|
||||
|
||||
static void ConvertUInt64ToString(UInt64 val, char *s)
|
||||
{
|
||||
CONVERT_INT_TO_STR(char, 24);
|
||||
CONVERT_INT_TO_STR(char, 24)
|
||||
}
|
||||
|
||||
#define GET_HEX_CHAR(t) ((char)(((t < 10) ? ('0' + t) : ('A' + (t - 10)))))
|
||||
|
@ -77,7 +93,7 @@ static void PrintAligned(const char *s, size_t align)
|
|||
Print(s);
|
||||
}
|
||||
|
||||
static void PrintLn()
|
||||
static void PrintLn(void)
|
||||
{
|
||||
Print("\n");
|
||||
}
|
||||
|
@ -89,10 +105,10 @@ static void PrintHex(UInt64 v, size_t align)
|
|||
PrintAligned(s, align);
|
||||
}
|
||||
|
||||
static void PrintDec(UInt64 v, size_t align)
|
||||
static void PrintDec(int v, size_t align)
|
||||
{
|
||||
char s[32];
|
||||
ConvertUInt64ToString(v, s);
|
||||
ConvertUInt64ToString((unsigned)v, s);
|
||||
PrintAligned(s, align);
|
||||
}
|
||||
|
||||
|
@ -102,12 +118,19 @@ static void PrintAddr(void *p)
|
|||
}
|
||||
|
||||
|
||||
#define PRINT_ALLOC(name, cnt, size, ptr) \
|
||||
#define PRINT_REALLOC(name, cnt, size, ptr) { \
|
||||
Print(name " "); \
|
||||
if (!ptr) PrintDec(cnt++, 10); \
|
||||
PrintHex(size, 10); \
|
||||
PrintAddr(ptr); \
|
||||
PrintLn(); }
|
||||
|
||||
#define PRINT_ALLOC(name, cnt, size, ptr) { \
|
||||
Print(name " "); \
|
||||
PrintDec(cnt++, 10); \
|
||||
PrintHex(size, 10); \
|
||||
PrintAddr(ptr); \
|
||||
PrintLn();
|
||||
PrintLn(); }
|
||||
|
||||
#define PRINT_FREE(name, cnt, ptr) if (ptr) { \
|
||||
Print(name " "); \
|
||||
|
@ -117,7 +140,9 @@ static void PrintAddr(void *p)
|
|||
|
||||
#else
|
||||
|
||||
#ifdef _WIN32
|
||||
#define PRINT_ALLOC(name, cnt, size, ptr)
|
||||
#endif
|
||||
#define PRINT_FREE(name, cnt, ptr)
|
||||
#define Print(s)
|
||||
#define PrintLn()
|
||||
|
@ -127,16 +152,31 @@ static void PrintAddr(void *p)
|
|||
#endif
|
||||
|
||||
|
||||
/*
|
||||
by specification:
|
||||
malloc(0) : returns NULL or a unique pointer value that can later be successfully passed to free()
|
||||
realloc(NULL, size) : the call is equivalent to malloc(size)
|
||||
realloc(non_NULL, 0) : the call is equivalent to free(ptr)
|
||||
|
||||
in main compilers:
|
||||
malloc(0) : returns non_NULL
|
||||
realloc(NULL, 0) : returns non_NULL
|
||||
realloc(non_NULL, 0) : returns NULL
|
||||
*/
|
||||
|
||||
|
||||
void *MyAlloc(size_t size)
|
||||
{
|
||||
if (size == 0)
|
||||
return NULL;
|
||||
PRINT_ALLOC("Alloc ", g_allocCount, size, NULL);
|
||||
#ifdef _SZ_ALLOC_DEBUG
|
||||
// PRINT_ALLOC("Alloc ", g_allocCount, size, NULL)
|
||||
#ifdef SZ_ALLOC_DEBUG
|
||||
{
|
||||
void *p = malloc(size);
|
||||
// PRINT_ALLOC("Alloc ", g_allocCount, size, p);
|
||||
if (p)
|
||||
{
|
||||
PRINT_ALLOC("Alloc ", g_allocCount, size, p)
|
||||
}
|
||||
return p;
|
||||
}
|
||||
#else
|
||||
|
@ -146,33 +186,64 @@ void *MyAlloc(size_t size)
|
|||
|
||||
void MyFree(void *address)
|
||||
{
|
||||
PRINT_FREE("Free ", g_allocCount, address);
|
||||
PRINT_FREE("Free ", g_allocCount, address)
|
||||
|
||||
free(address);
|
||||
}
|
||||
|
||||
void *MyRealloc(void *address, size_t size)
|
||||
{
|
||||
if (size == 0)
|
||||
{
|
||||
MyFree(address);
|
||||
return NULL;
|
||||
}
|
||||
// PRINT_REALLOC("Realloc ", g_allocCount, size, address)
|
||||
#ifdef SZ_ALLOC_DEBUG
|
||||
{
|
||||
void *p = realloc(address, size);
|
||||
if (p)
|
||||
{
|
||||
PRINT_REALLOC("Realloc ", g_allocCount, size, address)
|
||||
}
|
||||
return p;
|
||||
}
|
||||
#else
|
||||
return realloc(address, size);
|
||||
#endif
|
||||
}
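/* Editor's note: a small sketch, not part of the original source, showing the size-0
   handling documented in the comment above as it is implemented by these wrappers. */
static void Example_AllocSemantics(void)
{
  void *p = MyAlloc(0);   /* returns NULL: size 0 never reaches malloc() */
  p = MyAlloc(100);       /* plain malloc(100) (plus optional debug printing) */
  p = MyRealloc(p, 0);    /* equivalent to MyFree(p); returns NULL */
  MyFree(p);              /* MyFree(NULL) is a no-op, like free(NULL) */
}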
|
||||
|
||||
|
||||
#ifdef _WIN32
|
||||
|
||||
void *MidAlloc(size_t size)
|
||||
{
|
||||
if (size == 0)
|
||||
return NULL;
|
||||
|
||||
PRINT_ALLOC("Alloc-Mid", g_allocCountMid, size, NULL);
|
||||
|
||||
#ifdef SZ_ALLOC_DEBUG
|
||||
{
|
||||
void *p = VirtualAlloc(NULL, size, MEM_COMMIT, PAGE_READWRITE);
|
||||
if (p)
|
||||
{
|
||||
PRINT_ALLOC("Alloc-Mid", g_allocCountMid, size, p)
|
||||
}
|
||||
return p;
|
||||
}
|
||||
#else
|
||||
return VirtualAlloc(NULL, size, MEM_COMMIT, PAGE_READWRITE);
|
||||
#endif
|
||||
}
|
||||
|
||||
void MidFree(void *address)
|
||||
{
|
||||
PRINT_FREE("Free-Mid", g_allocCountMid, address);
|
||||
PRINT_FREE("Free-Mid", g_allocCountMid, address)
|
||||
|
||||
if (!address)
|
||||
return;
|
||||
VirtualFree(address, 0, MEM_RELEASE);
|
||||
}
|
||||
|
||||
#ifdef _7ZIP_LARGE_PAGES
|
||||
#ifdef Z7_LARGE_PAGES
|
||||
|
||||
#ifdef MEM_LARGE_PAGES
|
||||
#define MY__MEM_LARGE_PAGES MEM_LARGE_PAGES
|
||||
|
@ -183,34 +254,35 @@ void MidFree(void *address)
|
|||
extern
|
||||
SIZE_T g_LargePageSize;
|
||||
SIZE_T g_LargePageSize = 0;
|
||||
typedef SIZE_T (WINAPI *GetLargePageMinimumP)(VOID);
|
||||
typedef SIZE_T (WINAPI *Func_GetLargePageMinimum)(VOID);
|
||||
|
||||
#endif // _7ZIP_LARGE_PAGES
|
||||
|
||||
void SetLargePageSize()
|
||||
void SetLargePageSize(void)
|
||||
{
|
||||
#ifdef _7ZIP_LARGE_PAGES
|
||||
#ifdef Z7_LARGE_PAGES
|
||||
SIZE_T size;
|
||||
GetLargePageMinimumP largePageMinimum = (GetLargePageMinimumP)
|
||||
GetProcAddress(GetModuleHandle(TEXT("kernel32.dll")), "GetLargePageMinimum");
|
||||
if (!largePageMinimum)
|
||||
const
|
||||
Func_GetLargePageMinimum fn =
|
||||
(Func_GetLargePageMinimum) MY_CAST_FUNC GetProcAddress(GetModuleHandle(TEXT("kernel32.dll")),
|
||||
"GetLargePageMinimum");
|
||||
if (!fn)
|
||||
return;
|
||||
size = largePageMinimum();
|
||||
size = fn();
|
||||
if (size == 0 || (size & (size - 1)) != 0)
|
||||
return;
|
||||
g_LargePageSize = size;
|
||||
#endif
|
||||
}
|
||||
|
||||
#endif // Z7_LARGE_PAGES
|
||||
|
||||
void *BigAlloc(size_t size)
|
||||
{
|
||||
if (size == 0)
|
||||
return NULL;
|
||||
|
||||
PRINT_ALLOC("Alloc-Big", g_allocCountBig, size, NULL);
|
||||
|
||||
#ifdef _7ZIP_LARGE_PAGES
|
||||
PRINT_ALLOC("Alloc-Big", g_allocCountBig, size, NULL)
|
||||
|
||||
#ifdef Z7_LARGE_PAGES
|
||||
{
|
||||
SIZE_T ps = g_LargePageSize;
|
||||
if (ps != 0 && ps <= (1 << 30) && size > (ps / 2))
|
||||
|
@ -220,38 +292,38 @@ void *BigAlloc(size_t size)
|
|||
size2 = (size + ps) & ~ps;
|
||||
if (size2 >= size)
|
||||
{
|
||||
void *res = VirtualAlloc(NULL, size2, MEM_COMMIT | MY__MEM_LARGE_PAGES, PAGE_READWRITE);
|
||||
if (res)
|
||||
return res;
|
||||
void *p = VirtualAlloc(NULL, size2, MEM_COMMIT | MY__MEM_LARGE_PAGES, PAGE_READWRITE);
|
||||
if (p)
|
||||
{
|
||||
PRINT_ALLOC("Alloc-BM ", g_allocCountMid, size2, p)
|
||||
return p;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
#endif
|
||||
|
||||
return VirtualAlloc(NULL, size, MEM_COMMIT, PAGE_READWRITE);
|
||||
return MidAlloc(size);
|
||||
}
|
||||
|
||||
void BigFree(void *address)
|
||||
{
|
||||
PRINT_FREE("Free-Big", g_allocCountBig, address);
|
||||
|
||||
if (!address)
|
||||
return;
|
||||
VirtualFree(address, 0, MEM_RELEASE);
|
||||
PRINT_FREE("Free-Big", g_allocCountBig, address)
|
||||
MidFree(address);
|
||||
}
|
||||
|
||||
#endif
|
||||
#endif // _WIN32
|
||||
|
||||
|
||||
static void *SzAlloc(ISzAllocPtr p, size_t size) { UNUSED_VAR(p); return MyAlloc(size); }
|
||||
static void SzFree(ISzAllocPtr p, void *address) { UNUSED_VAR(p); MyFree(address); }
|
||||
static void *SzAlloc(ISzAllocPtr p, size_t size) { UNUSED_VAR(p) return MyAlloc(size); }
|
||||
static void SzFree(ISzAllocPtr p, void *address) { UNUSED_VAR(p) MyFree(address); }
|
||||
const ISzAlloc g_Alloc = { SzAlloc, SzFree };
|
||||
|
||||
#ifdef _WIN32
|
||||
static void *SzMidAlloc(ISzAllocPtr p, size_t size) { UNUSED_VAR(p); return MidAlloc(size); }
|
||||
static void SzMidFree(ISzAllocPtr p, void *address) { UNUSED_VAR(p); MidFree(address); }
|
||||
static void *SzBigAlloc(ISzAllocPtr p, size_t size) { UNUSED_VAR(p); return BigAlloc(size); }
|
||||
static void SzBigFree(ISzAllocPtr p, void *address) { UNUSED_VAR(p); BigFree(address); }
|
||||
static void *SzMidAlloc(ISzAllocPtr p, size_t size) { UNUSED_VAR(p) return MidAlloc(size); }
|
||||
static void SzMidFree(ISzAllocPtr p, void *address) { UNUSED_VAR(p) MidFree(address); }
|
||||
static void *SzBigAlloc(ISzAllocPtr p, size_t size) { UNUSED_VAR(p) return BigAlloc(size); }
|
||||
static void SzBigFree(ISzAllocPtr p, void *address) { UNUSED_VAR(p) BigFree(address); }
|
||||
const ISzAlloc g_MidAlloc = { SzMidAlloc, SzMidFree };
|
||||
const ISzAlloc g_BigAlloc = { SzBigAlloc, SzBigFree };
|
||||
#endif
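/* Editor's note: an illustration, not part of the original source. Callers normally reach
   these wrappers through the ISzAlloc vtables; the ISzAlloc_Alloc / ISzAlloc_Free helper
   macros from 7zTypes.h are assumed here. */
static void Example_ISzAllocUsage(void)
{
  void *p = ISzAlloc_Alloc(&g_Alloc, 1024);  /* dispatches to SzAlloc -> MyAlloc */
  ISzAlloc_Free(&g_Alloc, p);                /* dispatches to SzFree -> MyFree */
}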
|
||||
|
@ -334,7 +406,7 @@ static void *SzAlignedAlloc(ISzAllocPtr pp, size_t size)
|
|||
void *p;
|
||||
void *pAligned;
|
||||
size_t newSize;
|
||||
UNUSED_VAR(pp);
|
||||
UNUSED_VAR(pp)
|
||||
|
||||
/* also we can allocate additional dummy ALLOC_ALIGN_SIZE bytes after aligned
|
||||
block to prevent cache line sharing with another allocated blocks */
|
||||
|
@ -362,7 +434,7 @@ static void *SzAlignedAlloc(ISzAllocPtr pp, size_t size)
|
|||
#else
|
||||
|
||||
void *p;
|
||||
UNUSED_VAR(pp);
|
||||
UNUSED_VAR(pp)
|
||||
if (posix_memalign(&p, ALLOC_ALIGN_SIZE, size))
|
||||
return NULL;
|
||||
|
||||
|
@ -377,7 +449,7 @@ static void *SzAlignedAlloc(ISzAllocPtr pp, size_t size)
|
|||
|
||||
static void SzAlignedFree(ISzAllocPtr pp, void *address)
|
||||
{
|
||||
UNUSED_VAR(pp);
|
||||
UNUSED_VAR(pp)
|
||||
#ifndef USE_posix_memalign
|
||||
if (address)
|
||||
MyFree(((void **)address)[-1]);
|
||||
|
@ -401,7 +473,7 @@ const ISzAlloc g_AlignedAlloc = { SzAlignedAlloc, SzAlignedFree };
|
|||
|
||||
static void *AlignOffsetAlloc_Alloc(ISzAllocPtr pp, size_t size)
|
||||
{
|
||||
CAlignOffsetAlloc *p = CONTAINER_FROM_VTBL(pp, CAlignOffsetAlloc, vt);
|
||||
const CAlignOffsetAlloc *p = Z7_CONTAINER_FROM_VTBL_CONST(pp, CAlignOffsetAlloc, vt);
|
||||
void *adr;
|
||||
void *pAligned;
|
||||
size_t newSize;
|
||||
|
@ -447,7 +519,7 @@ static void AlignOffsetAlloc_Free(ISzAllocPtr pp, void *address)
|
|||
{
|
||||
if (address)
|
||||
{
|
||||
CAlignOffsetAlloc *p = CONTAINER_FROM_VTBL(pp, CAlignOffsetAlloc, vt);
|
||||
const CAlignOffsetAlloc *p = Z7_CONTAINER_FROM_VTBL_CONST(pp, CAlignOffsetAlloc, vt);
|
||||
PrintLn();
|
||||
Print("- Aligned Free: ");
|
||||
PrintLn();
|
||||
|
|
|
@ -1,29 +1,24 @@
|
|||
/* Bcj2.c -- BCJ2 Decoder (Converter for x86 code)
|
||||
2021-02-09 : Igor Pavlov : Public domain */
|
||||
2023-03-01 : Igor Pavlov : Public domain */
|
||||
|
||||
#include "Precomp.h"
|
||||
|
||||
#include "Bcj2.h"
|
||||
#include "CpuArch.h"
|
||||
|
||||
#define CProb UInt16
|
||||
|
||||
#define kTopValue ((UInt32)1 << 24)
|
||||
#define kNumModelBits 11
|
||||
#define kBitModelTotal (1 << kNumModelBits)
|
||||
#define kNumBitModelTotalBits 11
|
||||
#define kBitModelTotal (1 << kNumBitModelTotalBits)
|
||||
#define kNumMoveBits 5
|
||||
|
||||
#define _IF_BIT_0 ttt = *prob; bound = (p->range >> kNumModelBits) * ttt; if (p->code < bound)
|
||||
#define _UPDATE_0 p->range = bound; *prob = (CProb)(ttt + ((kBitModelTotal - ttt) >> kNumMoveBits));
|
||||
#define _UPDATE_1 p->range -= bound; p->code -= bound; *prob = (CProb)(ttt - (ttt >> kNumMoveBits));
|
||||
// UInt32 bcj2_stats[256 + 2][2];
|
||||
|
||||
void Bcj2Dec_Init(CBcj2Dec *p)
|
||||
{
|
||||
unsigned i;
|
||||
|
||||
p->state = BCJ2_DEC_STATE_OK;
|
||||
p->state = BCJ2_STREAM_RC; // BCJ2_DEC_STATE_OK;
|
||||
p->ip = 0;
|
||||
p->temp[3] = 0;
|
||||
p->temp = 0;
|
||||
p->range = 0;
|
||||
p->code = 0;
|
||||
for (i = 0; i < sizeof(p->probs) / sizeof(p->probs[0]); i++)
|
||||
|
@ -32,217 +27,248 @@ void Bcj2Dec_Init(CBcj2Dec *p)
|
|||
|
||||
SRes Bcj2Dec_Decode(CBcj2Dec *p)
|
||||
{
|
||||
UInt32 v = p->temp;
|
||||
// const Byte *src;
|
||||
if (p->range <= 5)
|
||||
{
|
||||
p->state = BCJ2_DEC_STATE_OK;
|
||||
UInt32 code = p->code;
|
||||
p->state = BCJ2_DEC_STATE_ERROR; /* for case if we return SZ_ERROR_DATA; */
|
||||
for (; p->range != 5; p->range++)
|
||||
{
|
||||
if (p->range == 1 && p->code != 0)
|
||||
if (p->range == 1 && code != 0)
|
||||
return SZ_ERROR_DATA;
|
||||
|
||||
if (p->bufs[BCJ2_STREAM_RC] == p->lims[BCJ2_STREAM_RC])
|
||||
{
|
||||
p->state = BCJ2_STREAM_RC;
|
||||
return SZ_OK;
|
||||
}
|
||||
|
||||
p->code = (p->code << 8) | *(p->bufs[BCJ2_STREAM_RC])++;
|
||||
code = (code << 8) | *(p->bufs[BCJ2_STREAM_RC])++;
|
||||
p->code = code;
|
||||
}
|
||||
|
||||
if (p->code == 0xFFFFFFFF)
|
||||
if (code == 0xffffffff)
|
||||
return SZ_ERROR_DATA;
|
||||
|
||||
p->range = 0xFFFFFFFF;
|
||||
p->range = 0xffffffff;
|
||||
}
|
||||
else if (p->state >= BCJ2_DEC_STATE_ORIG_0)
|
||||
// else
|
||||
{
|
||||
while (p->state <= BCJ2_DEC_STATE_ORIG_3)
|
||||
unsigned state = p->state;
|
||||
// we check BCJ2_IS_32BIT_STREAM() here instead of check in the main loop
|
||||
if (BCJ2_IS_32BIT_STREAM(state))
|
||||
{
|
||||
const Byte *cur = p->bufs[state];
|
||||
if (cur == p->lims[state])
|
||||
return SZ_OK;
|
||||
p->bufs[state] = cur + 4;
|
||||
{
|
||||
const UInt32 ip = p->ip + 4;
|
||||
v = GetBe32a(cur) - ip;
|
||||
p->ip = ip;
|
||||
}
|
||||
state = BCJ2_DEC_STATE_ORIG_0;
|
||||
}
|
||||
if ((unsigned)(state - BCJ2_DEC_STATE_ORIG_0) < 4)
|
||||
{
|
||||
Byte *dest = p->dest;
|
||||
if (dest == p->destLim)
|
||||
return SZ_OK;
|
||||
*dest = p->temp[(size_t)p->state - BCJ2_DEC_STATE_ORIG_0];
|
||||
p->state++;
|
||||
p->dest = dest + 1;
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
if (BCJ2_IS_32BIT_STREAM(p->state))
|
||||
{
|
||||
const Byte *cur = p->bufs[p->state];
|
||||
if (cur == p->lims[p->state])
|
||||
return SZ_OK;
|
||||
p->bufs[p->state] = cur + 4;
|
||||
|
||||
{
|
||||
UInt32 val;
|
||||
Byte *dest;
|
||||
SizeT rem;
|
||||
|
||||
p->ip += 4;
|
||||
val = GetBe32(cur) - p->ip;
|
||||
dest = p->dest;
|
||||
rem = p->destLim - dest;
|
||||
if (rem < 4)
|
||||
for (;;)
|
||||
{
|
||||
SizeT i;
|
||||
SetUi32(p->temp, val);
|
||||
for (i = 0; i < rem; i++)
|
||||
dest[i] = p->temp[i];
|
||||
p->dest = dest + rem;
|
||||
p->state = BCJ2_DEC_STATE_ORIG_0 + (unsigned)rem;
|
||||
return SZ_OK;
|
||||
if (dest == p->destLim)
|
||||
{
|
||||
p->state = state;
|
||||
p->temp = v;
|
||||
return SZ_OK;
|
||||
}
|
||||
*dest++ = (Byte)v;
|
||||
p->dest = dest;
|
||||
if (++state == BCJ2_DEC_STATE_ORIG_3 + 1)
|
||||
break;
|
||||
v >>= 8;
|
||||
}
|
||||
SetUi32(dest, val);
|
||||
p->temp[3] = (Byte)(val >> 24);
|
||||
p->dest = dest + 4;
|
||||
p->state = BCJ2_DEC_STATE_OK;
|
||||
}
|
||||
}
|
||||
*/
|
||||
|
||||
// src = p->bufs[BCJ2_STREAM_MAIN];
|
||||
for (;;)
|
||||
{
|
||||
/*
|
||||
if (BCJ2_IS_32BIT_STREAM(p->state))
|
||||
p->state = BCJ2_DEC_STATE_OK;
|
||||
else
|
||||
*/
|
||||
{
|
||||
if (p->range < kTopValue)
|
||||
{
|
||||
if (p->bufs[BCJ2_STREAM_RC] == p->lims[BCJ2_STREAM_RC])
|
||||
{
|
||||
p->state = BCJ2_STREAM_RC;
|
||||
p->temp = v;
|
||||
return SZ_OK;
|
||||
}
|
||||
p->range <<= 8;
|
||||
p->code = (p->code << 8) | *(p->bufs[BCJ2_STREAM_RC])++;
|
||||
}
|
||||
|
||||
{
|
||||
const Byte *src = p->bufs[BCJ2_STREAM_MAIN];
|
||||
const Byte *srcLim;
|
||||
Byte *dest;
|
||||
SizeT num = (SizeT)(p->lims[BCJ2_STREAM_MAIN] - src);
|
||||
|
||||
if (num == 0)
|
||||
Byte *dest = p->dest;
|
||||
{
|
||||
p->state = BCJ2_STREAM_MAIN;
|
||||
return SZ_OK;
|
||||
const SizeT rem = (SizeT)(p->lims[BCJ2_STREAM_MAIN] - src);
|
||||
SizeT num = (SizeT)(p->destLim - dest);
|
||||
if (num >= rem)
|
||||
num = rem;
|
||||
#define NUM_ITERS 4
|
||||
#if (NUM_ITERS & (NUM_ITERS - 1)) == 0
|
||||
num &= ~((SizeT)NUM_ITERS - 1); // if (NUM_ITERS == (1 << x))
|
||||
#else
|
||||
num -= num % NUM_ITERS; // if (NUM_ITERS != (1 << x))
|
||||
#endif
|
||||
srcLim = src + num;
|
||||
}
|
||||
|
||||
dest = p->dest;
|
||||
if (num > (SizeT)(p->destLim - dest))
|
||||
|
||||
#define NUM_SHIFT_BITS 24
|
||||
#define ONE_ITER(indx) { \
|
||||
const unsigned b = src[indx]; \
|
||||
*dest++ = (Byte)b; \
|
||||
v = (v << NUM_SHIFT_BITS) | b; \
|
||||
if (((b + (0x100 - 0xe8)) & 0xfe) == 0) break; \
|
||||
if (((v - (((UInt32)0x0f << (NUM_SHIFT_BITS)) + 0x80)) & \
|
||||
((((UInt32)1 << (4 + NUM_SHIFT_BITS)) - 0x1) << 4)) == 0) break; \
|
||||
/* ++dest */; /* v = b; */ }
|
||||
|
||||
if (src != srcLim)
|
||||
for (;;)
|
||||
{
|
||||
num = (SizeT)(p->destLim - dest);
|
||||
if (num == 0)
|
||||
/* The 2-cycle dependency chain for the (v) calculation is not a big problem here.
|
||||
But we could remove the dependency chain with v = b at the end of the loop. */
|
||||
ONE_ITER(0)
|
||||
#if (NUM_ITERS > 1)
|
||||
ONE_ITER(1)
|
||||
#if (NUM_ITERS > 2)
|
||||
ONE_ITER(2)
|
||||
#if (NUM_ITERS > 3)
|
||||
ONE_ITER(3)
|
||||
#if (NUM_ITERS > 4)
|
||||
ONE_ITER(4)
|
||||
#if (NUM_ITERS > 5)
|
||||
ONE_ITER(5)
|
||||
#if (NUM_ITERS > 6)
|
||||
ONE_ITER(6)
|
||||
#if (NUM_ITERS > 7)
|
||||
ONE_ITER(7)
|
||||
#endif
|
||||
#endif
|
||||
#endif
|
||||
#endif
|
||||
#endif
|
||||
#endif
|
||||
#endif
|
||||
|
||||
src += NUM_ITERS;
|
||||
if (src == srcLim)
|
||||
break;
|
||||
}
|
||||
|
||||
if (src == srcLim)
|
||||
#if (NUM_ITERS > 1)
|
||||
for (;;)
|
||||
#endif
|
||||
{
|
||||
#if (NUM_ITERS > 1)
|
||||
if (src == p->lims[BCJ2_STREAM_MAIN] || dest == p->destLim)
|
||||
#endif
|
||||
{
|
||||
p->state = BCJ2_DEC_STATE_ORIG;
|
||||
const SizeT num = (SizeT)(src - p->bufs[BCJ2_STREAM_MAIN]);
|
||||
p->bufs[BCJ2_STREAM_MAIN] = src;
|
||||
p->dest = dest;
|
||||
p->ip += (UInt32)num;
|
||||
/* state BCJ2_STREAM_MAIN has higher priority than BCJ2_DEC_STATE_ORIG */
|
||||
p->state =
|
||||
src == p->lims[BCJ2_STREAM_MAIN] ?
|
||||
(unsigned)BCJ2_STREAM_MAIN :
|
||||
(unsigned)BCJ2_DEC_STATE_ORIG;
|
||||
p->temp = v;
|
||||
return SZ_OK;
|
||||
}
|
||||
#if (NUM_ITERS > 1)
|
||||
ONE_ITER(0)
|
||||
src++;
|
||||
#endif
|
||||
}
|
||||
|
||||
srcLim = src + num;
|
||||
|
||||
if (p->temp[3] == 0x0F && (src[0] & 0xF0) == 0x80)
|
||||
*dest = src[0];
|
||||
else for (;;)
|
||||
{
|
||||
Byte b = *src;
|
||||
*dest = b;
|
||||
if (b != 0x0F)
|
||||
{
|
||||
if ((b & 0xFE) == 0xE8)
|
||||
break;
|
||||
dest++;
|
||||
if (++src != srcLim)
|
||||
continue;
|
||||
break;
|
||||
}
|
||||
dest++;
|
||||
if (++src == srcLim)
|
||||
break;
|
||||
if ((*src & 0xF0) != 0x80)
|
||||
continue;
|
||||
*dest = *src;
|
||||
break;
|
||||
}
|
||||
|
||||
num = (SizeT)(src - p->bufs[BCJ2_STREAM_MAIN]);
|
||||
|
||||
if (src == srcLim)
|
||||
{
|
||||
p->temp[3] = src[-1];
|
||||
p->bufs[BCJ2_STREAM_MAIN] = src;
|
||||
const SizeT num = (SizeT)(dest - p->dest);
|
||||
p->dest = dest; // p->dest += num;
|
||||
p->bufs[BCJ2_STREAM_MAIN] += num; // = src;
|
||||
p->ip += (UInt32)num;
|
||||
p->dest += num;
|
||||
p->state =
|
||||
p->bufs[BCJ2_STREAM_MAIN] ==
|
||||
p->lims[BCJ2_STREAM_MAIN] ?
|
||||
(unsigned)BCJ2_STREAM_MAIN :
|
||||
(unsigned)BCJ2_DEC_STATE_ORIG;
|
||||
return SZ_OK;
|
||||
}
|
||||
|
||||
{
|
||||
UInt32 bound, ttt;
|
||||
CProb *prob;
|
||||
Byte b = src[0];
|
||||
Byte prev = (Byte)(num == 0 ? p->temp[3] : src[-1]);
|
||||
|
||||
p->temp[3] = b;
|
||||
p->bufs[BCJ2_STREAM_MAIN] = src + 1;
|
||||
num++;
|
||||
p->ip += (UInt32)num;
|
||||
p->dest += num;
|
||||
|
||||
prob = p->probs + (unsigned)(b == 0xE8 ? 2 + (unsigned)prev : (b == 0xE9 ? 1 : 0));
|
||||
|
||||
_IF_BIT_0
|
||||
CBcj2Prob *prob; // unsigned index;
|
||||
/*
|
||||
prob = p->probs + (unsigned)((Byte)v == 0xe8 ?
|
||||
2 + (Byte)(v >> 8) :
|
||||
((v >> 5) & 1)); // ((Byte)v < 0xe8 ? 0 : 1));
|
||||
*/
|
||||
{
|
||||
_UPDATE_0
|
||||
const unsigned c = ((v + 0x17) >> 6) & 1;
|
||||
prob = p->probs + (unsigned)
|
||||
(((0 - c) & (Byte)(v >> NUM_SHIFT_BITS)) + c + ((v >> 5) & 1));
|
||||
// (Byte)
|
||||
// 8x->0 : e9->1 : xxe8->xx+2
|
||||
// 8x->0x100 : e9->0x101 : xxe8->xx
|
||||
// (((0x100 - (e & ~v)) & (0x100 | (v >> 8))) + (e & v));
|
||||
// (((0x101 + (~e | v)) & (0x100 | (v >> 8))) + (e & v));
|
||||
}
|
||||
ttt = *prob;
|
||||
bound = (p->range >> kNumBitModelTotalBits) * ttt;
|
||||
if (p->code < bound)
|
||||
{
|
||||
// bcj2_stats[prob - p->probs][0]++;
|
||||
p->range = bound;
|
||||
*prob = (CBcj2Prob)(ttt + ((kBitModelTotal - ttt) >> kNumMoveBits));
|
||||
continue;
|
||||
}
|
||||
_UPDATE_1
|
||||
|
||||
{
|
||||
// bcj2_stats[prob - p->probs][1]++;
|
||||
p->range -= bound;
|
||||
p->code -= bound;
|
||||
*prob = (CBcj2Prob)(ttt - (ttt >> kNumMoveBits));
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
{
|
||||
UInt32 val;
|
||||
unsigned cj = (p->temp[3] == 0xE8) ? BCJ2_STREAM_CALL : BCJ2_STREAM_JUMP;
|
||||
/* (v == 0xe8 ? 0 : 1) uses a setcc instruction with additional zero-register usage in x64 MSVC. */
|
||||
// const unsigned cj = ((Byte)v == 0xe8) ? BCJ2_STREAM_CALL : BCJ2_STREAM_JUMP;
|
||||
const unsigned cj = (((v + 0x57) >> 6) & 1) + BCJ2_STREAM_CALL;
|
||||
const Byte *cur = p->bufs[cj];
|
||||
Byte *dest;
|
||||
SizeT rem;
|
||||
|
||||
if (cur == p->lims[cj])
|
||||
{
|
||||
p->state = cj;
|
||||
break;
|
||||
}
|
||||
|
||||
val = GetBe32(cur);
|
||||
v = GetBe32a(cur);
|
||||
p->bufs[cj] = cur + 4;
|
||||
|
||||
p->ip += 4;
|
||||
val -= p->ip;
|
||||
{
|
||||
const UInt32 ip = p->ip + 4;
|
||||
v -= ip;
|
||||
p->ip = ip;
|
||||
}
|
||||
dest = p->dest;
|
||||
rem = (SizeT)(p->destLim - dest);
|
||||
|
||||
if (rem < 4)
|
||||
{
|
||||
p->temp[0] = (Byte)val; if (rem > 0) dest[0] = (Byte)val; val >>= 8;
|
||||
p->temp[1] = (Byte)val; if (rem > 1) dest[1] = (Byte)val; val >>= 8;
|
||||
p->temp[2] = (Byte)val; if (rem > 2) dest[2] = (Byte)val; val >>= 8;
|
||||
p->temp[3] = (Byte)val;
|
||||
if ((unsigned)rem > 0) { dest[0] = (Byte)v; v >>= 8;
|
||||
if ((unsigned)rem > 1) { dest[1] = (Byte)v; v >>= 8;
|
||||
if ((unsigned)rem > 2) { dest[2] = (Byte)v; v >>= 8; }}}
|
||||
p->temp = v;
|
||||
p->dest = dest + rem;
|
||||
p->state = BCJ2_DEC_STATE_ORIG_0 + (unsigned)rem;
|
||||
break;
|
||||
}
|
||||
|
||||
SetUi32(dest, val);
|
||||
p->temp[3] = (Byte)(val >> 24);
|
||||
SetUi32(dest, v)
|
||||
v >>= 24;
|
||||
p->dest = dest + 4;
|
||||
}
|
||||
}
|
||||
|
@ -252,6 +278,13 @@ SRes Bcj2Dec_Decode(CBcj2Dec *p)
|
|||
p->range <<= 8;
|
||||
p->code = (p->code << 8) | *(p->bufs[BCJ2_STREAM_RC])++;
|
||||
}
|
||||
|
||||
return SZ_OK;
|
||||
}
|
||||
|
||||
#undef NUM_ITERS
|
||||
#undef ONE_ITER
|
||||
#undef NUM_SHIFT_BITS
|
||||
#undef kTopValue
|
||||
#undef kNumBitModelTotalBits
|
||||
#undef kBitModelTotal
|
||||
#undef kNumMoveBits
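/* Editor's note: a small self-check, not part of the original source, for two of the
   branchless expressions used in Bcj2Dec_Decode above (NUM_SHIFT_BITS == 24 is assumed,
   matching the value #undef'd above). It verifies that
     ((b + (0x100 - 0xe8)) & 0xfe) == 0          detects exactly b == 0xe8 / 0xe9, and
     ((0 - c) & (Byte)(v >> 24)) + c + ((v >> 5) & 1), with c = ((v + 0x17) >> 6) & 1,
   reproduces the straightforward index: 0 for 8x, 1 for e9, 2 + prev for e8. */
#include <assert.h>

static void Bcj2_BranchlessSelfCheck(void)
{
  unsigned b, prev;
  for (b = 0; b < 256; b++)
  {
    const int isCallJump = (b == 0xe8 || b == 0xe9);
    assert((((b + (0x100 - 0xe8)) & 0xfe) == 0) == isCallJump);
  }
  for (prev = 0; prev < 256; prev++)
    for (b = 0x80; b <= 0xe9; b += (b == 0x8f ? 0x59 : 1))  /* 0x80..0x8f, 0xe8, 0xe9 */
    {
      const UInt32 v = ((UInt32)prev << 24) | b;             /* previous byte | current byte */
      const unsigned c = ((v + 0x17) >> 6) & 1;              /* 1 only for b == 0xe8 */
      const unsigned idx = ((0 - c) & (Byte)(v >> 24)) + c + ((v >> 5) & 1);
      const unsigned ref = b == 0xe8 ? 2 + prev : (b == 0xe9 ? 1 : 0);
      assert(idx == ref);
    }
}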
|
||||
|
|
|
@ -1,60 +1,62 @@
|
|||
/* Bcj2Enc.c -- BCJ2 Encoder (Converter for x86 code)
|
||||
2021-02-09 : Igor Pavlov : Public domain */
|
||||
/* Bcj2Enc.c -- BCJ2 Encoder converter for x86 code (Branch CALL/JUMP variant2)
|
||||
2023-04-02 : Igor Pavlov : Public domain */
|
||||
|
||||
#include "Precomp.h"
|
||||
|
||||
/* #define SHOW_STAT */
|
||||
|
||||
#ifdef SHOW_STAT
|
||||
#include <stdio.h>
|
||||
#define PRF(x) x
|
||||
#define PRF2(s) printf("%s ip=%8x tempPos=%d src= %8x\n", s, (unsigned)p->ip64, p->tempPos, (unsigned)(p->srcLim - p->src));
|
||||
#else
|
||||
#define PRF(x)
|
||||
#define PRF2(s)
|
||||
#endif
|
||||
|
||||
#include <string.h>
|
||||
|
||||
#include "Bcj2.h"
|
||||
#include "CpuArch.h"
|
||||
|
||||
#define CProb UInt16
|
||||
|
||||
#define kTopValue ((UInt32)1 << 24)
|
||||
#define kNumModelBits 11
|
||||
#define kBitModelTotal (1 << kNumModelBits)
|
||||
#define kNumBitModelTotalBits 11
|
||||
#define kBitModelTotal (1 << kNumBitModelTotalBits)
|
||||
#define kNumMoveBits 5
|
||||
|
||||
void Bcj2Enc_Init(CBcj2Enc *p)
|
||||
{
|
||||
unsigned i;
|
||||
|
||||
p->state = BCJ2_ENC_STATE_OK;
|
||||
p->state = BCJ2_ENC_STATE_ORIG;
|
||||
p->finishMode = BCJ2_ENC_FINISH_MODE_CONTINUE;
|
||||
|
||||
p->prevByte = 0;
|
||||
|
||||
p->context = 0;
|
||||
p->flushRem = 5;
|
||||
p->isFlushState = 0;
|
||||
p->cache = 0;
|
||||
p->range = 0xFFFFFFFF;
|
||||
p->range = 0xffffffff;
|
||||
p->low = 0;
|
||||
p->cacheSize = 1;
|
||||
|
||||
p->ip = 0;
|
||||
|
||||
p->fileIp = 0;
|
||||
p->fileSize = 0;
|
||||
p->relatLimit = BCJ2_RELAT_LIMIT;
|
||||
|
||||
p->ip64 = 0;
|
||||
p->fileIp64 = 0;
|
||||
p->fileSize64_minus1 = BCJ2_ENC_FileSizeField_UNLIMITED;
|
||||
p->relatLimit = BCJ2_ENC_RELAT_LIMIT_DEFAULT;
|
||||
// p->relatExcludeBits = 0;
|
||||
p->tempPos = 0;
|
||||
|
||||
p->flushPos = 0;
|
||||
|
||||
for (i = 0; i < sizeof(p->probs) / sizeof(p->probs[0]); i++)
|
||||
p->probs[i] = kBitModelTotal >> 1;
|
||||
}
|
||||
|
||||
static BoolInt MY_FAST_CALL RangeEnc_ShiftLow(CBcj2Enc *p)
|
||||
// Z7_NO_INLINE
|
||||
Z7_FORCE_INLINE
|
||||
static BoolInt Bcj2_RangeEnc_ShiftLow(CBcj2Enc *p)
|
||||
{
|
||||
if ((UInt32)p->low < (UInt32)0xFF000000 || (UInt32)(p->low >> 32) != 0)
|
||||
const UInt32 low = (UInt32)p->low;
|
||||
const unsigned high = (unsigned)
|
||||
#if defined(Z7_MSC_VER_ORIGINAL) \
|
||||
&& defined(MY_CPU_X86) \
|
||||
&& defined(MY_CPU_LE) \
|
||||
&& !defined(MY_CPU_64BIT)
|
||||
// we try to rid of __aullshr() call in MSVS-x86
|
||||
(((const UInt32 *)&p->low)[1]); // [1] : for little-endian only
|
||||
#else
|
||||
(p->low >> 32);
|
||||
#endif
|
||||
if (low < (UInt32)0xff000000 || high != 0)
|
||||
{
|
||||
Byte *buf = p->bufs[BCJ2_STREAM_RC];
|
||||
do
@@ -65,247 +67,440 @@ static BoolInt MY_FAST_CALL RangeEnc_ShiftLow(CBcj2Enc *p)
p->bufs[BCJ2_STREAM_RC] = buf;
|
||||
return True;
|
||||
}
|
||||
*buf++ = (Byte)(p->cache + (Byte)(p->low >> 32));
|
||||
p->cache = 0xFF;
|
||||
*buf++ = (Byte)(p->cache + high);
|
||||
p->cache = 0xff;
|
||||
}
|
||||
while (--p->cacheSize);
|
||||
p->bufs[BCJ2_STREAM_RC] = buf;
|
||||
p->cache = (Byte)((UInt32)p->low >> 24);
|
||||
p->cache = (Byte)(low >> 24);
|
||||
}
|
||||
p->cacheSize++;
|
||||
p->low = (UInt32)p->low << 8;
|
||||
p->low = low << 8;
|
||||
return False;
|
||||
}
|
||||
|
||||
|
||||
/*
We can use 2 alternative versions of the code:
1) non-marker version:
Byte CBcj2Enc::context
Byte temp[8];
The last byte of a marker (e8/e9/[0f]8x) can be written to the temp[] buffer.
The encoder writes the last byte of a marker (e8/e9/[0f]8x) to dest only in conjunction
with writing the branch symbol to the range coder in the same Bcj2Enc_Encode_2() call.

2) marker version:
UInt32 CBcj2Enc::context
Byte CBcj2Enc::temp[4];
MARKER_FLAG in CBcj2Enc::context shows that CBcj2Enc::context contains a found marker.
It's allowed that
one call of Bcj2Enc_Encode_2() writes the last byte of a marker (e8/e9/[0f]8x) to dest,
and another call of Bcj2Enc_Encode_2() does the offset conversion.
So different values of (fileIp) and (fileSize) are possible
in these different Bcj2Enc_Encode_2() calls.

Also, the marker version requires an additional if ((v & MARKER_FLAG) == 0) check in the main loop.
So we use the non-marker version.
*/
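/* Illustrative sketch (not part of the SDK, hypothetical name): the byte patterns
   that BCJ2 treats as branch markers, written as a plain predicate over two
   consecutive input bytes. */
static int Bcj2Sketch_IsMarker(unsigned prevByte, unsigned curByte)
{
  if (curByte == 0xE8 || curByte == 0xE9)
    return 1;                              /* call rel32 / jmp rel32 */
  if (prevByte == 0x0F && (curByte & 0xF0) == 0x80)
    return 1;                              /* jcc rel32 (0F 80 .. 0F 8F) */
  return 0;
}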
|
||||
|
||||
/*
Corner cases with overlap in multi-block mode.
Before v23 there was one corner case, where a converted instruction
could start in one sub-stream and finish in the next sub-stream:
if multi-block (solid) encoding is used,
and BCJ2_ENC_FINISH_MODE_END_BLOCK is used for each sub-stream,
and (0f) is the last byte of the previous sub-stream,
and (8x) is the first byte of the current sub-stream,
then the (0f 8x) pair is treated as a marker by the BCJ2 encoder and decoder.
The BCJ2 encoder can convert the 32-bit offset for that (0f 8x) pair,
if that offset meets the limit requirements.
If the encoder allows 32-bit offset conversion for such an overlap case,
then the data in the 3 uncompressed BCJ2 streams for some sub-stream
can depend on the data of the previous sub-stream.
That corner case is not a big problem, and it's a rare case.
Since v23.00 we do an additional check to prevent conversions in such overlap cases.
*/
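/* Illustrative sketch (hypothetical helper, assuming relatLimit is far below 2^31,
   as with BCJ2_ENC_RELAT_LIMIT_DEFAULT): the offset-limit test used by the encoder,
   ((relat + relatLimit) >> 1) < relatLimit, accepts a signed relative offset roughly
   when its magnitude is below relatLimit. */
#include "7zTypes.h"

static int Bcj2Sketch_OffsetWithinLimit(UInt32 relat, UInt32 relatLimit)
{
  /* true for (Int32)relat in [-(Int32)relatLimit, (Int32)relatLimit) */
  return ((relat + relatLimit) >> 1) < relatLimit;
}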
|
||||
|
||||
/*
Bcj2Enc_Encode_2() output variables at exit:
{
if (Bcj2Enc_Encode_2() exits with (p->state == BCJ2_ENC_STATE_ORIG))
{
it means that the encoder needs more input data.
if (p->srcLim == p->src) at exit, then
{
(p->finishMode != BCJ2_ENC_FINISH_MODE_END_STREAM)
all input data were read and processed, and we are ready for
new input data.
}
else
{
(p->srcLim != p->src)
(p->finishMode == BCJ2_ENC_FINISH_MODE_CONTINUE)
The encoder has found an e8/e9/0f_8x marker,
and p->src points to the last byte of that marker.
Bcj2Enc_Encode_2() needs more input data to get a total of
5 bytes (the last byte of the marker and the 32-bit branch offset)
as a continuous array starting from p->src.
The (p->srcLim - p->src < 5) requirement is met after exit.
So the non-processed residue from p->src to p->srcLim is always less than 5 bytes.
}
}
}
*/
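/* Illustrative sketch (hypothetical helper, not part of the SDK): the "needs more
   bytes to complete marker + 32-bit offset" exit condition described in the comment
   above, written as one predicate over the encoder state. */
#include "Bcj2.h"

static int Bcj2Sketch_NeedsMoreMarkerBytes(const CBcj2Enc *p)
{
  return p->state == BCJ2_ENC_STATE_ORIG
      && p->src != p->srcLim                       /* unprocessed residue remains */
      && (SizeT)(p->srcLim - p->src) < 5;          /* marker byte + 4 offset bytes */
}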
|
||||
|
||||
Z7_NO_INLINE
|
||||
static void Bcj2Enc_Encode_2(CBcj2Enc *p)
|
||||
{
|
||||
if (BCJ2_IS_32BIT_STREAM(p->state))
|
||||
if (!p->isFlushState)
|
||||
{
|
||||
Byte *cur = p->bufs[p->state];
|
||||
if (cur == p->lims[p->state])
|
||||
return;
|
||||
SetBe32(cur, p->tempTarget);
|
||||
p->bufs[p->state] = cur + 4;
|
||||
}
|
||||
|
||||
p->state = BCJ2_ENC_STATE_ORIG;
|
||||
|
||||
for (;;)
|
||||
{
|
||||
if (p->range < kTopValue)
|
||||
{
|
||||
if (RangeEnc_ShiftLow(p))
|
||||
return;
|
||||
p->range <<= 8;
|
||||
}
|
||||
|
||||
const Byte *src;
|
||||
UInt32 v;
|
||||
{
|
||||
const unsigned state = p->state;
|
||||
if (BCJ2_IS_32BIT_STREAM(state))
|
||||
{
|
||||
Byte *cur = p->bufs[state];
|
||||
if (cur == p->lims[state])
|
||||
return;
|
||||
SetBe32a(cur, p->tempTarget)
|
||||
p->bufs[state] = cur + 4;
|
||||
}
|
||||
}
|
||||
p->state = BCJ2_ENC_STATE_ORIG; // for main reason of exit
|
||||
src = p->src;
|
||||
v = p->context;
|
||||
|
||||
// #define WRITE_CONTEXT p->context = v; // for marker version
|
||||
#define WRITE_CONTEXT p->context = (Byte)v;
|
||||
#define WRITE_CONTEXT_AND_SRC p->src = src; WRITE_CONTEXT
|
||||
|
||||
for (;;)
|
||||
{
|
||||
// const Byte *src;
|
||||
// UInt32 v;
|
||||
CBcj2Enc_ip_unsigned ip;
|
||||
if (p->range < kTopValue)
|
||||
{
|
||||
// to reduce register pressure and code size: we save and restore local variables.
|
||||
WRITE_CONTEXT_AND_SRC
|
||||
if (Bcj2_RangeEnc_ShiftLow(p))
|
||||
return;
|
||||
p->range <<= 8;
|
||||
src = p->src;
|
||||
v = p->context;
|
||||
}
|
||||
// src = p->src;
|
||||
// #define MARKER_FLAG ((UInt32)1 << 17)
|
||||
// if ((v & MARKER_FLAG) == 0) // for marker version
|
||||
{
|
||||
const Byte *src = p->src;
|
||||
const Byte *srcLim;
|
||||
Byte *dest;
|
||||
SizeT num = (SizeT)(p->srcLim - src);
|
||||
|
||||
if (p->finishMode == BCJ2_ENC_FINISH_MODE_CONTINUE)
|
||||
Byte *dest = p->bufs[BCJ2_STREAM_MAIN];
|
||||
{
|
||||
if (num <= 4)
|
||||
return;
|
||||
num -= 4;
|
||||
const SizeT remSrc = (SizeT)(p->srcLim - src);
|
||||
SizeT rem = (SizeT)(p->lims[BCJ2_STREAM_MAIN] - dest);
|
||||
if (rem >= remSrc)
|
||||
rem = remSrc;
|
||||
srcLim = src + rem;
|
||||
}
|
||||
else if (num == 0)
|
||||
break;
|
||||
/* p->context contains context of previous byte:
|
||||
bits [0 : 7] : src[-1], if (src) was changed in this call
|
||||
bits [8 : 31] : are undefined for non-marker version
|
||||
*/
|
||||
// v = p->context;
|
||||
#define NUM_SHIFT_BITS 24
|
||||
#define CONV_FLAG ((UInt32)1 << 16)
|
||||
#define ONE_ITER { \
|
||||
b = src[0]; \
|
||||
*dest++ = (Byte)b; \
|
||||
v = (v << NUM_SHIFT_BITS) | b; \
|
||||
if (((b + (0x100 - 0xe8)) & 0xfe) == 0) break; \
|
||||
if (((v - (((UInt32)0x0f << (NUM_SHIFT_BITS)) + 0x80)) & \
|
||||
((((UInt32)1 << (4 + NUM_SHIFT_BITS)) - 0x1) << 4)) == 0) break; \
|
||||
src++; if (src == srcLim) { break; } }
|
||||
|
||||
dest = p->bufs[BCJ2_STREAM_MAIN];
|
||||
if (num > (SizeT)(p->lims[BCJ2_STREAM_MAIN] - dest))
|
||||
if (src != srcLim)
|
||||
for (;;)
|
||||
{
|
||||
num = (SizeT)(p->lims[BCJ2_STREAM_MAIN] - dest);
|
||||
if (num == 0)
|
||||
/* clang can generate ineffective code with setne instead of two jcc instructions.
we can use 2 iterations and an external (unsigned b) to avoid that ineffective code generation. */
|
||||
unsigned b;
|
||||
ONE_ITER
|
||||
ONE_ITER
|
||||
}
|
||||
|
||||
ip = p->ip64 + (CBcj2Enc_ip_unsigned)(SizeT)(dest - p->bufs[BCJ2_STREAM_MAIN]);
|
||||
p->bufs[BCJ2_STREAM_MAIN] = dest;
|
||||
p->ip64 = ip;
|
||||
|
||||
if (src == srcLim)
|
||||
{
|
||||
WRITE_CONTEXT_AND_SRC
|
||||
if (src != p->srcLim)
|
||||
{
|
||||
p->state = BCJ2_STREAM_MAIN;
|
||||
return;
|
||||
}
|
||||
}
|
||||
|
||||
srcLim = src + num;
|
||||
|
||||
if (p->prevByte == 0x0F && (src[0] & 0xF0) == 0x80)
|
||||
*dest = src[0];
|
||||
else for (;;)
|
||||
{
|
||||
Byte b = *src;
|
||||
*dest = b;
|
||||
if (b != 0x0F)
|
||||
{
|
||||
if ((b & 0xFE) == 0xE8)
|
||||
break;
|
||||
dest++;
|
||||
if (++src != srcLim)
|
||||
continue;
|
||||
break;
|
||||
}
|
||||
dest++;
|
||||
if (++src == srcLim)
|
||||
break;
|
||||
if ((*src & 0xF0) != 0x80)
|
||||
continue;
|
||||
*dest = *src;
|
||||
/* (p->src == p->srcLim)
|
||||
(p->state == BCJ2_ENC_STATE_ORIG) */
|
||||
if (p->finishMode != BCJ2_ENC_FINISH_MODE_END_STREAM)
|
||||
return;
|
||||
/* (p->finishMode == BCJ2_ENC_FINISH_MODE_END_STREAM */
|
||||
// (p->flushRem == 5);
|
||||
p->isFlushState = 1;
|
||||
break;
|
||||
}
|
||||
|
||||
num = (SizeT)(src - p->src);
|
||||
|
||||
if (src == srcLim)
|
||||
src++;
|
||||
// p->src = src;
|
||||
}
|
||||
// ip = p->ip; // for marker version
|
||||
/* marker was found */
|
||||
/* (v) contains marker that was found:
|
||||
bits [NUM_SHIFT_BITS : NUM_SHIFT_BITS + 7]
|
||||
: value of src[-2] : xx/xx/0f
|
||||
bits [0 : 7] : value of src[-1] : e8/e9/8x
|
||||
*/
|
||||
{
|
||||
{
|
||||
p->prevByte = src[-1];
|
||||
p->bufs[BCJ2_STREAM_MAIN] = dest;
|
||||
p->src = src;
|
||||
p->ip += (UInt32)num;
|
||||
continue;
|
||||
}
|
||||
|
||||
{
|
||||
Byte context = (Byte)(num == 0 ? p->prevByte : src[-1]);
|
||||
BoolInt needConvert;
|
||||
|
||||
p->bufs[BCJ2_STREAM_MAIN] = dest + 1;
|
||||
p->ip += (UInt32)num + 1;
|
||||
src++;
|
||||
|
||||
needConvert = False;
|
||||
|
||||
#if NUM_SHIFT_BITS != 24
|
||||
v &= ~(UInt32)CONV_FLAG;
|
||||
#endif
|
||||
// UInt32 relat = 0;
|
||||
if ((SizeT)(p->srcLim - src) >= 4)
|
||||
{
|
||||
UInt32 relatVal = GetUi32(src);
|
||||
if ((p->fileSize == 0 || (UInt32)(p->ip + 4 + relatVal - p->fileIp) < p->fileSize)
|
||||
&& ((relatVal + p->relatLimit) >> 1) < p->relatLimit)
|
||||
needConvert = True;
|
||||
}
|
||||
|
||||
{
|
||||
UInt32 bound;
|
||||
unsigned ttt;
|
||||
Byte b = src[-1];
|
||||
CProb *prob = p->probs + (unsigned)(b == 0xE8 ? 2 + (unsigned)context : (b == 0xE9 ? 1 : 0));
|
||||
|
||||
ttt = *prob;
|
||||
bound = (p->range >> kNumModelBits) * ttt;
|
||||
|
||||
if (!needConvert)
|
||||
/*
|
||||
if (relat != 0 || (Byte)v != 0xe8)
|
||||
BoolInt isBigOffset = True;
|
||||
*/
|
||||
const UInt32 relat = GetUi32(src);
|
||||
/*
|
||||
#define EXCLUDE_FLAG ((UInt32)1 << 4)
|
||||
#define NEED_CONVERT(rel) ((((rel) + EXCLUDE_FLAG) & (0 - EXCLUDE_FLAG * 2)) != 0)
|
||||
if (p->relatExcludeBits != 0)
|
||||
{
|
||||
const UInt32 flag = (UInt32)1 << (p->relatExcludeBits - 1);
|
||||
isBigOffset = (((relat + flag) & (0 - flag * 2)) != 0);
|
||||
}
|
||||
// isBigOffset = False; // for debug
|
||||
*/
|
||||
ip -= p->fileIp64;
|
||||
// Use the following if check, if (ip) is 64-bit:
|
||||
if (ip > (((v + 0x20) >> 5) & 1)) // 23.00 : we eliminate multi-block overlap for (0f 80) and (e8/e9)
|
||||
if ((CBcj2Enc_ip_unsigned)((CBcj2Enc_ip_signed)ip + 4 + (Int32)relat) <= p->fileSize64_minus1)
|
||||
if (((UInt32)(relat + p->relatLimit) >> 1) < p->relatLimit)
|
||||
v |= CONV_FLAG;
|
||||
}
|
||||
else if (p->finishMode == BCJ2_ENC_FINISH_MODE_CONTINUE)
|
||||
{
|
||||
// (p->srcLim - src < 4)
|
||||
// /*
|
||||
// for non-marker version
|
||||
p->ip64--; // p->ip = ip - 1;
|
||||
p->bufs[BCJ2_STREAM_MAIN]--;
|
||||
src--;
|
||||
v >>= NUM_SHIFT_BITS;
|
||||
// (0 < p->srcLim - p->src <= 4)
|
||||
// */
|
||||
// v |= MARKER_FLAG; // for marker version
|
||||
/* (p->state == BCJ2_ENC_STATE_ORIG) */
|
||||
WRITE_CONTEXT_AND_SRC
|
||||
return;
|
||||
}
|
||||
{
|
||||
const unsigned c = ((v + 0x17) >> 6) & 1;
|
||||
CBcj2Prob *prob = p->probs + (unsigned)
|
||||
(((0 - c) & (Byte)(v >> NUM_SHIFT_BITS)) + c + ((v >> 5) & 1));
|
||||
/*
|
||||
((Byte)v == 0xe8 ? 2 + ((Byte)(v >> 8)) :
|
||||
((Byte)v < 0xe8 ? 0 : 1)); // ((v >> 5) & 1));
|
||||
*/
|
||||
const unsigned ttt = *prob;
|
||||
const UInt32 bound = (p->range >> kNumBitModelTotalBits) * ttt;
|
||||
if ((v & CONV_FLAG) == 0)
|
||||
{
|
||||
// static int yyy = 0; yyy++; printf("\n!needConvert = %d\n", yyy);
|
||||
// v = (Byte)v; // for marker version
|
||||
p->range = bound;
|
||||
*prob = (CProb)(ttt + ((kBitModelTotal - ttt) >> kNumMoveBits));
|
||||
p->src = src;
|
||||
p->prevByte = b;
|
||||
*prob = (CBcj2Prob)(ttt + ((kBitModelTotal - ttt) >> kNumMoveBits));
|
||||
// WRITE_CONTEXT_AND_SRC
|
||||
continue;
|
||||
}
|
||||
|
||||
p->low += bound;
|
||||
p->range -= bound;
|
||||
*prob = (CProb)(ttt - (ttt >> kNumMoveBits));
|
||||
|
||||
*prob = (CBcj2Prob)(ttt - (ttt >> kNumMoveBits));
|
||||
}
|
||||
// p->context = src[3];
|
||||
{
|
||||
// const unsigned cj = ((Byte)v == 0xe8 ? BCJ2_STREAM_CALL : BCJ2_STREAM_JUMP);
|
||||
const unsigned cj = (((v + 0x57) >> 6) & 1) + BCJ2_STREAM_CALL;
|
||||
ip = p->ip64;
|
||||
v = GetUi32(src); // relat
|
||||
ip += 4;
|
||||
p->ip64 = ip;
|
||||
src += 4;
|
||||
// p->src = src;
|
||||
{
|
||||
UInt32 relatVal = GetUi32(src);
|
||||
UInt32 absVal;
|
||||
p->ip += 4;
|
||||
absVal = p->ip + relatVal;
|
||||
p->prevByte = src[3];
|
||||
src += 4;
|
||||
p->src = src;
|
||||
const UInt32 absol = (UInt32)ip + v;
|
||||
Byte *cur = p->bufs[cj];
|
||||
v >>= 24;
|
||||
// WRITE_CONTEXT
|
||||
if (cur == p->lims[cj])
|
||||
{
|
||||
unsigned cj = (b == 0xE8) ? BCJ2_STREAM_CALL : BCJ2_STREAM_JUMP;
|
||||
Byte *cur = p->bufs[cj];
|
||||
if (cur == p->lims[cj])
|
||||
{
|
||||
p->state = cj;
|
||||
p->tempTarget = absVal;
|
||||
return;
|
||||
}
|
||||
SetBe32(cur, absVal);
|
||||
p->bufs[cj] = cur + 4;
|
||||
p->state = cj;
|
||||
p->tempTarget = absol;
|
||||
WRITE_CONTEXT_AND_SRC
|
||||
return;
|
||||
}
|
||||
SetBe32a(cur, absol)
|
||||
p->bufs[cj] = cur + 4;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
} // end of loop
|
||||
}
|
||||
|
||||
if (p->finishMode != BCJ2_ENC_FINISH_MODE_END_STREAM)
|
||||
return;
|
||||
|
||||
for (; p->flushPos < 5; p->flushPos++)
|
||||
if (RangeEnc_ShiftLow(p))
|
||||
for (; p->flushRem != 0; p->flushRem--)
|
||||
if (Bcj2_RangeEnc_ShiftLow(p))
|
||||
return;
|
||||
p->state = BCJ2_ENC_STATE_OK;
|
||||
p->state = BCJ2_ENC_STATE_FINISHED;
|
||||
}
|
||||
|
||||
|
||||
/*
The BCJ2 encoder needs a look-ahead of up to 4 bytes in the (src) buffer.
So the base function Bcj2Enc_Encode_2()
in BCJ2_ENC_FINISH_MODE_CONTINUE mode can return with
(p->state == BCJ2_ENC_STATE_ORIG && p->src < p->srcLim).
Bcj2Enc_Encode() solves that look-ahead problem by using the p->temp[] buffer,
so if (p->state == BCJ2_ENC_STATE_ORIG) after Bcj2Enc_Encode(),
then (p->src == p->srcLim).
And the caller's code is simpler with Bcj2Enc_Encode().
*/
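/* Illustrative sketch (hypothetical helper): the idea behind the p->temp[] buffer -
   when fewer than 5 bytes remain, the unprocessed tail is copied aside so that the
   next call can present the marker byte and its 32-bit offset contiguously. */
#include <string.h>

static unsigned LookaheadSketch_CarryTail(unsigned char temp[4],
    const unsigned char *src, const unsigned char *srcLim)
{
  const size_t rem = (size_t)(srcLim - src);   /* caller guarantees rem <= 4 */
  memcpy(temp, src, rem);
  return (unsigned)rem;
}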
|
||||
|
||||
Z7_NO_INLINE
|
||||
void Bcj2Enc_Encode(CBcj2Enc *p)
|
||||
{
|
||||
PRF(printf("\n"));
|
||||
PRF(printf("---- ip = %8d tempPos = %8d src = %8d\n", p->ip, p->tempPos, p->srcLim - p->src));
|
||||
|
||||
PRF2("\n----")
|
||||
if (p->tempPos != 0)
|
||||
{
|
||||
/* extra: number of bytes that were copied from (src) to (temp) buffer in this call */
|
||||
unsigned extra = 0;
|
||||
|
||||
/* We touch only the minimal required number of bytes in the input (src) stream.
So we add input bytes from the (src) stream to temp[] in steps of 1 byte.
We don't add new bytes to temp[] before the Bcj2Enc_Encode_2() call
in the first loop iteration, because:
- the previous call of Bcj2Enc_Encode() could use another (finishMode),
- the previous call could finish with (p->state != BCJ2_ENC_STATE_ORIG).
The case with a full temp[] buffer (p->tempPos == 4) is possible here.
*/
|
||||
for (;;)
|
||||
{
|
||||
// (0 < p->tempPos <= 5) // in non-marker version
|
||||
/* p->src : the current src data position including extra bytes
|
||||
that were copied to temp[] buffer in this call */
|
||||
const Byte *src = p->src;
|
||||
const Byte *srcLim = p->srcLim;
|
||||
EBcj2Enc_FinishMode finishMode = p->finishMode;
|
||||
|
||||
const EBcj2Enc_FinishMode finishMode = p->finishMode;
|
||||
if (src != srcLim)
|
||||
{
|
||||
/* if there are some src data after the data copied to temp[],
|
||||
then we use MODE_CONTINUE for temp data */
|
||||
p->finishMode = BCJ2_ENC_FINISH_MODE_CONTINUE;
|
||||
}
|
||||
p->src = p->temp;
|
||||
p->srcLim = p->temp + p->tempPos;
|
||||
if (src != srcLim)
|
||||
p->finishMode = BCJ2_ENC_FINISH_MODE_CONTINUE;
|
||||
|
||||
PRF(printf(" ip = %8d tempPos = %8d src = %8d\n", p->ip, p->tempPos, p->srcLim - p->src));
|
||||
|
||||
PRF2(" ")
|
||||
Bcj2Enc_Encode_2(p);
|
||||
|
||||
{
|
||||
unsigned num = (unsigned)(p->src - p->temp);
|
||||
unsigned tempPos = p->tempPos - num;
|
||||
const unsigned num = (unsigned)(p->src - p->temp);
|
||||
const unsigned tempPos = p->tempPos - num;
|
||||
unsigned i;
|
||||
p->tempPos = tempPos;
|
||||
for (i = 0; i < tempPos; i++)
|
||||
p->temp[i] = p->temp[(size_t)i + num];
|
||||
|
||||
p->temp[i] = p->temp[(SizeT)i + num];
|
||||
// tempPos : number of bytes in temp buffer
|
||||
p->src = src;
|
||||
p->srcLim = srcLim;
|
||||
p->finishMode = finishMode;
|
||||
|
||||
if (p->state != BCJ2_ENC_STATE_ORIG || src == srcLim)
|
||||
if (p->state != BCJ2_ENC_STATE_ORIG)
|
||||
{
|
||||
// (p->tempPos <= 4) // in non-marker version
|
||||
/* if (the reason of exit from Bcj2Enc_Encode_2()
|
||||
is not BCJ2_ENC_STATE_ORIG),
|
||||
then we exit from Bcj2Enc_Encode() with same reason */
|
||||
// optional code begin : we rollback (src) and tempPos, if it's possible:
|
||||
if (extra >= tempPos)
|
||||
extra = tempPos;
|
||||
p->src = src - extra;
|
||||
p->tempPos = tempPos - extra;
|
||||
// optional code end : rollback of (src) and tempPos
|
||||
return;
|
||||
|
||||
}
|
||||
/* (p->tempPos <= 4)
|
||||
(p->state == BCJ2_ENC_STATE_ORIG)
|
||||
so encoder needs more data than in temp[] */
|
||||
if (src == srcLim)
|
||||
return; // src buffer has no more input data.
|
||||
/* (src != srcLim)
|
||||
so we can provide more input data from src for Bcj2Enc_Encode_2() */
|
||||
if (extra >= tempPos)
|
||||
{
|
||||
p->src = src - tempPos;
|
||||
/* (extra >= tempPos) means that temp buffer contains
|
||||
only data from src buffer of this call.
|
||||
So now we can encode without temp buffer */
|
||||
p->src = src - tempPos; // rollback (src)
|
||||
p->tempPos = 0;
|
||||
break;
|
||||
}
|
||||
|
||||
p->temp[tempPos] = src[0];
|
||||
// we append one additional extra byte from (src) to temp[] buffer:
|
||||
p->temp[tempPos] = *src;
|
||||
p->tempPos = tempPos + 1;
|
||||
// (0 < p->tempPos <= 5) // in non-marker version
|
||||
p->src = src + 1;
|
||||
extra++;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
PRF(printf("++++ ip = %8d tempPos = %8d src = %8d\n", p->ip, p->tempPos, p->srcLim - p->src));
|
||||
|
||||
PRF2("++++")
|
||||
// (p->tempPos == 0)
|
||||
Bcj2Enc_Encode_2(p);
|
||||
PRF2("====")
|
||||
|
||||
if (p->state == BCJ2_ENC_STATE_ORIG)
|
||||
{
|
||||
const Byte *src = p->src;
|
||||
unsigned rem = (unsigned)(p->srcLim - src);
|
||||
unsigned i;
|
||||
for (i = 0; i < rem; i++)
|
||||
p->temp[i] = src[i];
|
||||
p->tempPos = rem;
|
||||
p->src = src + rem;
|
||||
const Byte *srcLim = p->srcLim;
|
||||
const unsigned rem = (unsigned)(srcLim - src);
|
||||
/* (rem <= 4) here.
|
||||
if (p->src != p->srcLim), then
|
||||
- we copy non-processed bytes from (p->src) to temp[] buffer,
|
||||
- we set p->src equal to p->srcLim.
|
||||
*/
|
||||
if (rem)
|
||||
{
|
||||
unsigned i = 0;
|
||||
p->src = srcLim;
|
||||
p->tempPos = rem;
|
||||
// (0 < p->tempPos <= 4)
|
||||
do
|
||||
p->temp[i] = src[i];
|
||||
while (++i != rem);
|
||||
}
|
||||
// (p->tempPos <= 4)
|
||||
// (p->src == p->srcLim)
|
||||
}
|
||||
}
|
||||
|
||||
#undef PRF2
|
||||
#undef CONV_FLAG
|
||||
#undef MARKER_FLAG
|
||||
#undef WRITE_CONTEXT
|
||||
#undef WRITE_CONTEXT_AND_SRC
|
||||
#undef ONE_ITER
|
||||
#undef NUM_SHIFT_BITS
|
||||
#undef kTopValue
|
||||
#undef kNumBitModelTotalBits
|
||||
#undef kBitModelTotal
|
||||
#undef kNumMoveBits
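/* Illustrative sketch (a generic binary range coder using the same
   kNumBitModelTotalBits / kNumMoveBits constants as above; not the SDK's exact
   encoder): how encoding one bit narrows the range and adapts the probability. */
#include "7zTypes.h"

#define SKETCH_MODEL_BITS 11
#define SKETCH_MOVE_BITS  5

static void RcSketch_EncodeBit(UInt32 *range, UInt64 *low, UInt16 *prob, unsigned bit)
{
  const UInt32 bound = (*range >> SKETCH_MODEL_BITS) * *prob;
  if (bit == 0)
  {
    *range = bound;                       /* keep the "0" sub-range */
    *prob = (UInt16)(*prob + ((((UInt32)1 << SKETCH_MODEL_BITS) - *prob) >> SKETCH_MOVE_BITS));
  }
  else
  {
    *low += bound;                        /* skip past the "0" sub-range */
    *range -= bound;
    *prob = (UInt16)(*prob - (*prob >> SKETCH_MOVE_BITS));
  }
  /* a real coder renormalizes here when *range drops below kTopValue */
}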
@@ -1,230 +1,420 @@
/* Bra.c -- Converters for RISC code
|
||||
2021-02-09 : Igor Pavlov : Public domain */
|
||||
/* Bra.c -- Branch converters for RISC code
|
||||
2023-04-02 : Igor Pavlov : Public domain */
|
||||
|
||||
#include "Precomp.h"
|
||||
|
||||
#include "CpuArch.h"
|
||||
#include "Bra.h"
|
||||
#include "CpuArch.h"
|
||||
#include "RotateDefs.h"
|
||||
|
||||
SizeT ARM_Convert(Byte *data, SizeT size, UInt32 ip, int encoding)
|
||||
#if defined(MY_CPU_SIZEOF_POINTER) \
|
||||
&& ( MY_CPU_SIZEOF_POINTER == 4 \
|
||||
|| MY_CPU_SIZEOF_POINTER == 8)
|
||||
#define BR_CONV_USE_OPT_PC_PTR
|
||||
#endif
|
||||
|
||||
#ifdef BR_CONV_USE_OPT_PC_PTR
|
||||
#define BR_PC_INIT pc -= (UInt32)(SizeT)p;
|
||||
#define BR_PC_GET (pc + (UInt32)(SizeT)p)
|
||||
#else
|
||||
#define BR_PC_INIT pc += (UInt32)size;
|
||||
#define BR_PC_GET (pc - (UInt32)(SizeT)(lim - p))
|
||||
// #define BR_PC_INIT
|
||||
// #define BR_PC_GET (pc + (UInt32)(SizeT)(p - data))
|
||||
#endif
|
||||
|
||||
#define BR_CONVERT_VAL(v, c) if (encoding) v += c; else v -= c;
|
||||
// #define BR_CONVERT_VAL(v, c) if (!encoding) c = (UInt32)0 - c; v += c;
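/* Illustrative sketch (not from the SDK): BR_CONVERT_VAL is the whole branch-filter
   idea in one line - the encoder turns a relative branch target into an absolute one
   (v += pc), the decoder reverses it (v -= pc), so repeated branches to one target
   become identical byte patterns that compress better. */
#include "7zTypes.h"

static UInt32 BrSketch_Convert(UInt32 v, UInt32 pc, int encoding)
{
  return encoding ? v + pc    /* encode: relative -> absolute */
                  : v - pc;   /* decode: absolute -> relative */
}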
|
||||
|
||||
#define Z7_BRANCH_CONV(name) z7_BranchConv_ ## name
|
||||
|
||||
#define Z7_BRANCH_FUNC_MAIN(name) \
|
||||
static \
|
||||
Z7_FORCE_INLINE \
|
||||
Z7_ATTRIB_NO_VECTOR \
|
||||
Byte *Z7_BRANCH_CONV(name)(Byte *p, SizeT size, UInt32 pc, int encoding)
|
||||
|
||||
#define Z7_BRANCH_FUNC_IMP(name, m, encoding) \
|
||||
Z7_NO_INLINE \
|
||||
Z7_ATTRIB_NO_VECTOR \
|
||||
Byte *m(name)(Byte *data, SizeT size, UInt32 pc) \
|
||||
{ return Z7_BRANCH_CONV(name)(data, size, pc, encoding); } \
|
||||
|
||||
#ifdef Z7_EXTRACT_ONLY
|
||||
#define Z7_BRANCH_FUNCS_IMP(name) \
|
||||
Z7_BRANCH_FUNC_IMP(name, Z7_BRANCH_CONV_DEC, 0)
|
||||
#else
|
||||
#define Z7_BRANCH_FUNCS_IMP(name) \
|
||||
Z7_BRANCH_FUNC_IMP(name, Z7_BRANCH_CONV_DEC, 0) \
|
||||
Z7_BRANCH_FUNC_IMP(name, Z7_BRANCH_CONV_ENC, 1)
|
||||
#endif
|
||||
|
||||
#if defined(__clang__)
|
||||
#define BR_EXTERNAL_FOR
|
||||
#define BR_NEXT_ITERATION continue;
|
||||
#else
|
||||
#define BR_EXTERNAL_FOR for (;;)
|
||||
#define BR_NEXT_ITERATION break;
|
||||
#endif
|
||||
|
||||
#if defined(__clang__) && (__clang_major__ >= 8) \
|
||||
|| defined(__GNUC__) && (__GNUC__ >= 1000) \
|
||||
// GCC is not good for __builtin_expect() here
|
||||
/* || defined(_MSC_VER) && (_MSC_VER >= 1920) */
|
||||
// #define Z7_unlikely [[unlikely]]
|
||||
// #define Z7_LIKELY(x) (__builtin_expect((x), 1))
|
||||
#define Z7_UNLIKELY(x) (__builtin_expect((x), 0))
|
||||
// #define Z7_likely [[likely]]
|
||||
#else
|
||||
// #define Z7_LIKELY(x) (x)
|
||||
#define Z7_UNLIKELY(x) (x)
|
||||
// #define Z7_likely
|
||||
#endif
|
||||
|
||||
|
||||
Z7_BRANCH_FUNC_MAIN(ARM64)
|
||||
{
|
||||
Byte *p;
|
||||
// Byte *p = data;
|
||||
const Byte *lim;
|
||||
size &= ~(size_t)3;
|
||||
ip += 4;
|
||||
p = data;
|
||||
lim = data + size;
|
||||
|
||||
if (encoding)
|
||||
|
||||
for (;;)
|
||||
const UInt32 flag = (UInt32)1 << (24 - 4);
|
||||
const UInt32 mask = ((UInt32)1 << 24) - (flag << 1);
|
||||
size &= ~(SizeT)3;
|
||||
// if (size == 0) return p;
|
||||
lim = p + size;
|
||||
BR_PC_INIT
|
||||
pc -= 4; // because (p) will point to next instruction
|
||||
|
||||
BR_EXTERNAL_FOR
|
||||
{
|
||||
// Z7_PRAGMA_OPT_DISABLE_LOOP_UNROLL_VECTORIZE
|
||||
for (;;)
|
||||
{
|
||||
if (p >= lim)
|
||||
return (SizeT)(p - data);
|
||||
UInt32 v;
|
||||
if Z7_UNLIKELY(p == lim)
|
||||
return p;
|
||||
v = GetUi32a(p);
|
||||
p += 4;
|
||||
if (p[-1] == 0xEB)
|
||||
break;
|
||||
}
|
||||
{
|
||||
UInt32 v = GetUi32(p - 4);
|
||||
v <<= 2;
|
||||
v += ip + (UInt32)(p - data);
|
||||
v >>= 2;
|
||||
v &= 0x00FFFFFF;
|
||||
v |= 0xEB000000;
|
||||
SetUi32(p - 4, v);
|
||||
}
|
||||
}
|
||||
|
||||
for (;;)
|
||||
{
|
||||
for (;;)
|
||||
{
|
||||
if (p >= lim)
|
||||
return (SizeT)(p - data);
|
||||
p += 4;
|
||||
if (p[-1] == 0xEB)
|
||||
break;
|
||||
}
|
||||
{
|
||||
UInt32 v = GetUi32(p - 4);
|
||||
v <<= 2;
|
||||
v -= ip + (UInt32)(p - data);
|
||||
v >>= 2;
|
||||
v &= 0x00FFFFFF;
|
||||
v |= 0xEB000000;
|
||||
SetUi32(p - 4, v);
|
||||
if Z7_UNLIKELY(((v - 0x94000000) & 0xfc000000) == 0)
|
||||
{
|
||||
UInt32 c = BR_PC_GET >> 2;
|
||||
BR_CONVERT_VAL(v, c)
|
||||
v &= 0x03ffffff;
|
||||
v |= 0x94000000;
|
||||
SetUi32a(p - 4, v)
|
||||
BR_NEXT_ITERATION
|
||||
}
|
||||
// v = rotlFixed(v, 8); v += (flag << 8) - 0x90; if Z7_UNLIKELY((v & ((mask << 8) + 0x9f)) == 0)
|
||||
v -= 0x90000000; if Z7_UNLIKELY((v & 0x9f000000) == 0)
|
||||
{
|
||||
UInt32 z, c;
|
||||
// v = rotrFixed(v, 8);
|
||||
v += flag; if Z7_UNLIKELY(v & mask) continue;
|
||||
z = (v & 0xffffffe0) | (v >> 26);
|
||||
c = (BR_PC_GET >> (12 - 3)) & ~(UInt32)7;
|
||||
BR_CONVERT_VAL(z, c)
|
||||
v &= 0x1f;
|
||||
v |= 0x90000000;
|
||||
v |= z << 26;
|
||||
v |= 0x00ffffe0 & ((z & (((flag << 1) - 1))) - flag);
|
||||
SetUi32a(p - 4, v)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
Z7_BRANCH_FUNCS_IMP(ARM64)
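/* Illustrative sketch (assumes the standard A64 encoding): a BL instruction is
   0x94000000 | imm26, where imm26 is a signed word offset from the instruction's own
   address; the filter detects it with ((v - 0x94000000) & 0xfc000000) == 0 and
   converts only that 26-bit field, as sketched here with a hypothetical name. */
#include "7zTypes.h"

static UInt32 Arm64Sketch_ConvertBL(UInt32 insn, UInt32 insnAddr, int encoding)
{
  UInt32 imm26 = insn & 0x03ffffff;   /* word offset field */
  const UInt32 c = insnAddr >> 2;     /* pc in words, as in the filter */
  if (encoding) imm26 += c; else imm26 -= c;
  return 0x94000000 | (imm26 & 0x03ffffff);
}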
|
||||
|
||||
|
||||
SizeT ARMT_Convert(Byte *data, SizeT size, UInt32 ip, int encoding)
|
||||
Z7_BRANCH_FUNC_MAIN(ARM)
|
||||
{
|
||||
Byte *p;
|
||||
// Byte *p = data;
|
||||
const Byte *lim;
|
||||
size &= ~(size_t)1;
|
||||
p = data;
|
||||
lim = data + size - 4;
|
||||
|
||||
if (encoding)
|
||||
size &= ~(SizeT)3;
|
||||
lim = p + size;
|
||||
BR_PC_INIT
|
||||
/* in ARM: branch offset is relative to the +2 instructions from current instruction.
|
||||
(p) will point to next instruction */
|
||||
pc += 8 - 4;
|
||||
|
||||
for (;;)
|
||||
{
|
||||
UInt32 b1;
|
||||
for (;;)
|
||||
{
|
||||
UInt32 b3;
|
||||
if (p > lim)
|
||||
return (SizeT)(p - data);
|
||||
b1 = p[1];
|
||||
b3 = p[3];
|
||||
p += 2;
|
||||
b1 ^= 8;
|
||||
if ((b3 & b1) >= 0xF8)
|
||||
break;
|
||||
if Z7_UNLIKELY(p >= lim) { return p; } p += 4; if Z7_UNLIKELY(p[-1] == 0xeb) break;
|
||||
if Z7_UNLIKELY(p >= lim) { return p; } p += 4; if Z7_UNLIKELY(p[-1] == 0xeb) break;
|
||||
}
|
||||
{
|
||||
UInt32 v =
|
||||
((UInt32)b1 << 19)
|
||||
+ (((UInt32)p[1] & 0x7) << 8)
|
||||
+ (((UInt32)p[-2] << 11))
|
||||
+ (p[0]);
|
||||
|
||||
p += 2;
|
||||
{
|
||||
UInt32 cur = (ip + (UInt32)(p - data)) >> 1;
|
||||
v += cur;
|
||||
}
|
||||
|
||||
p[-4] = (Byte)(v >> 11);
|
||||
p[-3] = (Byte)(0xF0 | ((v >> 19) & 0x7));
|
||||
p[-2] = (Byte)v;
|
||||
p[-1] = (Byte)(0xF8 | (v >> 8));
|
||||
}
|
||||
}
|
||||
|
||||
for (;;)
|
||||
{
|
||||
UInt32 b1;
|
||||
for (;;)
|
||||
{
|
||||
UInt32 b3;
|
||||
if (p > lim)
|
||||
return (SizeT)(p - data);
|
||||
b1 = p[1];
|
||||
b3 = p[3];
|
||||
p += 2;
|
||||
b1 ^= 8;
|
||||
if ((b3 & b1) >= 0xF8)
|
||||
break;
|
||||
}
|
||||
{
|
||||
UInt32 v =
|
||||
((UInt32)b1 << 19)
|
||||
+ (((UInt32)p[1] & 0x7) << 8)
|
||||
+ (((UInt32)p[-2] << 11))
|
||||
+ (p[0]);
|
||||
|
||||
p += 2;
|
||||
{
|
||||
UInt32 cur = (ip + (UInt32)(p - data)) >> 1;
|
||||
v -= cur;
|
||||
}
|
||||
|
||||
/*
|
||||
SetUi16(p - 4, (UInt16)(((v >> 11) & 0x7FF) | 0xF000));
|
||||
SetUi16(p - 2, (UInt16)(v | 0xF800));
|
||||
*/
|
||||
|
||||
p[-4] = (Byte)(v >> 11);
|
||||
p[-3] = (Byte)(0xF0 | ((v >> 19) & 0x7));
|
||||
p[-2] = (Byte)v;
|
||||
p[-1] = (Byte)(0xF8 | (v >> 8));
|
||||
UInt32 v = GetUi32a(p - 4);
|
||||
UInt32 c = BR_PC_GET >> 2;
|
||||
BR_CONVERT_VAL(v, c)
|
||||
v &= 0x00ffffff;
|
||||
v |= 0xeb000000;
|
||||
SetUi32a(p - 4, v)
|
||||
}
|
||||
}
|
||||
}
|
||||
Z7_BRANCH_FUNCS_IMP(ARM)
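/* Illustrative sketch (assumes the standard A32 encoding): an ARM BL is
   0xEB000000 | imm24, a signed word offset relative to the instruction address + 8.
   The filter keys on the 0xeb opcode byte and converts that 24-bit field. */
#include "7zTypes.h"

static UInt32 ArmSketch_ConvertBL(UInt32 insn, UInt32 insnAddr, int encoding)
{
  UInt32 imm24 = insn & 0x00ffffff;
  const UInt32 c = (insnAddr + 8) >> 2;   /* ARM pipeline: offsets are pc+8 based */
  if (encoding) imm24 += c; else imm24 -= c;
  return 0xeb000000 | (imm24 & 0x00ffffff);
}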
|
||||
|
||||
|
||||
SizeT PPC_Convert(Byte *data, SizeT size, UInt32 ip, int encoding)
|
||||
Z7_BRANCH_FUNC_MAIN(PPC)
|
||||
{
|
||||
Byte *p;
|
||||
// Byte *p = data;
|
||||
const Byte *lim;
|
||||
size &= ~(size_t)3;
|
||||
ip -= 4;
|
||||
p = data;
|
||||
lim = data + size;
|
||||
|
||||
size &= ~(SizeT)3;
|
||||
lim = p + size;
|
||||
BR_PC_INIT
|
||||
pc -= 4; // because (p) will point to next instruction
|
||||
|
||||
for (;;)
|
||||
{
|
||||
UInt32 v;
|
||||
for (;;)
|
||||
{
|
||||
if (p >= lim)
|
||||
return (SizeT)(p - data);
|
||||
if Z7_UNLIKELY(p == lim)
|
||||
return p;
|
||||
// v = GetBe32a(p);
|
||||
v = *(UInt32 *)(void *)p;
|
||||
p += 4;
|
||||
/* if ((v & 0xFC000003) == 0x48000001) */
|
||||
if ((p[-4] & 0xFC) == 0x48 && (p[-1] & 3) == 1)
|
||||
break;
|
||||
// if ((v & 0xfc000003) == 0x48000001) break;
|
||||
// if ((p[-4] & 0xFC) == 0x48 && (p[-1] & 3) == 1) break;
|
||||
if Z7_UNLIKELY(
|
||||
((v - Z7_CONV_BE_TO_NATIVE_CONST32(0x48000001))
|
||||
& Z7_CONV_BE_TO_NATIVE_CONST32(0xfc000003)) == 0) break;
|
||||
}
|
||||
{
|
||||
UInt32 v = GetBe32(p - 4);
|
||||
if (encoding)
|
||||
v += ip + (UInt32)(p - data);
|
||||
else
|
||||
v -= ip + (UInt32)(p - data);
|
||||
v &= 0x03FFFFFF;
|
||||
v = Z7_CONV_NATIVE_TO_BE_32(v);
|
||||
{
|
||||
UInt32 c = BR_PC_GET;
|
||||
BR_CONVERT_VAL(v, c)
|
||||
}
|
||||
v &= 0x03ffffff;
|
||||
v |= 0x48000000;
|
||||
SetBe32(p - 4, v);
|
||||
SetBe32a(p - 4, v)
|
||||
}
|
||||
}
|
||||
}
|
||||
Z7_BRANCH_FUNCS_IMP(PPC)
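/* Illustrative sketch (assumes the PowerPC I-form branch): "bl target" is
   0x48000001 | (byteOffset & 0x03fffffc); the low two bits are the AA and LK flags.
   Because instruction addresses are 4-aligned, the filter can add or subtract the
   byte pc on the whole low-26-bit field without disturbing those flags. */
#include "7zTypes.h"

static UInt32 PpcSketch_ConvertBranch(UInt32 insn, UInt32 insnAddr, int encoding)
{
  UInt32 v = insn & 0x03ffffff;   /* offset field plus AA/LK flag bits */
  if (encoding) v += insnAddr; else v -= insnAddr;
  return 0x48000000 | (v & 0x03ffffff);
}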
|
||||
|
||||
|
||||
SizeT SPARC_Convert(Byte *data, SizeT size, UInt32 ip, int encoding)
|
||||
#ifdef Z7_CPU_FAST_ROTATE_SUPPORTED
|
||||
#define BR_SPARC_USE_ROTATE
|
||||
#endif
|
||||
|
||||
Z7_BRANCH_FUNC_MAIN(SPARC)
|
||||
{
|
||||
Byte *p;
|
||||
// Byte *p = data;
|
||||
const Byte *lim;
|
||||
size &= ~(size_t)3;
|
||||
ip -= 4;
|
||||
p = data;
|
||||
lim = data + size;
|
||||
|
||||
const UInt32 flag = (UInt32)1 << 22;
|
||||
size &= ~(SizeT)3;
|
||||
lim = p + size;
|
||||
BR_PC_INIT
|
||||
pc -= 4; // because (p) will point to next instruction
|
||||
for (;;)
|
||||
{
|
||||
UInt32 v;
|
||||
for (;;)
|
||||
{
|
||||
if (p >= lim)
|
||||
return (SizeT)(p - data);
|
||||
/*
|
||||
v = GetBe32(p);
|
||||
p += 4;
|
||||
m = v + ((UInt32)5 << 29);
|
||||
m ^= (UInt32)7 << 29;
|
||||
m += (UInt32)1 << 22;
|
||||
if ((m & ((UInt32)0x1FF << 23)) == 0)
|
||||
break;
|
||||
if Z7_UNLIKELY(p == lim)
|
||||
return p;
|
||||
/* // the code without GetBe32a():
|
||||
{ const UInt32 v = GetUi16a(p) & 0xc0ff; p += 4; if (v == 0x40 || v == 0xc07f) break; }
|
||||
*/
|
||||
v = GetBe32a(p);
|
||||
p += 4;
|
||||
if ((p[-4] == 0x40 && (p[-3] & 0xC0) == 0) ||
|
||||
(p[-4] == 0x7F && (p[-3] >= 0xC0)))
|
||||
#ifdef BR_SPARC_USE_ROTATE
|
||||
v = rotlFixed(v, 2);
|
||||
v += (flag << 2) - 1;
|
||||
if Z7_UNLIKELY((v & (3 - (flag << 3))) == 0)
|
||||
#else
|
||||
v += (UInt32)5 << 29;
|
||||
v ^= (UInt32)7 << 29;
|
||||
v += flag;
|
||||
if Z7_UNLIKELY((v & (0 - (flag << 1))) == 0)
|
||||
#endif
|
||||
break;
|
||||
}
|
||||
{
|
||||
UInt32 v = GetBe32(p - 4);
|
||||
// UInt32 v = GetBe32a(p - 4);
|
||||
#ifndef BR_SPARC_USE_ROTATE
|
||||
v <<= 2;
|
||||
if (encoding)
|
||||
v += ip + (UInt32)(p - data);
|
||||
else
|
||||
v -= ip + (UInt32)(p - data);
|
||||
|
||||
v &= 0x01FFFFFF;
|
||||
v -= (UInt32)1 << 24;
|
||||
v ^= 0xFF000000;
|
||||
#endif
|
||||
{
|
||||
UInt32 c = BR_PC_GET;
|
||||
BR_CONVERT_VAL(v, c)
|
||||
}
|
||||
v &= (flag << 3) - 1;
|
||||
#ifdef BR_SPARC_USE_ROTATE
|
||||
v -= (flag << 2) - 1;
|
||||
v = rotrFixed(v, 2);
|
||||
#else
|
||||
v -= (flag << 2);
|
||||
v >>= 2;
|
||||
v |= 0x40000000;
|
||||
SetBe32(p - 4, v);
|
||||
v |= (UInt32)1 << 30;
|
||||
#endif
|
||||
SetBe32a(p - 4, v)
|
||||
}
|
||||
}
|
||||
}
|
||||
Z7_BRANCH_FUNCS_IMP(SPARC)
|
||||
|
||||
|
||||
Z7_BRANCH_FUNC_MAIN(ARMT)
|
||||
{
|
||||
// Byte *p = data;
|
||||
Byte *lim;
|
||||
size &= ~(SizeT)1;
|
||||
// if (size == 0) return p;
|
||||
if (size <= 2) return p;
|
||||
size -= 2;
|
||||
lim = p + size;
|
||||
BR_PC_INIT
|
||||
/* in ARM: branch offset is relative to the +2 instructions from current instruction.
|
||||
(p) will point to the +2 instructions from current instruction */
|
||||
// pc += 4 - 4;
|
||||
// if (encoding) pc -= 0xf800 << 1; else pc += 0xf800 << 1;
|
||||
// #define ARMT_TAIL_PROC { goto armt_tail; }
|
||||
#define ARMT_TAIL_PROC { return p; }
|
||||
|
||||
do
|
||||
{
|
||||
/* in MSVC 32-bit x86 compilers:
UInt32 version : it loads the value from memory with movzx
Byte version   : it loads the value into an 8-bit register (AL/CL)
the movzx version is slightly faster on some CPUs
*/
|
||||
unsigned b1;
|
||||
// Byte / unsigned
|
||||
b1 = p[1];
|
||||
// optimized version to reduce one (p >= lim) check:
|
||||
// unsigned a1 = p[1]; b1 = p[3]; p += 2; if Z7_LIKELY((b1 & (a1 ^ 8)) < 0xf8)
|
||||
for (;;)
|
||||
{
|
||||
unsigned b3; // Byte / UInt32
|
||||
/* (Byte)(b3) normalization can use low-byte computations in MSVC.
It gives smaller code, and no loss of speed in some compilers/CPUs.
But new MSVC 32-bit x86 compilers use a slower load
from memory to a low byte register in that case.
So we try to use full 32-bit computations for faster code.
*/
|
||||
// if (p >= lim) { ARMT_TAIL_PROC } b3 = b1 + 8; b1 = p[3]; p += 2; if ((b3 & b1) >= 0xf8) break;
|
||||
if Z7_UNLIKELY(p >= lim) { ARMT_TAIL_PROC } b3 = p[3]; p += 2; if Z7_UNLIKELY((b3 & (b1 ^ 8)) >= 0xf8) break;
|
||||
if Z7_UNLIKELY(p >= lim) { ARMT_TAIL_PROC } b1 = p[3]; p += 2; if Z7_UNLIKELY((b1 & (b3 ^ 8)) >= 0xf8) break;
|
||||
}
|
||||
{
|
||||
/* we can adjust pc for (0xf800) to rid of (& 0x7FF) operation.
|
||||
But gcc/clang for arm64 can use bfi instruction for full code here */
|
||||
UInt32 v =
|
||||
((UInt32)GetUi16a(p - 2) << 11) |
|
||||
((UInt32)GetUi16a(p) & 0x7FF);
|
||||
/*
|
||||
UInt32 v =
|
||||
((UInt32)p[1 - 2] << 19)
|
||||
+ (((UInt32)p[1] & 0x7) << 8)
|
||||
+ (((UInt32)p[-2] << 11))
|
||||
+ (p[0]);
|
||||
*/
|
||||
p += 2;
|
||||
{
|
||||
UInt32 c = BR_PC_GET >> 1;
|
||||
BR_CONVERT_VAL(v, c)
|
||||
}
|
||||
SetUi16a(p - 4, (UInt16)(((v >> 11) & 0x7ff) | 0xf000))
|
||||
SetUi16a(p - 2, (UInt16)(v | 0xf800))
|
||||
/*
|
||||
p[-4] = (Byte)(v >> 11);
|
||||
p[-3] = (Byte)(0xf0 | ((v >> 19) & 0x7));
|
||||
p[-2] = (Byte)v;
|
||||
p[-1] = (Byte)(0xf8 | (v >> 8));
|
||||
*/
|
||||
}
|
||||
}
|
||||
while (p < lim);
|
||||
return p;
|
||||
// armt_tail:
|
||||
// if ((Byte)((lim[1] & 0xf8)) != 0xf0) { lim += 2; } return lim;
|
||||
// return (Byte *)(lim + ((Byte)((lim[1] ^ 0xf0) & 0xf8) == 0 ? 0 : 2));
|
||||
// return (Byte *)(lim + (((lim[1] ^ ~0xfu) & ~7u) == 0 ? 0 : 2));
|
||||
// return (Byte *)(lim + 2 - (((((unsigned)lim[1] ^ 8) + 8) >> 7) & 2));
|
||||
}
|
||||
Z7_BRANCH_FUNCS_IMP(ARMT)
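/* Illustrative sketch (assumes the classic two-halfword Thumb BL encoding):
   0xF000|hi11 followed by 0xF800|lo11 holds a 22-bit signed halfword offset relative
   to the first halfword's address + 4. The filter joins the halves, converts, and
   splits them back; names here are hypothetical. */
#include "7zTypes.h"

static void ArmtSketch_ConvertBL(UInt16 hw[2], UInt32 firstHalfwordAddr, int encoding)
{
  UInt32 v = (((UInt32)hw[0] & 0x7ff) << 11) | ((UInt32)hw[1] & 0x7ff);
  const UInt32 c = (firstHalfwordAddr + 4) >> 1;   /* pc in halfwords */
  if (encoding) v += c; else v -= c;
  hw[0] = (UInt16)(0xf000 | ((v >> 11) & 0x7ff));
  hw[1] = (UInt16)(0xf800 | (v & 0x7ff));
}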
|
||||
|
||||
|
||||
// #define BR_IA64_NO_INLINE
|
||||
|
||||
Z7_BRANCH_FUNC_MAIN(IA64)
|
||||
{
|
||||
// Byte *p = data;
|
||||
const Byte *lim;
|
||||
size &= ~(SizeT)15;
|
||||
lim = p + size;
|
||||
pc -= 1 << 4;
|
||||
pc >>= 4 - 1;
|
||||
// pc -= 1 << 1;
|
||||
|
||||
for (;;)
|
||||
{
|
||||
unsigned m;
|
||||
for (;;)
|
||||
{
|
||||
if Z7_UNLIKELY(p == lim)
|
||||
return p;
|
||||
m = (unsigned)((UInt32)0x334b0000 >> (*p & 0x1e));
|
||||
p += 16;
|
||||
pc += 1 << 1;
|
||||
if (m &= 3)
|
||||
break;
|
||||
}
|
||||
{
|
||||
p += (ptrdiff_t)m * 5 - 20; // negative value is expected here.
|
||||
do
|
||||
{
|
||||
const UInt32 t =
|
||||
#if defined(MY_CPU_X86_OR_AMD64)
|
||||
// we use 32-bit load here to reduce code size on x86:
|
||||
GetUi32(p);
|
||||
#else
|
||||
GetUi16(p);
|
||||
#endif
|
||||
UInt32 z = GetUi32(p + 1) >> m;
|
||||
p += 5;
|
||||
if (((t >> m) & (0x70 << 1)) == 0
|
||||
&& ((z - (0x5000000 << 1)) & (0xf000000 << 1)) == 0)
|
||||
{
|
||||
UInt32 v = (UInt32)((0x8fffff << 1) | 1) & z;
|
||||
z ^= v;
|
||||
#ifdef BR_IA64_NO_INLINE
|
||||
v |= (v & ((UInt32)1 << (23 + 1))) >> 3;
|
||||
{
|
||||
UInt32 c = pc;
|
||||
BR_CONVERT_VAL(v, c)
|
||||
}
|
||||
v &= (0x1fffff << 1) | 1;
|
||||
#else
|
||||
{
|
||||
if (encoding)
|
||||
{
|
||||
// pc &= ~(0xc00000 << 1); // we just need to clear at least 2 bits
|
||||
pc &= (0x1fffff << 1) | 1;
|
||||
v += pc;
|
||||
}
|
||||
else
|
||||
{
|
||||
// pc |= 0xc00000 << 1; // we need to set at least 2 bits
|
||||
pc |= ~(UInt32)((0x1fffff << 1) | 1);
|
||||
v -= pc;
|
||||
}
|
||||
}
|
||||
v &= ~(UInt32)(0x600000 << 1);
|
||||
#endif
|
||||
v += (0x700000 << 1);
|
||||
v &= (0x8fffff << 1) | 1;
|
||||
z |= v;
|
||||
z <<= m;
|
||||
SetUi32(p + 1 - 5, z)
|
||||
}
|
||||
m++;
|
||||
}
|
||||
while (m &= 3); // while (m < 4);
|
||||
}
|
||||
}
|
||||
}
|
||||
Z7_BRANCH_FUNCS_IMP(IA64)
|
||||
|
|
|
@@ -1,82 +1,187 @@
/* Bra86.c -- Converter for x86 code (BCJ)
|
||||
2021-02-09 : Igor Pavlov : Public domain */
|
||||
/* Bra86.c -- Branch converter for X86 code (BCJ)
|
||||
2023-04-02 : Igor Pavlov : Public domain */
|
||||
|
||||
#include "Precomp.h"
|
||||
|
||||
#include "Bra.h"
|
||||
#include "CpuArch.h"
|
||||
|
||||
#define Test86MSByte(b) ((((b) + 1) & 0xFE) == 0)
|
||||
|
||||
SizeT x86_Convert(Byte *data, SizeT size, UInt32 ip, UInt32 *state, int encoding)
|
||||
#if defined(MY_CPU_SIZEOF_POINTER) \
|
||||
&& ( MY_CPU_SIZEOF_POINTER == 4 \
|
||||
|| MY_CPU_SIZEOF_POINTER == 8)
|
||||
#define BR_CONV_USE_OPT_PC_PTR
|
||||
#endif
|
||||
|
||||
#ifdef BR_CONV_USE_OPT_PC_PTR
|
||||
#define BR_PC_INIT pc -= (UInt32)(SizeT)p; // (MY_uintptr_t)
|
||||
#define BR_PC_GET (pc + (UInt32)(SizeT)p)
|
||||
#else
|
||||
#define BR_PC_INIT pc += (UInt32)size;
|
||||
#define BR_PC_GET (pc - (UInt32)(SizeT)(lim - p))
|
||||
// #define BR_PC_INIT
|
||||
// #define BR_PC_GET (pc + (UInt32)(SizeT)(p - data))
|
||||
#endif
|
||||
|
||||
#define BR_CONVERT_VAL(v, c) if (encoding) v += c; else v -= c;
|
||||
// #define BR_CONVERT_VAL(v, c) if (!encoding) c = (UInt32)0 - c; v += c;
|
||||
|
||||
#define Z7_BRANCH_CONV_ST(name) z7_BranchConvSt_ ## name
|
||||
|
||||
#define BR86_NEED_CONV_FOR_MS_BYTE(b) ((((b) + 1) & 0xfe) == 0)
|
||||
|
||||
#ifdef MY_CPU_LE_UNALIGN
|
||||
#define BR86_PREPARE_BCJ_SCAN const UInt32 v = GetUi32(p) ^ 0xe8e8e8e8;
|
||||
#define BR86_IS_BCJ_BYTE(n) ((v & ((UInt32)0xfe << (n) * 8)) == 0)
|
||||
#else
|
||||
#define BR86_PREPARE_BCJ_SCAN
|
||||
// bad for MSVC X86 (partial write to byte reg):
|
||||
#define BR86_IS_BCJ_BYTE(n) ((p[n - 4] & 0xfe) == 0xe8)
|
||||
// bad for old MSVC (partial write to byte reg):
|
||||
// #define BR86_IS_BCJ_BYTE(n) (((*p ^ 0xe8) & 0xfe) == 0)
|
||||
#endif
|
||||
|
||||
static
|
||||
Z7_FORCE_INLINE
|
||||
Z7_ATTRIB_NO_VECTOR
|
||||
Byte *Z7_BRANCH_CONV_ST(X86)(Byte *p, SizeT size, UInt32 pc, UInt32 *state, int encoding)
|
||||
{
|
||||
SizeT pos = 0;
|
||||
UInt32 mask = *state & 7;
|
||||
if (size < 5)
|
||||
return 0;
|
||||
size -= 4;
|
||||
ip += 5;
|
||||
return p;
|
||||
{
|
||||
// Byte *p = data;
|
||||
const Byte *lim = p + size - 4;
|
||||
unsigned mask = (unsigned)*state; // & 7;
|
||||
#ifdef BR_CONV_USE_OPT_PC_PTR
|
||||
/* if BR_CONV_USE_OPT_PC_PTR is defined: we need to adjust (pc) for (+4),
|
||||
because call/jump offset is relative to the next instruction.
|
||||
if BR_CONV_USE_OPT_PC_PTR is not defined : we don't need to adjust (pc) for (+4),
|
||||
because BR_PC_GET uses (pc - (lim - p)), and lim was adjusted for (-4) before.
|
||||
*/
|
||||
pc += 4;
|
||||
#endif
|
||||
BR_PC_INIT
|
||||
goto start;
|
||||
|
||||
for (;;)
|
||||
for (;; mask |= 4)
|
||||
{
|
||||
Byte *p = data + pos;
|
||||
const Byte *limit = data + size;
|
||||
for (; p < limit; p++)
|
||||
if ((*p & 0xFE) == 0xE8)
|
||||
break;
|
||||
|
||||
// cont: mask |= 4;
|
||||
start:
|
||||
if (p >= lim)
|
||||
goto fin;
|
||||
{
|
||||
SizeT d = (SizeT)(p - data) - pos;
|
||||
pos = (SizeT)(p - data);
|
||||
if (p >= limit)
|
||||
{
|
||||
*state = (d > 2 ? 0 : mask >> (unsigned)d);
|
||||
return pos;
|
||||
}
|
||||
if (d > 2)
|
||||
mask = 0;
|
||||
else
|
||||
{
|
||||
mask >>= (unsigned)d;
|
||||
if (mask != 0 && (mask > 4 || mask == 3 || Test86MSByte(p[(size_t)(mask >> 1) + 1])))
|
||||
{
|
||||
mask = (mask >> 1) | 4;
|
||||
pos++;
|
||||
continue;
|
||||
}
|
||||
}
|
||||
BR86_PREPARE_BCJ_SCAN
|
||||
p += 4;
|
||||
if (BR86_IS_BCJ_BYTE(0)) { goto m0; } mask >>= 1;
|
||||
if (BR86_IS_BCJ_BYTE(1)) { goto m1; } mask >>= 1;
|
||||
if (BR86_IS_BCJ_BYTE(2)) { goto m2; } mask = 0;
|
||||
if (BR86_IS_BCJ_BYTE(3)) { goto a3; }
|
||||
}
|
||||
goto main_loop;
|
||||
|
||||
if (Test86MSByte(p[4]))
|
||||
m0: p--;
|
||||
m1: p--;
|
||||
m2: p--;
|
||||
if (mask == 0)
|
||||
goto a3;
|
||||
if (p > lim)
|
||||
goto fin_p;
|
||||
|
||||
// if (((0x17u >> mask) & 1) == 0)
|
||||
if (mask > 4 || mask == 3)
|
||||
{
|
||||
UInt32 v = ((UInt32)p[4] << 24) | ((UInt32)p[3] << 16) | ((UInt32)p[2] << 8) | ((UInt32)p[1]);
|
||||
UInt32 cur = ip + (UInt32)pos;
|
||||
pos += 5;
|
||||
if (encoding)
|
||||
v += cur;
|
||||
else
|
||||
v -= cur;
|
||||
if (mask != 0)
|
||||
mask >>= 1;
|
||||
continue; // goto cont;
|
||||
}
|
||||
mask >>= 1;
|
||||
if (BR86_NEED_CONV_FOR_MS_BYTE(p[mask]))
|
||||
continue; // goto cont;
|
||||
// if (!BR86_NEED_CONV_FOR_MS_BYTE(p[3])) continue; // goto cont;
|
||||
{
|
||||
UInt32 v = GetUi32(p);
|
||||
UInt32 c;
|
||||
v += (1 << 24); if (v & 0xfe000000) continue; // goto cont;
|
||||
c = BR_PC_GET;
|
||||
BR_CONVERT_VAL(v, c)
|
||||
{
|
||||
unsigned sh = (mask & 6) << 2;
|
||||
if (Test86MSByte((Byte)(v >> sh)))
|
||||
mask <<= 3;
|
||||
if (BR86_NEED_CONV_FOR_MS_BYTE(v >> mask))
|
||||
{
|
||||
v ^= (((UInt32)0x100 << sh) - 1);
|
||||
if (encoding)
|
||||
v += cur;
|
||||
else
|
||||
v -= cur;
|
||||
v ^= (((UInt32)0x100 << mask) - 1);
|
||||
#ifdef MY_CPU_X86
|
||||
// for X86 : we can recalculate (c) to reduce register pressure
|
||||
c = BR_PC_GET;
|
||||
#endif
|
||||
BR_CONVERT_VAL(v, c)
|
||||
}
|
||||
mask = 0;
|
||||
}
|
||||
p[1] = (Byte)v;
|
||||
p[2] = (Byte)(v >> 8);
|
||||
p[3] = (Byte)(v >> 16);
|
||||
p[4] = (Byte)(0 - ((v >> 24) & 1));
|
||||
// v = (v & ((1 << 24) - 1)) - (v & (1 << 24));
|
||||
v &= (1 << 25) - 1; v -= (1 << 24);
|
||||
SetUi32(p, v)
|
||||
p += 4;
|
||||
goto main_loop;
|
||||
}
|
||||
else
|
||||
|
||||
main_loop:
|
||||
if (p >= lim)
|
||||
goto fin;
|
||||
for (;;)
|
||||
{
|
||||
mask = (mask >> 1) | 4;
|
||||
pos++;
|
||||
BR86_PREPARE_BCJ_SCAN
|
||||
p += 4;
|
||||
if (BR86_IS_BCJ_BYTE(0)) { goto a0; }
|
||||
if (BR86_IS_BCJ_BYTE(1)) { goto a1; }
|
||||
if (BR86_IS_BCJ_BYTE(2)) { goto a2; }
|
||||
if (BR86_IS_BCJ_BYTE(3)) { goto a3; }
|
||||
if (p >= lim)
|
||||
goto fin;
|
||||
}
|
||||
|
||||
a0: p--;
|
||||
a1: p--;
|
||||
a2: p--;
|
||||
a3:
|
||||
if (p > lim)
|
||||
goto fin_p;
|
||||
// if (!BR86_NEED_CONV_FOR_MS_BYTE(p[3])) continue; // goto cont;
|
||||
{
|
||||
UInt32 v = GetUi32(p);
|
||||
UInt32 c;
|
||||
v += (1 << 24); if (v & 0xfe000000) continue; // goto cont;
|
||||
c = BR_PC_GET;
|
||||
BR_CONVERT_VAL(v, c)
|
||||
// v = (v & ((1 << 24) - 1)) - (v & (1 << 24));
|
||||
v &= (1 << 25) - 1; v -= (1 << 24);
|
||||
SetUi32(p, v)
|
||||
p += 4;
|
||||
goto main_loop;
|
||||
}
|
||||
}
|
||||
|
||||
fin_p:
|
||||
p--;
|
||||
fin:
|
||||
// the following processing for tail is optional and can be commented
|
||||
/*
|
||||
lim += 4;
|
||||
for (; p < lim; p++, mask >>= 1)
|
||||
if ((*p & 0xfe) == 0xe8)
|
||||
break;
|
||||
*/
|
||||
*state = (UInt32)mask;
|
||||
return p;
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
#define Z7_BRANCH_CONV_ST_FUNC_IMP(name, m, encoding) \
|
||||
Z7_NO_INLINE \
|
||||
Z7_ATTRIB_NO_VECTOR \
|
||||
Byte *m(name)(Byte *data, SizeT size, UInt32 pc, UInt32 *state) \
|
||||
{ return Z7_BRANCH_CONV_ST(name)(data, size, pc, state, encoding); }
|
||||
|
||||
Z7_BRANCH_CONV_ST_FUNC_IMP(X86, Z7_BRANCH_CONV_ST_DEC, 0)
|
||||
#ifndef Z7_EXTRACT_ONLY
|
||||
Z7_BRANCH_CONV_ST_FUNC_IMP(X86, Z7_BRANCH_CONV_ST_ENC, 1)
|
||||
#endif
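/* Illustrative sketch (not the SDK's full state machine): the core x86 BCJ step -
   after an E8/E9 opcode byte, the 32-bit little-endian operand is converted between
   a relative and an absolute target using the address of the following instruction. */
#include "CpuArch.h"   /* GetUi32 / SetUi32 */

static void X86Sketch_ConvertRel32(Byte *operand, UInt32 ipAfterInsn, int encoding)
{
  UInt32 v = GetUi32(operand);
  if (encoding) v += ipAfterInsn; else v -= ipAfterInsn;
  SetUi32(operand, v);
}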
|
||||
|
|
|
@@ -1,53 +1,14 @@
/* BraIA64.c -- Converter for IA-64 code
|
||||
2017-01-26 : Igor Pavlov : Public domain */
|
||||
2023-02-20 : Igor Pavlov : Public domain */
|
||||
|
||||
#include "Precomp.h"
|
||||
|
||||
#include "CpuArch.h"
|
||||
#include "Bra.h"
|
||||
// the code was moved to Bra.c
|
||||
|
||||
SizeT IA64_Convert(Byte *data, SizeT size, UInt32 ip, int encoding)
|
||||
{
|
||||
SizeT i;
|
||||
if (size < 16)
|
||||
return 0;
|
||||
size -= 16;
|
||||
i = 0;
|
||||
do
|
||||
{
|
||||
unsigned m = ((UInt32)0x334B0000 >> (data[i] & 0x1E)) & 3;
|
||||
if (m)
|
||||
{
|
||||
m++;
|
||||
do
|
||||
{
|
||||
Byte *p = data + (i + (size_t)m * 5 - 8);
|
||||
if (((p[3] >> m) & 15) == 5
|
||||
&& (((p[-1] | ((UInt32)p[0] << 8)) >> m) & 0x70) == 0)
|
||||
{
|
||||
unsigned raw = GetUi32(p);
|
||||
unsigned v = raw >> m;
|
||||
v = (v & 0xFFFFF) | ((v & (1 << 23)) >> 3);
|
||||
|
||||
v <<= 4;
|
||||
if (encoding)
|
||||
v += ip + (UInt32)i;
|
||||
else
|
||||
v -= ip + (UInt32)i;
|
||||
v >>= 4;
|
||||
|
||||
v &= 0x1FFFFF;
|
||||
v += 0x700000;
|
||||
v &= 0x8FFFFF;
|
||||
raw &= ~((UInt32)0x8FFFFF << m);
|
||||
raw |= (v << m);
|
||||
SetUi32(p, raw);
|
||||
}
|
||||
}
|
||||
while (++m <= 4);
|
||||
}
|
||||
i += 16;
|
||||
}
|
||||
while (i <= size);
|
||||
return i;
|
||||
}
|
||||
#ifdef _MSC_VER
|
||||
#pragma warning(disable : 4206) // nonstandard extension used : translation unit is empty
|
||||
#endif
|
||||
|
||||
#if defined(__clang__)
|
||||
#pragma GCC diagnostic ignored "-Wempty-translation-unit"
|
||||
#endif
|
||||
|
|
|
@@ -1,187 +1,318 @@
/* CpuArch.c -- CPU specific code
|
||||
2021-07-13 : Igor Pavlov : Public domain */
|
||||
2023-05-18 : Igor Pavlov : Public domain */
|
||||
|
||||
#include "Precomp.h"
|
||||
|
||||
// #include <stdio.h>
|
||||
|
||||
#include "CpuArch.h"
|
||||
|
||||
#ifdef MY_CPU_X86_OR_AMD64
|
||||
|
||||
#if (defined(_MSC_VER) && !defined(MY_CPU_AMD64)) || defined(__GNUC__)
|
||||
#define USE_ASM
|
||||
#undef NEED_CHECK_FOR_CPUID
|
||||
#if !defined(MY_CPU_AMD64)
|
||||
#define NEED_CHECK_FOR_CPUID
|
||||
#endif
|
||||
|
||||
#if !defined(USE_ASM) && _MSC_VER >= 1500
|
||||
#include <intrin.h>
|
||||
/*
|
||||
cpuid instruction supports (subFunction) parameter in ECX,
|
||||
that is used only with some specific (function) parameter values.
|
||||
But we always use only (subFunction==0).
|
||||
*/
|
||||
/*
|
||||
__cpuid(): MSVC and GCC/CLANG use same function/macro name
|
||||
but parameters are different.
|
||||
We use MSVC __cpuid() parameters style for our z7_x86_cpuid() function.
|
||||
*/
|
||||
|
||||
#if defined(__GNUC__) /* && (__GNUC__ >= 10) */ \
|
||||
|| defined(__clang__) /* && (__clang_major__ >= 10) */
|
||||
|
||||
/* there were some CLANG/GCC compilers that had issues with
rbx(ebx) handling in asm blocks in -fPIC mode (__PIC__ is defined).
The compiler's <cpuid.h> contains the macro __cpuid() that is similar to our code.
The history of __cpuid() changes in CLANG/GCC:
GCC:
2007: it preserved ebx for (__PIC__ && __i386__)
2013: it preserved rbx and ebx for __PIC__
2014: it doesn't preserve rbx and ebx anymore
we suppose that (__GNUC__ >= 5) fixed that __PIC__ ebx/rbx problem.
CLANG:
2014+: it preserves rbx, but only for 64-bit code. No __PIC__ check.
Why does CLANG care about 64-bit mode only, and not care about ebx (in 32-bit)?
Do we need a __PIC__ test for CLANG, or must we care about rbx even if
__PIC__ is not defined?
*/
|
||||
|
||||
#define ASM_LN "\n"
|
||||
|
||||
#if defined(MY_CPU_AMD64) && defined(__PIC__) \
|
||||
&& ((defined (__GNUC__) && (__GNUC__ < 5)) || defined(__clang__))
|
||||
|
||||
#define x86_cpuid_MACRO(p, func) { \
|
||||
__asm__ __volatile__ ( \
|
||||
ASM_LN "mov %%rbx, %q1" \
|
||||
ASM_LN "cpuid" \
|
||||
ASM_LN "xchg %%rbx, %q1" \
|
||||
: "=a" ((p)[0]), "=&r" ((p)[1]), "=c" ((p)[2]), "=d" ((p)[3]) : "0" (func), "2"(0)); }
|
||||
|
||||
/* "=&r" selects free register. It can select even rbx, if that register is free.
|
||||
"=&D" for (RDI) also works, but the code can be larger with "=&D"
|
||||
"2"(0) means (subFunction = 0),
|
||||
2 is (zero-based) index in the output constraint list "=c" (ECX). */
|
||||
|
||||
#elif defined(MY_CPU_X86) && defined(__PIC__) \
|
||||
&& ((defined (__GNUC__) && (__GNUC__ < 5)) || defined(__clang__))
|
||||
|
||||
#define x86_cpuid_MACRO(p, func) { \
|
||||
__asm__ __volatile__ ( \
|
||||
ASM_LN "mov %%ebx, %k1" \
|
||||
ASM_LN "cpuid" \
|
||||
ASM_LN "xchg %%ebx, %k1" \
|
||||
: "=a" ((p)[0]), "=&r" ((p)[1]), "=c" ((p)[2]), "=d" ((p)[3]) : "0" (func), "2"(0)); }
|
||||
|
||||
#else
|
||||
|
||||
#define x86_cpuid_MACRO(p, func) { \
|
||||
__asm__ __volatile__ ( \
|
||||
ASM_LN "cpuid" \
|
||||
: "=a" ((p)[0]), "=b" ((p)[1]), "=c" ((p)[2]), "=d" ((p)[3]) : "0" (func), "2"(0)); }
|
||||
|
||||
#endif
|
||||
|
||||
#if defined(USE_ASM) && !defined(MY_CPU_AMD64)
|
||||
static UInt32 CheckFlag(UInt32 flag)
|
||||
|
||||
void Z7_FASTCALL z7_x86_cpuid(UInt32 p[4], UInt32 func)
|
||||
{
|
||||
#ifdef _MSC_VER
|
||||
__asm pushfd;
|
||||
__asm pop EAX;
|
||||
__asm mov EDX, EAX;
|
||||
__asm xor EAX, flag;
|
||||
__asm push EAX;
|
||||
__asm popfd;
|
||||
__asm pushfd;
|
||||
__asm pop EAX;
|
||||
__asm xor EAX, EDX;
|
||||
__asm push EDX;
|
||||
__asm popfd;
|
||||
__asm and flag, EAX;
|
||||
#else
|
||||
__asm__ __volatile__ (
|
||||
"pushf\n\t"
|
||||
"pop %%EAX\n\t"
|
||||
"movl %%EAX,%%EDX\n\t"
|
||||
"xorl %0,%%EAX\n\t"
|
||||
"push %%EAX\n\t"
|
||||
"popf\n\t"
|
||||
"pushf\n\t"
|
||||
"pop %%EAX\n\t"
|
||||
"xorl %%EDX,%%EAX\n\t"
|
||||
"push %%EDX\n\t"
|
||||
"popf\n\t"
|
||||
"andl %%EAX, %0\n\t":
|
||||
"=c" (flag) : "c" (flag) :
|
||||
"%eax", "%edx");
|
||||
#endif
|
||||
return flag;
|
||||
x86_cpuid_MACRO(p, func)
|
||||
}
|
||||
#define CHECK_CPUID_IS_SUPPORTED if (CheckFlag(1 << 18) == 0 || CheckFlag(1 << 21) == 0) return False;
|
||||
|
||||
|
||||
Z7_NO_INLINE
|
||||
UInt32 Z7_FASTCALL z7_x86_cpuid_GetMaxFunc(void)
|
||||
{
|
||||
#if defined(NEED_CHECK_FOR_CPUID)
|
||||
#define EFALGS_CPUID_BIT 21
|
||||
UInt32 a;
|
||||
__asm__ __volatile__ (
|
||||
ASM_LN "pushf"
|
||||
ASM_LN "pushf"
|
||||
ASM_LN "pop %0"
|
||||
// ASM_LN "movl %0, %1"
|
||||
// ASM_LN "xorl $0x200000, %0"
|
||||
ASM_LN "btc %1, %0"
|
||||
ASM_LN "push %0"
|
||||
ASM_LN "popf"
|
||||
ASM_LN "pushf"
|
||||
ASM_LN "pop %0"
|
||||
ASM_LN "xorl (%%esp), %0"
|
||||
|
||||
ASM_LN "popf"
|
||||
ASM_LN
|
||||
: "=&r" (a) // "=a"
|
||||
: "i" (EFALGS_CPUID_BIT)
|
||||
);
|
||||
if ((a & (1 << EFALGS_CPUID_BIT)) == 0)
|
||||
return 0;
|
||||
#endif
|
||||
{
|
||||
UInt32 p[4];
|
||||
x86_cpuid_MACRO(p, 0)
|
||||
return p[0];
|
||||
}
|
||||
}
|
||||
|
||||
#undef ASM_LN
|
||||
|
||||
#elif !defined(_MSC_VER)
|
||||
|
||||
/*
|
||||
// for gcc/clang and other: we can try to use __cpuid macro:
|
||||
#include <cpuid.h>
|
||||
void Z7_FASTCALL z7_x86_cpuid(UInt32 p[4], UInt32 func)
|
||||
{
|
||||
__cpuid(func, p[0], p[1], p[2], p[3]);
|
||||
}
|
||||
UInt32 Z7_FASTCALL z7_x86_cpuid_GetMaxFunc(void)
|
||||
{
|
||||
return (UInt32)__get_cpuid_max(0, NULL);
|
||||
}
|
||||
*/
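/* Illustrative sketch (assumes a GCC/Clang toolchain): the same "leaf 1 feature
   flags" query can be done with the compiler's <cpuid.h> helpers, in the spirit of
   the commented-out variant above; the function name is hypothetical. */
#include <cpuid.h>

static unsigned CpuidSketch_GetFeatureWords(unsigned *ecx, unsigned *edx)
{
  unsigned eax = 0, ebx = 0;
  if (!__get_cpuid(1, &eax, &ebx, ecx, edx))   /* returns 0 if leaf 1 is unsupported */
    return 0;
  return eax;                                  /* family/model/stepping signature */
}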
|
||||
// for unsupported cpuid:
|
||||
void Z7_FASTCALL z7_x86_cpuid(UInt32 p[4], UInt32 func)
|
||||
{
|
||||
UNUSED_VAR(func)
|
||||
p[0] = p[1] = p[2] = p[3] = 0;
|
||||
}
|
||||
UInt32 Z7_FASTCALL z7_x86_cpuid_GetMaxFunc(void)
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
|
||||
#else // _MSC_VER
|
||||
|
||||
#if !defined(MY_CPU_AMD64)
|
||||
|
||||
UInt32 __declspec(naked) Z7_FASTCALL z7_x86_cpuid_GetMaxFunc(void)
|
||||
{
|
||||
#if defined(NEED_CHECK_FOR_CPUID)
|
||||
#define EFALGS_CPUID_BIT 21
|
||||
__asm pushfd
|
||||
__asm pushfd
|
||||
/*
|
||||
__asm pop eax
|
||||
// __asm mov edx, eax
|
||||
__asm btc eax, EFALGS_CPUID_BIT
|
||||
__asm push eax
|
||||
*/
|
||||
__asm btc dword ptr [esp], EFALGS_CPUID_BIT
|
||||
__asm popfd
|
||||
__asm pushfd
|
||||
__asm pop eax
|
||||
// __asm xor eax, edx
|
||||
__asm xor eax, [esp]
|
||||
// __asm push edx
|
||||
__asm popfd
|
||||
__asm and eax, (1 shl EFALGS_CPUID_BIT)
|
||||
__asm jz end_func
|
||||
#endif
|
||||
__asm push ebx
|
||||
__asm xor eax, eax // func
|
||||
__asm xor ecx, ecx // subFunction (optional) for (func == 0)
|
||||
__asm cpuid
|
||||
__asm pop ebx
|
||||
#if defined(NEED_CHECK_FOR_CPUID)
|
||||
end_func:
|
||||
#endif
|
||||
__asm ret 0
|
||||
}
|
||||
|
||||
void __declspec(naked) Z7_FASTCALL z7_x86_cpuid(UInt32 p[4], UInt32 func)
|
||||
{
|
||||
UNUSED_VAR(p)
|
||||
UNUSED_VAR(func)
|
||||
__asm push ebx
|
||||
__asm push edi
|
||||
__asm mov edi, ecx // p
|
||||
__asm mov eax, edx // func
|
||||
__asm xor ecx, ecx // subfunction (optional) for (func == 0)
|
||||
__asm cpuid
|
||||
__asm mov [edi ], eax
|
||||
__asm mov [edi + 4], ebx
|
||||
__asm mov [edi + 8], ecx
|
||||
__asm mov [edi + 12], edx
|
||||
__asm pop edi
|
||||
__asm pop ebx
|
||||
__asm ret 0
|
||||
}
|
||||
|
||||
#else // MY_CPU_AMD64
|
||||
|
||||
#if _MSC_VER >= 1600
|
||||
#include <intrin.h>
|
||||
#define MY_cpuidex __cpuidex
|
||||
#else
|
||||
/*
|
||||
__cpuid (func == (0 or 7)) requires subfunction number in ECX.
|
||||
MSDN: The __cpuid intrinsic clears the ECX register before calling the cpuid instruction.
|
||||
__cpuid() in new MSVC clears ECX.
|
||||
__cpuid() in old MSVC (14.00) x64 doesn't clear ECX
|
||||
We still can use __cpuid for low (func) values that don't require ECX,
|
||||
but __cpuid() in old MSVC will be incorrect for some func values: (func == 7).
|
||||
So here we use the hack for old MSVC to send (subFunction) in ECX register to cpuid instruction,
|
||||
where ECX value is first parameter for FASTCALL / NO_INLINE func,
|
||||
So the caller of MY_cpuidex_HACK() sets ECX as subFunction, and
|
||||
old MSVC for __cpuid() doesn't change ECX and cpuid instruction gets (subFunction) value.
|
||||
|
||||
DON'T remove Z7_NO_INLINE and Z7_FASTCALL for MY_cpuidex_HACK(): !!!
|
||||
*/
|
||||
static
|
||||
Z7_NO_INLINE void Z7_FASTCALL MY_cpuidex_HACK(UInt32 subFunction, UInt32 func, int *CPUInfo)
|
||||
{
|
||||
UNUSED_VAR(subFunction)
|
||||
__cpuid(CPUInfo, func);
|
||||
}
|
||||
#define MY_cpuidex(info, func, func2) MY_cpuidex_HACK(func2, func, info)
|
||||
#pragma message("======== MY_cpuidex_HACK WAS USED ========")
|
||||
#endif // _MSC_VER >= 1600
|
||||
|
||||
#if !defined(MY_CPU_AMD64)
|
||||
/* inlining for __cpuid() in MSVC x86 (32-bit) produces big ineffective code,
|
||||
so we disable inlining here */
|
||||
Z7_NO_INLINE
|
||||
#endif
|
||||
void Z7_FASTCALL z7_x86_cpuid(UInt32 p[4], UInt32 func)
|
||||
{
|
||||
MY_cpuidex((int *)p, (int)func, 0);
|
||||
}
|
||||
|
||||
Z7_NO_INLINE
|
||||
UInt32 Z7_FASTCALL z7_x86_cpuid_GetMaxFunc(void)
|
||||
{
|
||||
int a[4];
|
||||
MY_cpuidex(a, 0, 0);
|
||||
return a[0];
|
||||
}
|
||||
|
||||
#endif // MY_CPU_AMD64
|
||||
#endif // _MSC_VER
|
||||
|
||||
#if defined(NEED_CHECK_FOR_CPUID)
|
||||
#define CHECK_CPUID_IS_SUPPORTED { if (z7_x86_cpuid_GetMaxFunc() == 0) return 0; }
|
||||
#else
|
||||
#define CHECK_CPUID_IS_SUPPORTED
|
||||
#endif
|
||||
#undef NEED_CHECK_FOR_CPUID
|
||||
|
||||
#ifndef USE_ASM
|
||||
#ifdef _MSC_VER
|
||||
#if _MSC_VER >= 1600
|
||||
#define MY__cpuidex __cpuidex
|
||||
#else
|
||||
|
||||
/*
|
||||
__cpuid (function == 4) requires subfunction number in ECX.
|
||||
MSDN: The __cpuid intrinsic clears the ECX register before calling the cpuid instruction.
|
||||
__cpuid() in new MSVC clears ECX.
|
||||
__cpuid() in old MSVC (14.00) doesn't clear ECX
|
||||
We still can use __cpuid for low (function) values that don't require ECX,
|
||||
but __cpuid() in old MSVC will be incorrect for some function values: (function == 4).
|
||||
So here we use the hack for old MSVC to send (subFunction) in ECX register to cpuid instruction,
|
||||
where ECX value is first parameter for FAST_CALL / NO_INLINE function,
|
||||
So the caller of MY__cpuidex_HACK() sets ECX as subFunction, and
|
||||
old MSVC for __cpuid() doesn't change ECX and cpuid instruction gets (subFunction) value.
|
||||
|
||||
DON'T remove MY_NO_INLINE and MY_FAST_CALL for MY__cpuidex_HACK() !!!
|
||||
*/
|
||||
|
||||
static
|
||||
MY_NO_INLINE
|
||||
void MY_FAST_CALL MY__cpuidex_HACK(UInt32 subFunction, int *CPUInfo, UInt32 function)
|
||||
{
|
||||
UNUSED_VAR(subFunction);
|
||||
__cpuid(CPUInfo, function);
|
||||
}
|
||||
|
||||
#define MY__cpuidex(info, func, func2) MY__cpuidex_HACK(func2, info, func)
|
||||
#pragma message("======== MY__cpuidex_HACK WAS USED ========")
|
||||
#endif
|
||||
#else
|
||||
#define MY__cpuidex(info, func, func2) __cpuid(info, func)
|
||||
#pragma message("======== (INCORRECT ?) cpuid WAS USED ========")
|
||||
#endif
|
||||
#endif
|
||||
|
||||
|
||||
|
||||
|
||||
void MyCPUID(UInt32 function, UInt32 *a, UInt32 *b, UInt32 *c, UInt32 *d)
|
||||
{
|
||||
#ifdef USE_ASM
|
||||
|
||||
#ifdef _MSC_VER
|
||||
|
||||
UInt32 a2, b2, c2, d2;
|
||||
__asm xor EBX, EBX;
|
||||
__asm xor ECX, ECX;
|
||||
__asm xor EDX, EDX;
|
||||
__asm mov EAX, function;
|
||||
__asm cpuid;
|
||||
__asm mov a2, EAX;
|
||||
__asm mov b2, EBX;
|
||||
__asm mov c2, ECX;
|
||||
__asm mov d2, EDX;
|
||||
|
||||
*a = a2;
|
||||
*b = b2;
|
||||
*c = c2;
|
||||
*d = d2;
|
||||
|
||||
#else
|
||||
|
||||
__asm__ __volatile__ (
|
||||
#if defined(MY_CPU_AMD64) && defined(__PIC__)
|
||||
"mov %%rbx, %%rdi;"
|
||||
"cpuid;"
|
||||
"xchg %%rbx, %%rdi;"
|
||||
: "=a" (*a) ,
|
||||
"=D" (*b) ,
|
||||
#elif defined(MY_CPU_X86) && defined(__PIC__)
|
||||
"mov %%ebx, %%edi;"
|
||||
"cpuid;"
|
||||
"xchgl %%ebx, %%edi;"
|
||||
: "=a" (*a) ,
|
||||
"=D" (*b) ,
|
||||
#else
|
||||
"cpuid"
|
||||
: "=a" (*a) ,
|
||||
"=b" (*b) ,
|
||||
#endif
|
||||
"=c" (*c) ,
|
||||
"=d" (*d)
|
||||
: "0" (function), "c"(0) ) ;
|
||||
|
||||
#endif
|
||||
|
||||
#else
|
||||
|
||||
int CPUInfo[4];
|
||||
|
||||
MY__cpuidex(CPUInfo, (int)function, 0);
|
||||
|
||||
*a = (UInt32)CPUInfo[0];
|
||||
*b = (UInt32)CPUInfo[1];
|
||||
*c = (UInt32)CPUInfo[2];
|
||||
*d = (UInt32)CPUInfo[3];
|
||||
|
||||
#endif
|
||||
}
|
||||
|
||||
BoolInt x86cpuid_CheckAndRead(Cx86cpuid *p)
|
||||
BoolInt x86cpuid_Func_1(UInt32 *p)
|
||||
{
|
||||
CHECK_CPUID_IS_SUPPORTED
|
||||
MyCPUID(0, &p->maxFunc, &p->vendor[0], &p->vendor[2], &p->vendor[1]);
|
||||
MyCPUID(1, &p->ver, &p->b, &p->c, &p->d);
|
||||
z7_x86_cpuid(p, 1);
|
||||
return True;
|
||||
}
|
||||
|
||||
static const UInt32 kVendors[][3] =
|
||||
/*
|
||||
static const UInt32 kVendors[][1] =
|
||||
{
|
||||
{ 0x756E6547, 0x49656E69, 0x6C65746E},
|
||||
{ 0x68747541, 0x69746E65, 0x444D4163},
|
||||
{ 0x746E6543, 0x48727561, 0x736C7561}
|
||||
{ 0x756E6547 }, // , 0x49656E69, 0x6C65746E },
|
||||
{ 0x68747541 }, // , 0x69746E65, 0x444D4163 },
|
||||
{ 0x746E6543 } // , 0x48727561, 0x736C7561 }
|
||||
};
|
||||
*/
|
||||
|
||||
/*
|
||||
typedef struct
|
||||
{
|
||||
UInt32 maxFunc;
|
||||
UInt32 vendor[3];
|
||||
UInt32 ver;
|
||||
UInt32 b;
|
||||
UInt32 c;
|
||||
UInt32 d;
|
||||
} Cx86cpuid;
|
||||
|
||||
enum
|
||||
{
|
||||
CPU_FIRM_INTEL,
|
||||
CPU_FIRM_AMD,
|
||||
CPU_FIRM_VIA
|
||||
};
|
||||
int x86cpuid_GetFirm(const Cx86cpuid *p);
|
||||
#define x86cpuid_ver_GetFamily(ver) (((ver >> 16) & 0xff0) | ((ver >> 8) & 0xf))
|
||||
#define x86cpuid_ver_GetModel(ver) (((ver >> 12) & 0xf0) | ((ver >> 4) & 0xf))
|
||||
#define x86cpuid_ver_GetStepping(ver) (ver & 0xf)
|
||||
|
||||
int x86cpuid_GetFirm(const Cx86cpuid *p)
|
||||
{
|
||||
unsigned i;
|
||||
for (i = 0; i < sizeof(kVendors) / sizeof(kVendors[i]); i++)
|
||||
for (i = 0; i < sizeof(kVendors) / sizeof(kVendors[0]); i++)
|
||||
{
|
||||
const UInt32 *v = kVendors[i];
|
||||
if (v[0] == p->vendor[0] &&
|
||||
v[1] == p->vendor[1] &&
|
||||
v[2] == p->vendor[2])
|
||||
if (v[0] == p->vendor[0]
|
||||
// && v[1] == p->vendor[1]
|
||||
// && v[2] == p->vendor[2]
|
||||
)
|
||||
return (int)i;
|
||||
}
|
||||
return -1;
|
||||
|
@ -190,41 +321,55 @@ int x86cpuid_GetFirm(const Cx86cpuid *p)
|
|||
BoolInt CPU_Is_InOrder()
|
||||
{
|
||||
Cx86cpuid p;
|
||||
int firm;
|
||||
UInt32 family, model;
|
||||
if (!x86cpuid_CheckAndRead(&p))
|
||||
return True;
|
||||
|
||||
family = x86cpuid_GetFamily(p.ver);
|
||||
model = x86cpuid_GetModel(p.ver);
|
||||
|
||||
firm = x86cpuid_GetFirm(&p);
|
||||
family = x86cpuid_ver_GetFamily(p.ver);
|
||||
model = x86cpuid_ver_GetModel(p.ver);
|
||||
|
||||
switch (firm)
|
||||
switch (x86cpuid_GetFirm(&p))
|
||||
{
|
||||
case CPU_FIRM_INTEL: return (family < 6 || (family == 6 && (
|
||||
/* In-Order Atom CPU */
|
||||
model == 0x1C /* 45 nm, N4xx, D4xx, N5xx, D5xx, 230, 330 */
|
||||
|| model == 0x26 /* 45 nm, Z6xx */
|
||||
|| model == 0x27 /* 32 nm, Z2460 */
|
||||
|| model == 0x35 /* 32 nm, Z2760 */
|
||||
|| model == 0x36 /* 32 nm, N2xxx, D2xxx */
|
||||
// In-Order Atom CPU
|
||||
model == 0x1C // 45 nm, N4xx, D4xx, N5xx, D5xx, 230, 330
|
||||
|| model == 0x26 // 45 nm, Z6xx
|
||||
|| model == 0x27 // 32 nm, Z2460
|
||||
|| model == 0x35 // 32 nm, Z2760
|
||||
|| model == 0x36 // 32 nm, N2xxx, D2xxx
|
||||
)));
|
||||
case CPU_FIRM_AMD: return (family < 5 || (family == 5 && (model < 6 || model == 0xA)));
|
||||
case CPU_FIRM_VIA: return (family < 6 || (family == 6 && model < 0xF));
|
||||
}
|
||||
return True;
|
||||
return False; // v23 : unknown processors are not In-Order
|
||||
}
|
||||
*/
|
||||
|
||||
#ifdef _WIN32
|
||||
#include "7zWindows.h"
|
||||
#endif
|
||||
|
||||
#if !defined(MY_CPU_AMD64) && defined(_WIN32)
|
||||
#include <Windows.h>
|
||||
static BoolInt CPU_Sys_Is_SSE_Supported()
|
||||
|
||||
/* for legacy SSE ia32: there is no user-space cpu instruction to check
|
||||
that OS supports SSE register storing/restoring on context switches.
|
||||
So we need some OS-specific function to check that it's safe to use SSE registers.
|
||||
*/
|
||||
|
||||
Z7_FORCE_INLINE
|
||||
static BoolInt CPU_Sys_Is_SSE_Supported(void)
|
||||
{
|
||||
OSVERSIONINFO vi;
|
||||
vi.dwOSVersionInfoSize = sizeof(vi);
|
||||
if (!GetVersionEx(&vi))
|
||||
return False;
|
||||
return (vi.dwMajorVersion >= 5);
|
||||
#ifdef _MSC_VER
|
||||
#pragma warning(push)
|
||||
#pragma warning(disable : 4996) // `GetVersion': was declared deprecated
|
||||
#endif
|
||||
/* low byte is major version of Windows
|
||||
We suppose that any Windows version since
|
||||
Windows2000 (major == 5) supports SSE registers */
|
||||
return (Byte)GetVersion() >= 5;
|
||||
#if defined(_MSC_VER)
|
||||
#pragma warning(pop)
|
||||
#endif
|
||||
}
|
||||
#define CHECK_SYS_SSE_SUPPORT if (!CPU_Sys_Is_SSE_Supported()) return False;
|
||||
#else
|
||||
|
@ -232,94 +377,300 @@ static BoolInt CPU_Sys_Is_SSE_Supported()
|
|||
#endif
|
||||
|
||||
|
||||
static UInt32 X86_CPUID_ECX_Get_Flags()
|
||||
#if !defined(MY_CPU_AMD64)
|
||||
|
||||
BoolInt CPU_IsSupported_CMOV(void)
|
||||
{
|
||||
Cx86cpuid p;
|
||||
CHECK_SYS_SSE_SUPPORT
|
||||
if (!x86cpuid_CheckAndRead(&p))
|
||||
UInt32 a[4];
|
||||
if (!x86cpuid_Func_1(&a[0]))
|
||||
return 0;
|
||||
return p.c;
|
||||
return (a[3] >> 15) & 1;
|
||||
}
|
||||
|
||||
BoolInt CPU_IsSupported_AES()
|
||||
BoolInt CPU_IsSupported_SSE(void)
|
||||
{
|
||||
return (X86_CPUID_ECX_Get_Flags() >> 25) & 1;
|
||||
}
|
||||
|
||||
BoolInt CPU_IsSupported_SSSE3()
|
||||
{
|
||||
return (X86_CPUID_ECX_Get_Flags() >> 9) & 1;
|
||||
}
|
||||
|
||||
BoolInt CPU_IsSupported_SSE41()
|
||||
{
|
||||
return (X86_CPUID_ECX_Get_Flags() >> 19) & 1;
|
||||
}
|
||||
|
||||
BoolInt CPU_IsSupported_SHA()
|
||||
{
|
||||
Cx86cpuid p;
|
||||
UInt32 a[4];
|
||||
CHECK_SYS_SSE_SUPPORT
|
||||
if (!x86cpuid_CheckAndRead(&p))
|
||||
return False;
|
||||
if (!x86cpuid_Func_1(&a[0]))
|
||||
return 0;
|
||||
return (a[3] >> 25) & 1;
|
||||
}
|
||||
|
||||
if (p.maxFunc < 7)
|
||||
BoolInt CPU_IsSupported_SSE2(void)
|
||||
{
|
||||
UInt32 a[4];
|
||||
CHECK_SYS_SSE_SUPPORT
|
||||
if (!x86cpuid_Func_1(&a[0]))
|
||||
return 0;
|
||||
return (a[3] >> 26) & 1;
|
||||
}
|
||||
|
||||
#endif
|
||||
|
||||
|
||||
static UInt32 x86cpuid_Func_1_ECX(void)
|
||||
{
|
||||
UInt32 a[4];
|
||||
CHECK_SYS_SSE_SUPPORT
|
||||
if (!x86cpuid_Func_1(&a[0]))
|
||||
return 0;
|
||||
return a[2];
|
||||
}
|
||||
|
||||
BoolInt CPU_IsSupported_AES(void)
|
||||
{
|
||||
return (x86cpuid_Func_1_ECX() >> 25) & 1;
|
||||
}
|
||||
|
||||
BoolInt CPU_IsSupported_SSSE3(void)
|
||||
{
|
||||
return (x86cpuid_Func_1_ECX() >> 9) & 1;
|
||||
}
|
||||
|
||||
BoolInt CPU_IsSupported_SSE41(void)
|
||||
{
|
||||
return (x86cpuid_Func_1_ECX() >> 19) & 1;
|
||||
}
|
||||
|
||||
BoolInt CPU_IsSupported_SHA(void)
|
||||
{
|
||||
CHECK_SYS_SSE_SUPPORT
|
||||
|
||||
if (z7_x86_cpuid_GetMaxFunc() < 7)
|
||||
return False;
|
||||
{
|
||||
UInt32 d[4] = { 0 };
|
||||
MyCPUID(7, &d[0], &d[1], &d[2], &d[3]);
|
||||
UInt32 d[4];
|
||||
z7_x86_cpuid(d, 7);
|
||||
return (d[1] >> 29) & 1;
|
||||
}
|
||||
}
|
||||
|
||||
// #include <stdio.h>
|
||||
/*
|
||||
MSVC: _xgetbv() intrinsic is available since VS2010SP1.
|
||||
MSVC also defines (_XCR_XFEATURE_ENABLED_MASK) macro in
|
||||
<immintrin.h> that we can use or check.
|
||||
For any 32-bit x86 we can use asm code in MSVC,
|
||||
but MSVC asm code is huge after compilation.
|
||||
So _xgetbv() is better
|
||||
|
||||
#ifdef _WIN32
|
||||
#include <Windows.h>
|
||||
ICC: _xgetbv() intrinsic is available (in what version of ICC?)
|
||||
ICC defines (__GNUC___) and it supports gnu assembler
|
||||
also ICC supports MASM style code with -use-msasm switch.
|
||||
but ICC doesn't support __attribute__((__target__))
|
||||
|
||||
GCC/CLANG 9:
|
||||
_xgetbv() is macro that works via __builtin_ia32_xgetbv()
|
||||
and we need __attribute__((__target__("xsave")).
|
||||
But with __target__("xsave") the function will be not
|
||||
inlined to function that has no __target__("xsave") attribute.
|
||||
If we want _xgetbv() call inlining, then we should use asm version
|
||||
instead of calling _xgetbv().
|
||||
Note: intrinsic is broken before GCC 8.2:
|
||||
https://gcc.gnu.org/bugzilla/show_bug.cgi?id=85684
|
||||
*/
|
||||
|
||||
#if defined(__INTEL_COMPILER) && (__INTEL_COMPILER >= 1100) \
|
||||
|| defined(_MSC_VER) && (_MSC_VER >= 1600) && (_MSC_FULL_VER >= 160040219) \
|
||||
|| defined(__GNUC__) && (__GNUC__ >= 9) \
|
||||
|| defined(__clang__) && (__clang_major__ >= 9)
|
||||
// we define ATTRIB_XGETBV, if we want to use predefined _xgetbv() from compiler
|
||||
#if defined(__INTEL_COMPILER)
|
||||
#define ATTRIB_XGETBV
|
||||
#elif defined(__GNUC__) || defined(__clang__)
|
||||
// we don't define ATTRIB_XGETBV here, because asm version is better for inlining.
|
||||
// #define ATTRIB_XGETBV __attribute__((__target__("xsave")))
|
||||
#else
|
||||
#define ATTRIB_XGETBV
|
||||
#endif
|
||||
#endif
|
||||
|
||||
BoolInt CPU_IsSupported_AVX2()
|
||||
{
|
||||
Cx86cpuid p;
|
||||
CHECK_SYS_SSE_SUPPORT
|
||||
#if defined(ATTRIB_XGETBV)
|
||||
#include <immintrin.h>
|
||||
#endif
|
||||
|
||||
|
||||
// XFEATURE_ENABLED_MASK/XCR0
|
||||
#define MY_XCR_XFEATURE_ENABLED_MASK 0
|
||||
|
||||
#if defined(ATTRIB_XGETBV)
|
||||
ATTRIB_XGETBV
|
||||
#endif
|
||||
static UInt64 x86_xgetbv_0(UInt32 num)
|
||||
{
|
||||
#if defined(ATTRIB_XGETBV)
|
||||
{
|
||||
return
|
||||
#if (defined(_MSC_VER))
|
||||
_xgetbv(num);
|
||||
#else
|
||||
__builtin_ia32_xgetbv(
|
||||
#if !defined(__clang__)
|
||||
(int)
|
||||
#endif
|
||||
num);
|
||||
#endif
|
||||
}
|
||||
|
||||
#elif defined(__GNUC__) || defined(__clang__) || defined(__SUNPRO_CC)
|
||||
|
||||
UInt32 a, d;
|
||||
#if defined(__GNUC__) && (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 4))
|
||||
__asm__
|
||||
(
|
||||
"xgetbv"
|
||||
: "=a"(a), "=d"(d) : "c"(num) : "cc"
|
||||
);
|
||||
#else // is old gcc
|
||||
__asm__
|
||||
(
|
||||
".byte 0x0f, 0x01, 0xd0" "\n\t"
|
||||
: "=a"(a), "=d"(d) : "c"(num) : "cc"
|
||||
);
|
||||
#endif
|
||||
return ((UInt64)d << 32) | a;
|
||||
// return a;
|
||||
|
||||
#elif defined(_MSC_VER) && !defined(MY_CPU_AMD64)
|
||||
|
||||
UInt32 a, d;
|
||||
__asm {
|
||||
push eax
|
||||
push edx
|
||||
push ecx
|
||||
mov ecx, num;
|
||||
// xor ecx, ecx // = MY_XCR_XFEATURE_ENABLED_MASK
|
||||
_emit 0x0f
|
||||
_emit 0x01
|
||||
_emit 0xd0
|
||||
mov a, eax
|
||||
mov d, edx
|
||||
pop ecx
|
||||
pop edx
|
||||
pop eax
|
||||
}
|
||||
return ((UInt64)d << 32) | a;
|
||||
// return a;
|
||||
|
||||
#else // it's unknown compiler
|
||||
// #error "Need xgetbv function"
|
||||
UNUSED_VAR(num)
|
||||
// for MSVC-X64 we could call external function from external file.
|
||||
/* Actually we had checked OSXSAVE/AVX in cpuid before.
|
||||
So it's expected that OS supports at least AVX and below. */
|
||||
// if (num != MY_XCR_XFEATURE_ENABLED_MASK) return 0; // if not XCR0
|
||||
return
|
||||
// (1 << 0) | // x87
|
||||
(1 << 1) // SSE
|
||||
| (1 << 2); // AVX
|
||||
|
||||
#endif
|
||||
}
|
||||
|
||||
#ifdef _WIN32
|
||||
/*
|
||||
Windows versions do not know about new ISA extensions that
|
||||
can be introduced. But we still can use new extensions,
|
||||
even if Windows doesn't report about supporting them,
|
||||
But we can use new extensions, only if Windows knows about new ISA extension
|
||||
that changes the number or size of registers: SSE, AVX/XSAVE, AVX512
|
||||
So it's enough to check
|
||||
MY_PF_AVX_INSTRUCTIONS_AVAILABLE
|
||||
instead of
|
||||
MY_PF_AVX2_INSTRUCTIONS_AVAILABLE
|
||||
*/
|
||||
#define MY_PF_XSAVE_ENABLED 17
|
||||
// #define MY_PF_SSSE3_INSTRUCTIONS_AVAILABLE 36
|
||||
// #define MY_PF_SSE4_1_INSTRUCTIONS_AVAILABLE 37
|
||||
// #define MY_PF_SSE4_2_INSTRUCTIONS_AVAILABLE 38
|
||||
// #define MY_PF_AVX_INSTRUCTIONS_AVAILABLE 39
|
||||
// #define MY_PF_AVX2_INSTRUCTIONS_AVAILABLE 40
|
||||
// #define MY_PF_AVX512F_INSTRUCTIONS_AVAILABLE 41
|
||||
#endif
|
||||
|
||||
BoolInt CPU_IsSupported_AVX(void)
|
||||
{
|
||||
#ifdef _WIN32
|
||||
#define MY__PF_XSAVE_ENABLED 17
|
||||
if (!IsProcessorFeaturePresent(MY__PF_XSAVE_ENABLED))
|
||||
if (!IsProcessorFeaturePresent(MY_PF_XSAVE_ENABLED))
|
||||
return False;
|
||||
/* PF_AVX_INSTRUCTIONS_AVAILABLE probably is supported starting from
|
||||
some latest Win10 revisions. But we need AVX in older Windows also.
|
||||
So we don't use the following check: */
|
||||
/*
|
||||
if (!IsProcessorFeaturePresent(MY_PF_AVX_INSTRUCTIONS_AVAILABLE))
|
||||
return False;
|
||||
*/
|
||||
#endif
|
||||
|
||||
if (!x86cpuid_CheckAndRead(&p))
|
||||
/*
|
||||
OS must use new special XSAVE/XRSTOR instructions to save
|
||||
AVX registers when it required for context switching.
|
||||
At OS starting:
|
||||
OS sets CR4.OSXSAVE flag to signal the processor that OS supports the XSAVE extensions.
|
||||
Also OS sets bitmask in XCR0 register that defines what
|
||||
registers will be processed by XSAVE instruction:
|
||||
XCR0.SSE[bit 0] - x87 registers and state
|
||||
XCR0.SSE[bit 1] - SSE registers and state
|
||||
XCR0.AVX[bit 2] - AVX registers and state
|
||||
CR4.OSXSAVE is reflected to CPUID.1:ECX.OSXSAVE[bit 27].
|
||||
So we can read that bit in user-space.
|
||||
XCR0 is available for reading in user-space by new XGETBV instruction.
|
||||
*/
|
||||
{
|
||||
const UInt32 c = x86cpuid_Func_1_ECX();
|
||||
if (0 == (1
|
||||
& (c >> 28) // AVX instructions are supported by hardware
|
||||
& (c >> 27))) // OSXSAVE bit: XSAVE and related instructions are enabled by OS.
|
||||
return False;
|
||||
}
|
||||
|
||||
/* also we can check
|
||||
CPUID.1:ECX.XSAVE [bit 26] : that shows that
|
||||
XSAVE, XRESTOR, XSETBV, XGETBV instructions are supported by hardware.
|
||||
But that check is redundant, because if OSXSAVE bit is set, then XSAVE is also set */
|
||||
|
||||
/* If OS have enabled XSAVE extension instructions (OSXSAVE == 1),
|
||||
in most cases we expect that OS also will support storing/restoring
|
||||
for AVX and SSE states at least.
|
||||
But to be sure of that, we call the user-space instruction
|
||||
XGETBV(0) to get XCR0 value that contains bitmask that defines
|
||||
what exact states(registers) OS have enabled for storing/restoring.
|
||||
*/
|
||||
|
||||
{
|
||||
const UInt32 bm = (UInt32)x86_xgetbv_0(MY_XCR_XFEATURE_ENABLED_MASK);
|
||||
// printf("\n=== XGetBV=%d\n", bm);
|
||||
return 1
|
||||
& (bm >> 1) // SSE state is supported (set by OS) for storing/restoring
|
||||
& (bm >> 2); // AVX state is supported (set by OS) for storing/restoring
|
||||
}
|
||||
// since Win7SP1: we can use GetEnabledXStateFeatures();
|
||||
}
|
||||
|
||||
|
||||
BoolInt CPU_IsSupported_AVX2(void)
|
||||
{
|
||||
if (!CPU_IsSupported_AVX())
|
||||
return False;
|
||||
if (p.maxFunc < 7)
|
||||
if (z7_x86_cpuid_GetMaxFunc() < 7)
|
||||
return False;
|
||||
{
|
||||
UInt32 d[4] = { 0 };
|
||||
MyCPUID(7, &d[0], &d[1], &d[2], &d[3]);
|
||||
UInt32 d[4];
|
||||
z7_x86_cpuid(d, 7);
|
||||
// printf("\ncpuid(7): ebx=%8x ecx=%8x\n", d[1], d[2]);
|
||||
return 1
|
||||
& (d[1] >> 5); // avx2
|
||||
}
|
||||
}
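The BoolInt predicates above are the public surface of this file. Below is a minimal, hypothetical sketch (not part of this commit) of how a caller queries them; LzFindPrepare() in LzFind.c, further down in this commit, uses the same checks to select its SSE4.1/AVX2 normalization routine once at start-up. It assumes the declarations from CpuArch.h.

#include <stdio.h>
#include "CpuArch.h"

int main(void)
{
#ifdef MY_CPU_X86_OR_AMD64
  printf("SSSE3  : %d\n", (int)CPU_IsSupported_SSSE3());
  printf("SSE4.1 : %d\n", (int)CPU_IsSupported_SSE41());
  printf("AES-NI : %d\n", (int)CPU_IsSupported_AES());
  printf("SHA    : %d\n", (int)CPU_IsSupported_SHA());
  printf("AVX2   : %d\n", (int)CPU_IsSupported_AVX2()); /* includes the OS XSAVE/XCR0 check above */
#endif
  return 0;
}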
|
||||
|
||||
BoolInt CPU_IsSupported_VAES_AVX2()
|
||||
BoolInt CPU_IsSupported_VAES_AVX2(void)
|
||||
{
|
||||
Cx86cpuid p;
|
||||
CHECK_SYS_SSE_SUPPORT
|
||||
|
||||
#ifdef _WIN32
|
||||
#define MY__PF_XSAVE_ENABLED 17
|
||||
if (!IsProcessorFeaturePresent(MY__PF_XSAVE_ENABLED))
|
||||
if (!CPU_IsSupported_AVX())
|
||||
return False;
|
||||
#endif
|
||||
|
||||
if (!x86cpuid_CheckAndRead(&p))
|
||||
return False;
|
||||
if (p.maxFunc < 7)
|
||||
if (z7_x86_cpuid_GetMaxFunc() < 7)
|
||||
return False;
|
||||
{
|
||||
UInt32 d[4] = { 0 };
|
||||
MyCPUID(7, &d[0], &d[1], &d[2], &d[3]);
|
||||
UInt32 d[4];
|
||||
z7_x86_cpuid(d, 7);
|
||||
// printf("\ncpuid(7): ebx=%8x ecx=%8x\n", d[1], d[2]);
|
||||
return 1
|
||||
& (d[1] >> 5) // avx2
|
||||
|
@ -328,20 +679,15 @@ BoolInt CPU_IsSupported_VAES_AVX2()
|
|||
}
|
||||
}
|
||||
|
||||
BoolInt CPU_IsSupported_PageGB()
|
||||
BoolInt CPU_IsSupported_PageGB(void)
|
||||
{
|
||||
Cx86cpuid cpuid;
|
||||
if (!x86cpuid_CheckAndRead(&cpuid))
|
||||
return False;
|
||||
CHECK_CPUID_IS_SUPPORTED
|
||||
{
|
||||
UInt32 d[4] = { 0 };
|
||||
MyCPUID(0x80000000, &d[0], &d[1], &d[2], &d[3]);
|
||||
UInt32 d[4];
|
||||
z7_x86_cpuid(d, 0x80000000);
|
||||
if (d[0] < 0x80000001)
|
||||
return False;
|
||||
}
|
||||
{
|
||||
UInt32 d[4] = { 0 };
|
||||
MyCPUID(0x80000001, &d[0], &d[1], &d[2], &d[3]);
|
||||
z7_x86_cpuid(d, 0x80000001);
|
||||
return (d[3] >> 26) & 1;
|
||||
}
|
||||
}
|
||||
|
@ -351,11 +697,11 @@ BoolInt CPU_IsSupported_PageGB()
|
|||
|
||||
#ifdef _WIN32
|
||||
|
||||
#include <Windows.h>
|
||||
#include "7zWindows.h"
|
||||
|
||||
BoolInt CPU_IsSupported_CRC32() { return IsProcessorFeaturePresent(PF_ARM_V8_CRC32_INSTRUCTIONS_AVAILABLE) ? 1 : 0; }
|
||||
BoolInt CPU_IsSupported_CRYPTO() { return IsProcessorFeaturePresent(PF_ARM_V8_CRYPTO_INSTRUCTIONS_AVAILABLE) ? 1 : 0; }
|
||||
BoolInt CPU_IsSupported_NEON() { return IsProcessorFeaturePresent(PF_ARM_NEON_INSTRUCTIONS_AVAILABLE) ? 1 : 0; }
|
||||
BoolInt CPU_IsSupported_CRC32(void) { return IsProcessorFeaturePresent(PF_ARM_V8_CRC32_INSTRUCTIONS_AVAILABLE) ? 1 : 0; }
|
||||
BoolInt CPU_IsSupported_CRYPTO(void) { return IsProcessorFeaturePresent(PF_ARM_V8_CRYPTO_INSTRUCTIONS_AVAILABLE) ? 1 : 0; }
|
||||
BoolInt CPU_IsSupported_NEON(void) { return IsProcessorFeaturePresent(PF_ARM_NEON_INSTRUCTIONS_AVAILABLE) ? 1 : 0; }
|
||||
|
||||
#else
|
||||
|
||||
|
@ -378,28 +724,27 @@ static void Print_sysctlbyname(const char *name)
|
|||
}
|
||||
}
|
||||
*/
|
||||
/*
|
||||
Print_sysctlbyname("hw.pagesize");
|
||||
Print_sysctlbyname("machdep.cpu.brand_string");
|
||||
*/
|
||||
|
||||
static BoolInt My_sysctlbyname_Get_BoolInt(const char *name)
|
||||
static BoolInt z7_sysctlbyname_Get_BoolInt(const char *name)
|
||||
{
|
||||
UInt32 val = 0;
|
||||
if (My_sysctlbyname_Get_UInt32(name, &val) == 0 && val == 1)
|
||||
if (z7_sysctlbyname_Get_UInt32(name, &val) == 0 && val == 1)
|
||||
return 1;
|
||||
return 0;
|
||||
}
|
||||
|
||||
/*
|
||||
Print_sysctlbyname("hw.pagesize");
|
||||
Print_sysctlbyname("machdep.cpu.brand_string");
|
||||
*/
|
||||
|
||||
BoolInt CPU_IsSupported_CRC32(void)
|
||||
{
|
||||
return My_sysctlbyname_Get_BoolInt("hw.optional.armv8_crc32");
|
||||
return z7_sysctlbyname_Get_BoolInt("hw.optional.armv8_crc32");
|
||||
}
|
||||
|
||||
BoolInt CPU_IsSupported_NEON(void)
|
||||
{
|
||||
return My_sysctlbyname_Get_BoolInt("hw.optional.neon");
|
||||
return z7_sysctlbyname_Get_BoolInt("hw.optional.neon");
|
||||
}
|
||||
|
||||
#ifdef MY_CPU_ARM64
|
||||
|
@ -461,15 +806,15 @@ MY_HWCAP_CHECK_FUNC (AES)
|
|||
|
||||
#include <sys/sysctl.h>
|
||||
|
||||
int My_sysctlbyname_Get(const char *name, void *buf, size_t *bufSize)
|
||||
int z7_sysctlbyname_Get(const char *name, void *buf, size_t *bufSize)
|
||||
{
|
||||
return sysctlbyname(name, buf, bufSize, NULL, 0);
|
||||
}
|
||||
|
||||
int My_sysctlbyname_Get_UInt32(const char *name, UInt32 *val)
|
||||
int z7_sysctlbyname_Get_UInt32(const char *name, UInt32 *val)
|
||||
{
|
||||
size_t bufSize = sizeof(*val);
|
||||
int res = My_sysctlbyname_Get(name, val, &bufSize);
|
||||
const int res = z7_sysctlbyname_Get(name, val, &bufSize);
|
||||
if (res == 0 && bufSize != sizeof(*val))
|
||||
return EFAULT;
|
||||
return res;
|
||||
|
|
|
@ -0,0 +1,111 @@
|
|||
/* DllSecur.c -- DLL loading security
2023-04-02 : Igor Pavlov : Public domain */

#include "Precomp.h"

#ifdef _WIN32

#include "7zWindows.h"

#include "DllSecur.h"

#ifndef UNDER_CE

#if (defined(__GNUC__) && (__GNUC__ >= 8)) || defined(__clang__)
  // #pragma GCC diagnostic ignored "-Wcast-function-type"
#endif

#if defined(__clang__) || defined(__GNUC__)
typedef void (*Z7_voidFunction)(void);
#define MY_CAST_FUNC  (Z7_voidFunction)
#elif defined(_MSC_VER) && _MSC_VER > 1920
#define MY_CAST_FUNC (void *)
// #pragma warning(disable : 4191) // 'type cast': unsafe conversion from 'FARPROC' to 'void (__cdecl *)()'
#else
#define MY_CAST_FUNC
#endif

typedef BOOL (WINAPI *Func_SetDefaultDllDirectories)(DWORD DirectoryFlags);

#define MY_LOAD_LIBRARY_SEARCH_USER_DIRS 0x400
#define MY_LOAD_LIBRARY_SEARCH_SYSTEM32  0x800

#define DELIM "\0"

static const char * const g_Dlls =
         "userenv"
  DELIM  "setupapi"
  DELIM  "apphelp"
  DELIM  "propsys"
  DELIM  "dwmapi"
  DELIM  "cryptbase"
  DELIM  "oleacc"
  DELIM  "clbcatq"
  DELIM  "version"
  #ifndef _CONSOLE
  DELIM  "uxtheme"
  #endif
  DELIM;

#endif

#ifdef __clang__
  #pragma GCC diagnostic ignored "-Wdeprecated-declarations"
#endif
#if defined (_MSC_VER) && _MSC_VER >= 1900
// sysinfoapi.h: kit10: GetVersion was declared deprecated
#pragma warning(disable : 4996)
#endif

#define IF_NON_VISTA_SET_DLL_DIRS_AND_RETURN \
  if ((UInt16)GetVersion() != 6) { \
    const \
     Func_SetDefaultDllDirectories setDllDirs = \
      (Func_SetDefaultDllDirectories) MY_CAST_FUNC GetProcAddress(GetModuleHandle(TEXT("kernel32.dll")), \
           "SetDefaultDllDirectories"); \
    if (setDllDirs) if (setDllDirs(MY_LOAD_LIBRARY_SEARCH_SYSTEM32 | MY_LOAD_LIBRARY_SEARCH_USER_DIRS)) return; }

void My_SetDefaultDllDirectories(void)
{
  #ifndef UNDER_CE
  IF_NON_VISTA_SET_DLL_DIRS_AND_RETURN
  #endif
}


void LoadSecurityDlls(void)
{
  #ifndef UNDER_CE
  // at Vista (ver 6.0) : CoCreateInstance(CLSID_ShellLink, ...) doesn't work after SetDefaultDllDirectories() : Check it ???
  IF_NON_VISTA_SET_DLL_DIRS_AND_RETURN
  {
    wchar_t buf[MAX_PATH + 100];
    const char *dll;
    unsigned pos = GetSystemDirectoryW(buf, MAX_PATH + 2);
    if (pos == 0 || pos > MAX_PATH)
      return;
    if (buf[pos - 1] != '\\')
      buf[pos++] = '\\';
    for (dll = g_Dlls; *dll != 0;)
    {
      wchar_t *dest = &buf[pos];
      for (;;)
      {
        const char c = *dll++;
        if (c == 0)
          break;
        *dest++ = (Byte)c;
      }
      dest[0] = '.';
      dest[1] = 'd';
      dest[2] = 'l';
      dest[3] = 'l';
      dest[4] = 0;
      // lstrcatW(buf, L".dll");
      LoadLibraryExW(buf, NULL, LOAD_WITH_ALTERED_SEARCH_PATH);
    }
  }
  #endif
}

#endif // _WIN32
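For context, a minimal, hypothetical usage sketch (not part of this commit) of the two entry points this new file declares in DllSecur.h. The idea is to call one of them as early as possible, before anything can trigger an implicit LoadLibrary of the DLLs listed in g_Dlls; wmain and the surrounding program are illustrative only, and the sketch is Windows-specific.

#include <wchar.h>
#include "DllSecur.h"   /* My_SetDefaultDllDirectories(), LoadSecurityDlls() */

int wmain(int argc, wchar_t **argv)
{
  (void)argc; (void)argv;
  /* Restrict the DLL search path when SetDefaultDllDirectories() is
     available; otherwise the function pre-loads the DLLs named in g_Dlls
     from the system directory, so a planted copy next to the EXE is not
     picked up later. */
  LoadSecurityDlls();

  /* ... application code ... */
  return 0;
}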
|
|
@ -1,5 +1,5 @@
|
|||
/* LzFind.c -- Match finder for LZ algorithms
|
||||
2021-11-29 : Igor Pavlov : Public domain */
|
||||
2023-03-14 : Igor Pavlov : Public domain */
|
||||
|
||||
#include "Precomp.h"
|
||||
|
||||
|
@ -17,7 +17,7 @@
|
|||
#define kEmptyHashValue 0
|
||||
|
||||
#define kMaxValForNormalize ((UInt32)0)
|
||||
// #define kMaxValForNormalize ((UInt32)(1 << 20) + 0xFFF) // for debug
|
||||
// #define kMaxValForNormalize ((UInt32)(1 << 20) + 0xfff) // for debug
|
||||
|
||||
// #define kNormalizeAlign (1 << 7) // alignment for speculated accesses
|
||||
|
||||
|
@ -67,10 +67,10 @@
|
|||
|
||||
static void LzInWindow_Free(CMatchFinder *p, ISzAllocPtr alloc)
|
||||
{
|
||||
if (!p->directInput)
|
||||
// if (!p->directInput)
|
||||
{
|
||||
ISzAlloc_Free(alloc, p->bufferBase);
|
||||
p->bufferBase = NULL;
|
||||
ISzAlloc_Free(alloc, p->bufBase);
|
||||
p->bufBase = NULL;
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -79,7 +79,7 @@ static int LzInWindow_Create2(CMatchFinder *p, UInt32 blockSize, ISzAllocPtr all
|
|||
{
|
||||
if (blockSize == 0)
|
||||
return 0;
|
||||
if (!p->bufferBase || p->blockSize != blockSize)
|
||||
if (!p->bufBase || p->blockSize != blockSize)
|
||||
{
|
||||
// size_t blockSizeT;
|
||||
LzInWindow_Free(p, alloc);
|
||||
|
@ -101,11 +101,11 @@ static int LzInWindow_Create2(CMatchFinder *p, UInt32 blockSize, ISzAllocPtr all
|
|||
#endif
|
||||
*/
|
||||
|
||||
p->bufferBase = (Byte *)ISzAlloc_Alloc(alloc, blockSize);
|
||||
// printf("\nbufferBase = %p\n", p->bufferBase);
|
||||
p->bufBase = (Byte *)ISzAlloc_Alloc(alloc, blockSize);
|
||||
// printf("\nbufferBase = %p\n", p->bufBase);
|
||||
// return 0; // for debug
|
||||
}
|
||||
return (p->bufferBase != NULL);
|
||||
return (p->bufBase != NULL);
|
||||
}
|
||||
|
||||
static const Byte *MatchFinder_GetPointerToCurrentPos(CMatchFinder *p) { return p->buffer; }
|
||||
|
@ -113,7 +113,7 @@ static const Byte *MatchFinder_GetPointerToCurrentPos(CMatchFinder *p) { return
|
|||
static UInt32 MatchFinder_GetNumAvailableBytes(CMatchFinder *p) { return GET_AVAIL_BYTES(p); }
|
||||
|
||||
|
||||
MY_NO_INLINE
|
||||
Z7_NO_INLINE
|
||||
static void MatchFinder_ReadBlock(CMatchFinder *p)
|
||||
{
|
||||
if (p->streamEndWasReached || p->result != SZ_OK)
|
||||
|
@ -127,8 +127,8 @@ static void MatchFinder_ReadBlock(CMatchFinder *p)
|
|||
UInt32 curSize = 0xFFFFFFFF - GET_AVAIL_BYTES(p);
|
||||
if (curSize > p->directInputRem)
|
||||
curSize = (UInt32)p->directInputRem;
|
||||
p->directInputRem -= curSize;
|
||||
p->streamPos += curSize;
|
||||
p->directInputRem -= curSize;
|
||||
if (p->directInputRem == 0)
|
||||
p->streamEndWasReached = 1;
|
||||
return;
|
||||
|
@ -136,8 +136,8 @@ static void MatchFinder_ReadBlock(CMatchFinder *p)
|
|||
|
||||
for (;;)
|
||||
{
|
||||
Byte *dest = p->buffer + GET_AVAIL_BYTES(p);
|
||||
size_t size = (size_t)(p->bufferBase + p->blockSize - dest);
|
||||
const Byte *dest = p->buffer + GET_AVAIL_BYTES(p);
|
||||
size_t size = (size_t)(p->bufBase + p->blockSize - dest);
|
||||
if (size == 0)
|
||||
{
|
||||
/* we call ReadBlock() after NeedMove() and MoveBlock().
|
||||
|
@ -153,7 +153,14 @@ static void MatchFinder_ReadBlock(CMatchFinder *p)
|
|||
// #define kRead 3
|
||||
// if (size > kRead) size = kRead; // for debug
|
||||
|
||||
p->result = ISeqInStream_Read(p->stream, dest, &size);
|
||||
/*
|
||||
// we need cast (Byte *)dest.
|
||||
#ifdef __clang__
|
||||
#pragma GCC diagnostic ignored "-Wcast-qual"
|
||||
#endif
|
||||
*/
|
||||
p->result = ISeqInStream_Read(p->stream,
|
||||
p->bufBase + (dest - p->bufBase), &size);
|
||||
if (p->result != SZ_OK)
|
||||
return;
|
||||
if (size == 0)
|
||||
|
@ -173,14 +180,14 @@ static void MatchFinder_ReadBlock(CMatchFinder *p)
|
|||
|
||||
|
||||
|
||||
MY_NO_INLINE
|
||||
Z7_NO_INLINE
|
||||
void MatchFinder_MoveBlock(CMatchFinder *p)
|
||||
{
|
||||
const size_t offset = (size_t)(p->buffer - p->bufferBase) - p->keepSizeBefore;
|
||||
const size_t offset = (size_t)(p->buffer - p->bufBase) - p->keepSizeBefore;
|
||||
const size_t keepBefore = (offset & (kBlockMoveAlign - 1)) + p->keepSizeBefore;
|
||||
p->buffer = p->bufferBase + keepBefore;
|
||||
memmove(p->bufferBase,
|
||||
p->bufferBase + (offset & ~((size_t)kBlockMoveAlign - 1)),
|
||||
p->buffer = p->bufBase + keepBefore;
|
||||
memmove(p->bufBase,
|
||||
p->bufBase + (offset & ~((size_t)kBlockMoveAlign - 1)),
|
||||
keepBefore + (size_t)GET_AVAIL_BYTES(p));
|
||||
}
|
||||
|
||||
|
@ -198,7 +205,7 @@ int MatchFinder_NeedMove(CMatchFinder *p)
|
|||
return 0;
|
||||
if (p->streamEndWasReached || p->result != SZ_OK)
|
||||
return 0;
|
||||
return ((size_t)(p->bufferBase + p->blockSize - p->buffer) <= p->keepSizeAfter);
|
||||
return ((size_t)(p->bufBase + p->blockSize - p->buffer) <= p->keepSizeAfter);
|
||||
}
|
||||
|
||||
void MatchFinder_ReadIfRequired(CMatchFinder *p)
|
||||
|
@ -214,6 +221,8 @@ static void MatchFinder_SetDefaultSettings(CMatchFinder *p)
|
|||
p->cutValue = 32;
|
||||
p->btMode = 1;
|
||||
p->numHashBytes = 4;
|
||||
p->numHashBytes_Min = 2;
|
||||
p->numHashOutBits = 0;
|
||||
p->bigHash = 0;
|
||||
}
|
||||
|
||||
|
@ -222,8 +231,10 @@ static void MatchFinder_SetDefaultSettings(CMatchFinder *p)
|
|||
void MatchFinder_Construct(CMatchFinder *p)
|
||||
{
|
||||
unsigned i;
|
||||
p->bufferBase = NULL;
|
||||
p->buffer = NULL;
|
||||
p->bufBase = NULL;
|
||||
p->directInput = 0;
|
||||
p->stream = NULL;
|
||||
p->hash = NULL;
|
||||
p->expectedDataSize = (UInt64)(Int64)-1;
|
||||
MatchFinder_SetDefaultSettings(p);
|
||||
|
@ -238,6 +249,8 @@ void MatchFinder_Construct(CMatchFinder *p)
|
|||
}
|
||||
}
|
||||
|
||||
#undef kCrcPoly
|
||||
|
||||
static void MatchFinder_FreeThisClassMemory(CMatchFinder *p, ISzAllocPtr alloc)
|
||||
{
|
||||
ISzAlloc_Free(alloc, p->hash);
|
||||
|
@ -252,7 +265,7 @@ void MatchFinder_Free(CMatchFinder *p, ISzAllocPtr alloc)
|
|||
|
||||
static CLzRef* AllocRefs(size_t num, ISzAllocPtr alloc)
|
||||
{
|
||||
size_t sizeInBytes = (size_t)num * sizeof(CLzRef);
|
||||
const size_t sizeInBytes = (size_t)num * sizeof(CLzRef);
|
||||
if (sizeInBytes / sizeof(CLzRef) != num)
|
||||
return NULL;
|
||||
return (CLzRef *)ISzAlloc_Alloc(alloc, sizeInBytes);
|
||||
|
@ -298,6 +311,62 @@ static UInt32 GetBlockSize(CMatchFinder *p, UInt32 historySize)
|
|||
}
|
||||
|
||||
|
||||
// input is historySize
|
||||
static UInt32 MatchFinder_GetHashMask2(CMatchFinder *p, UInt32 hs)
|
||||
{
|
||||
if (p->numHashBytes == 2)
|
||||
return (1 << 16) - 1;
|
||||
if (hs != 0)
|
||||
hs--;
|
||||
hs |= (hs >> 1);
|
||||
hs |= (hs >> 2);
|
||||
hs |= (hs >> 4);
|
||||
hs |= (hs >> 8);
|
||||
// we propagated 16 bits in (hs). Low 16 bits must be set later
|
||||
if (hs >= (1 << 24))
|
||||
{
|
||||
if (p->numHashBytes == 3)
|
||||
hs = (1 << 24) - 1;
|
||||
/* if (bigHash) mode, GetHeads4b() in LzFindMt.c needs (hs >= ((1 << 24) - 1))) */
|
||||
}
|
||||
// (hash_size >= (1 << 16)) : Required for (numHashBytes > 2)
|
||||
hs |= (1 << 16) - 1; /* don't change it! */
|
||||
// bt5: we adjust the size with recommended minimum size
|
||||
if (p->numHashBytes >= 5)
|
||||
hs |= (256 << kLzHash_CrcShift_2) - 1;
|
||||
return hs;
|
||||
}
|
||||
|
||||
// input is historySize
|
||||
static UInt32 MatchFinder_GetHashMask(CMatchFinder *p, UInt32 hs)
|
||||
{
|
||||
if (p->numHashBytes == 2)
|
||||
return (1 << 16) - 1;
|
||||
if (hs != 0)
|
||||
hs--;
|
||||
hs |= (hs >> 1);
|
||||
hs |= (hs >> 2);
|
||||
hs |= (hs >> 4);
|
||||
hs |= (hs >> 8);
|
||||
// we propagated 16 bits in (hs). Low 16 bits must be set later
|
||||
hs >>= 1;
|
||||
if (hs >= (1 << 24))
|
||||
{
|
||||
if (p->numHashBytes == 3)
|
||||
hs = (1 << 24) - 1;
|
||||
else
|
||||
hs >>= 1;
|
||||
/* if (bigHash) mode, GetHeads4b() in LzFindMt.c needs (hs >= ((1 << 24) - 1))) */
|
||||
}
|
||||
// (hash_size >= (1 << 16)) : Required for (numHashBytes > 2)
|
||||
hs |= (1 << 16) - 1; /* don't change it! */
|
||||
// bt5: we adjust the size with recommended minimum size
|
||||
if (p->numHashBytes >= 5)
|
||||
hs |= (256 << kLzHash_CrcShift_2) - 1;
|
||||
return hs;
|
||||
}
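A worked illustration of what MatchFinder_GetHashMask() yields (example numbers of mine, not from the commit): for numHashBytes = 4 and a 64 MiB dictionary, the history size is turned into an all-ones mask, halved by the two shifts, and the low 16 bits are forced on, giving a 2^24-entry main hash table.

#include <stdio.h>

typedef unsigned int UInt32;     /* stand-in for the SDK's UInt32 */

int main(void)
{
  UInt32 hs = (UInt32)1 << 26;   /* historySize: 64 MiB dictionary */
  hs--;                          /* 0x03FFFFFF */
  hs |= (hs >> 1);
  hs |= (hs >> 2);
  hs |= (hs >> 4);
  hs |= (hs >> 8);               /* propagate the top bit: still 0x03FFFFFF */
  hs >>= 1;                      /* 0x01FFFFFF */
  if (hs >= ((UInt32)1 << 24))
    hs >>= 1;                    /* numHashBytes == 4 branch: 0x00FFFFFF */
  hs |= (1 << 16) - 1;           /* force the low 16 bits on */
  printf("hashMask = 0x%08X -> %u hash slots\n", hs, hs + 1);
  return 0;
}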
|
||||
|
||||
|
||||
int MatchFinder_Create(CMatchFinder *p, UInt32 historySize,
|
||||
UInt32 keepAddBufferBefore, UInt32 matchMaxLen, UInt32 keepAddBufferAfter,
|
||||
ISzAllocPtr alloc)
|
||||
|
@ -318,78 +387,91 @@ int MatchFinder_Create(CMatchFinder *p, UInt32 historySize,
|
|||
p->blockSize = 0;
|
||||
if (p->directInput || LzInWindow_Create2(p, GetBlockSize(p, historySize), alloc))
|
||||
{
|
||||
const UInt32 newCyclicBufferSize = historySize + 1; // do not change it
|
||||
UInt32 hs;
|
||||
p->matchMaxLen = matchMaxLen;
|
||||
size_t hashSizeSum;
|
||||
{
|
||||
// UInt32 hs4;
|
||||
p->fixedHashSize = 0;
|
||||
hs = (1 << 16) - 1;
|
||||
if (p->numHashBytes != 2)
|
||||
UInt32 hs;
|
||||
UInt32 hsCur;
|
||||
|
||||
if (p->numHashOutBits != 0)
|
||||
{
|
||||
hs = historySize;
|
||||
if (hs > p->expectedDataSize)
|
||||
hs = (UInt32)p->expectedDataSize;
|
||||
if (hs != 0)
|
||||
hs--;
|
||||
hs |= (hs >> 1);
|
||||
hs |= (hs >> 2);
|
||||
hs |= (hs >> 4);
|
||||
hs |= (hs >> 8);
|
||||
// we propagated 16 bits in (hs). Low 16 bits must be set later
|
||||
hs >>= 1;
|
||||
if (hs >= (1 << 24))
|
||||
{
|
||||
if (p->numHashBytes == 3)
|
||||
hs = (1 << 24) - 1;
|
||||
else
|
||||
hs >>= 1;
|
||||
/* if (bigHash) mode, GetHeads4b() in LzFindMt.c needs (hs >= ((1 << 24) - 1))) */
|
||||
}
|
||||
|
||||
// hs = ((UInt32)1 << 25) - 1; // for test
|
||||
|
||||
unsigned numBits = p->numHashOutBits;
|
||||
const unsigned nbMax =
|
||||
(p->numHashBytes == 2 ? 16 :
|
||||
(p->numHashBytes == 3 ? 24 : 32));
|
||||
if (numBits > nbMax)
|
||||
numBits = nbMax;
|
||||
if (numBits >= 32)
|
||||
hs = (UInt32)0 - 1;
|
||||
else
|
||||
hs = ((UInt32)1 << numBits) - 1;
|
||||
// (hash_size >= (1 << 16)) : Required for (numHashBytes > 2)
|
||||
hs |= (1 << 16) - 1; /* don't change it! */
|
||||
|
||||
// bt5: we adjust the size with recommended minimum size
|
||||
if (p->numHashBytes >= 5)
|
||||
hs |= (256 << kLzHash_CrcShift_2) - 1;
|
||||
{
|
||||
const UInt32 hs2 = MatchFinder_GetHashMask2(p, historySize);
|
||||
if (hs > hs2)
|
||||
hs = hs2;
|
||||
}
|
||||
hsCur = hs;
|
||||
if (p->expectedDataSize < historySize)
|
||||
{
|
||||
const UInt32 hs2 = MatchFinder_GetHashMask2(p, (UInt32)p->expectedDataSize);
|
||||
if (hsCur > hs2)
|
||||
hsCur = hs2;
|
||||
}
|
||||
}
|
||||
else
|
||||
{
|
||||
hs = MatchFinder_GetHashMask(p, historySize);
|
||||
hsCur = hs;
|
||||
if (p->expectedDataSize < historySize)
|
||||
{
|
||||
hsCur = MatchFinder_GetHashMask(p, (UInt32)p->expectedDataSize);
|
||||
if (hsCur > hs) // is it possible?
|
||||
hsCur = hs;
|
||||
}
|
||||
}
|
||||
p->hashMask = hs;
|
||||
hs++;
|
||||
|
||||
/*
|
||||
hs4 = (1 << 20);
|
||||
if (hs4 > hs)
|
||||
hs4 = hs;
|
||||
// hs4 = (1 << 16); // for test
|
||||
p->hash4Mask = hs4 - 1;
|
||||
*/
|
||||
p->hashMask = hsCur;
|
||||
|
||||
if (p->numHashBytes > 2) p->fixedHashSize += kHash2Size;
|
||||
if (p->numHashBytes > 3) p->fixedHashSize += kHash3Size;
|
||||
// if (p->numHashBytes > 4) p->fixedHashSize += hs4; // kHash4Size;
|
||||
hs += p->fixedHashSize;
|
||||
hashSizeSum = hs;
|
||||
hashSizeSum++;
|
||||
if (hashSizeSum < hs)
|
||||
return 0;
|
||||
{
|
||||
UInt32 fixedHashSize = 0;
|
||||
if (p->numHashBytes > 2 && p->numHashBytes_Min <= 2) fixedHashSize += kHash2Size;
|
||||
if (p->numHashBytes > 3 && p->numHashBytes_Min <= 3) fixedHashSize += kHash3Size;
|
||||
// if (p->numHashBytes > 4) p->fixedHashSize += hs4; // kHash4Size;
|
||||
hashSizeSum += fixedHashSize;
|
||||
p->fixedHashSize = fixedHashSize;
|
||||
}
|
||||
}
|
||||
|
||||
p->matchMaxLen = matchMaxLen;
|
||||
|
||||
{
|
||||
size_t newSize;
|
||||
size_t numSons;
|
||||
const UInt32 newCyclicBufferSize = historySize + 1; // do not change it
|
||||
p->historySize = historySize;
|
||||
p->hashSizeSum = hs;
|
||||
p->cyclicBufferSize = newCyclicBufferSize; // it must be = (historySize + 1)
|
||||
|
||||
numSons = newCyclicBufferSize;
|
||||
if (p->btMode)
|
||||
numSons <<= 1;
|
||||
newSize = hs + numSons;
|
||||
newSize = hashSizeSum + numSons;
|
||||
|
||||
if (numSons < newCyclicBufferSize || newSize < numSons)
|
||||
return 0;
|
||||
|
||||
// aligned size is not required here, but it can be better for some loops
|
||||
#define NUM_REFS_ALIGN_MASK 0xF
|
||||
newSize = (newSize + NUM_REFS_ALIGN_MASK) & ~(size_t)NUM_REFS_ALIGN_MASK;
|
||||
|
||||
if (p->hash && p->numRefs == newSize)
|
||||
// 22.02: we don't reallocate buffer, if old size is enough
|
||||
if (p->hash && p->numRefs >= newSize)
|
||||
return 1;
|
||||
|
||||
MatchFinder_FreeThisClassMemory(p, alloc);
|
||||
|
@ -398,7 +480,7 @@ int MatchFinder_Create(CMatchFinder *p, UInt32 historySize,
|
|||
|
||||
if (p->hash)
|
||||
{
|
||||
p->son = p->hash + p->hashSizeSum;
|
||||
p->son = p->hash + hashSizeSum;
|
||||
return 1;
|
||||
}
|
||||
}
|
||||
|
@ -470,7 +552,8 @@ void MatchFinder_Init_HighHash(CMatchFinder *p)
|
|||
|
||||
void MatchFinder_Init_4(CMatchFinder *p)
|
||||
{
|
||||
p->buffer = p->bufferBase;
|
||||
if (!p->directInput)
|
||||
p->buffer = p->bufBase;
|
||||
{
|
||||
/* kEmptyHashValue = 0 (Zero) is used in hash tables as NO-VALUE marker.
|
||||
the code in CMatchFinderMt expects (pos = 1) */
|
||||
|
@ -506,21 +589,21 @@ void MatchFinder_Init(CMatchFinder *p)
|
|||
|
||||
|
||||
|
||||
#if defined(MY_CPU_X86_OR_AMD64) && (!defined(_MSC_VER) || !defined(__clang__))
|
||||
#if defined(__clang__) && (__clang_major__ >= 8) \
|
||||
|| defined(__GNUC__) && (__GNUC__ >= 8) \
|
||||
|| defined(__INTEL_COMPILER) && (__INTEL_COMPILER >= 1900)
|
||||
#define USE_SATUR_SUB_128
|
||||
#define USE_AVX2
|
||||
#define ATTRIB_SSE41 __attribute__((__target__("sse4.1")))
|
||||
#define ATTRIB_AVX2 __attribute__((__target__("avx2")))
|
||||
#ifdef MY_CPU_X86_OR_AMD64
|
||||
#if defined(__clang__) && (__clang_major__ >= 4) \
|
||||
|| defined(Z7_GCC_VERSION) && (Z7_GCC_VERSION >= 40701)
|
||||
// || defined(__INTEL_COMPILER) && (__INTEL_COMPILER >= 1900)
|
||||
|
||||
#define USE_LZFIND_SATUR_SUB_128
|
||||
#define USE_LZFIND_SATUR_SUB_256
|
||||
#define LZFIND_ATTRIB_SSE41 __attribute__((__target__("sse4.1")))
|
||||
#define LZFIND_ATTRIB_AVX2 __attribute__((__target__("avx2")))
|
||||
#elif defined(_MSC_VER)
|
||||
#if (_MSC_VER >= 1600)
|
||||
#define USE_SATUR_SUB_128
|
||||
#if (_MSC_VER >= 1900)
|
||||
#define USE_AVX2
|
||||
#include <immintrin.h> // avx
|
||||
#endif
|
||||
#define USE_LZFIND_SATUR_SUB_128
|
||||
#endif
|
||||
#if (_MSC_VER >= 1900)
|
||||
#define USE_LZFIND_SATUR_SUB_256
|
||||
#endif
|
||||
#endif
|
||||
|
||||
|
@ -529,16 +612,16 @@ void MatchFinder_Init(CMatchFinder *p)
|
|||
|
||||
#if defined(__clang__) && (__clang_major__ >= 8) \
|
||||
|| defined(__GNUC__) && (__GNUC__ >= 8)
|
||||
#define USE_SATUR_SUB_128
|
||||
#define USE_LZFIND_SATUR_SUB_128
|
||||
#ifdef MY_CPU_ARM64
|
||||
// #define ATTRIB_SSE41 __attribute__((__target__("")))
|
||||
// #define LZFIND_ATTRIB_SSE41 __attribute__((__target__("")))
|
||||
#else
|
||||
// #define ATTRIB_SSE41 __attribute__((__target__("fpu=crypto-neon-fp-armv8")))
|
||||
// #define LZFIND_ATTRIB_SSE41 __attribute__((__target__("fpu=crypto-neon-fp-armv8")))
|
||||
#endif
|
||||
|
||||
#elif defined(_MSC_VER)
|
||||
#if (_MSC_VER >= 1910)
|
||||
#define USE_SATUR_SUB_128
|
||||
#define USE_LZFIND_SATUR_SUB_128
|
||||
#endif
|
||||
#endif
|
||||
|
||||
|
@ -550,121 +633,130 @@ void MatchFinder_Init(CMatchFinder *p)
|
|||
|
||||
#endif
|
||||
|
||||
/*
|
||||
#ifndef ATTRIB_SSE41
|
||||
#define ATTRIB_SSE41
|
||||
#endif
|
||||
#ifndef ATTRIB_AVX2
|
||||
#define ATTRIB_AVX2
|
||||
#endif
|
||||
*/
|
||||
|
||||
#ifdef USE_SATUR_SUB_128
|
||||
#ifdef USE_LZFIND_SATUR_SUB_128
|
||||
|
||||
// #define _SHOW_HW_STATUS
|
||||
// #define Z7_SHOW_HW_STATUS
|
||||
|
||||
#ifdef _SHOW_HW_STATUS
|
||||
#ifdef Z7_SHOW_HW_STATUS
|
||||
#include <stdio.h>
|
||||
#define _PRF(x) x
|
||||
_PRF(;)
|
||||
#define PRF(x) x
|
||||
PRF(;)
|
||||
#else
|
||||
#define _PRF(x)
|
||||
#define PRF(x)
|
||||
#endif
|
||||
|
||||
|
||||
#ifdef MY_CPU_ARM_OR_ARM64
|
||||
|
||||
#ifdef MY_CPU_ARM64
|
||||
// #define FORCE_SATUR_SUB_128
|
||||
// #define FORCE_LZFIND_SATUR_SUB_128
|
||||
#endif
|
||||
typedef uint32x4_t LzFind_v128;
|
||||
#define SASUB_128_V(v, s) \
|
||||
vsubq_u32(vmaxq_u32(v, s), s)
|
||||
|
||||
typedef uint32x4_t v128;
|
||||
#define SASUB_128(i) \
|
||||
*(v128 *)(void *)(items + (i) * 4) = \
|
||||
vsubq_u32(vmaxq_u32(*(const v128 *)(const void *)(items + (i) * 4), sub2), sub2);
|
||||
|
||||
#else
|
||||
#else // MY_CPU_ARM_OR_ARM64
|
||||
|
||||
#include <smmintrin.h> // sse4.1
|
||||
|
||||
typedef __m128i v128;
|
||||
typedef __m128i LzFind_v128;
|
||||
// SSE 4.1
|
||||
#define SASUB_128_V(v, s) \
|
||||
_mm_sub_epi32(_mm_max_epu32(v, s), s)
|
||||
|
||||
#endif // MY_CPU_ARM_OR_ARM64
|
||||
|
||||
|
||||
#define SASUB_128(i) \
|
||||
*(v128 *)(void *)(items + (i) * 4) = \
|
||||
_mm_sub_epi32(_mm_max_epu32(*(const v128 *)(const void *)(items + (i) * 4), sub2), sub2); // SSE 4.1
|
||||
|
||||
#endif
|
||||
*( LzFind_v128 *)( void *)(items + (i) * 4) = SASUB_128_V( \
|
||||
*(const LzFind_v128 *)(const void *)(items + (i) * 4), sub2);
|
||||
|
||||
|
||||
|
||||
MY_NO_INLINE
|
||||
Z7_NO_INLINE
|
||||
static
|
||||
#ifdef ATTRIB_SSE41
|
||||
ATTRIB_SSE41
|
||||
#ifdef LZFIND_ATTRIB_SSE41
|
||||
LZFIND_ATTRIB_SSE41
|
||||
#endif
|
||||
void
|
||||
MY_FAST_CALL
|
||||
Z7_FASTCALL
|
||||
LzFind_SaturSub_128(UInt32 subValue, CLzRef *items, const CLzRef *lim)
|
||||
{
|
||||
v128 sub2 =
|
||||
const LzFind_v128 sub2 =
|
||||
#ifdef MY_CPU_ARM_OR_ARM64
|
||||
vdupq_n_u32(subValue);
|
||||
#else
|
||||
_mm_set_epi32((Int32)subValue, (Int32)subValue, (Int32)subValue, (Int32)subValue);
|
||||
#endif
|
||||
Z7_PRAGMA_OPT_DISABLE_LOOP_UNROLL_VECTORIZE
|
||||
do
|
||||
{
|
||||
SASUB_128(0)
|
||||
SASUB_128(1)
|
||||
SASUB_128(2)
|
||||
SASUB_128(3)
|
||||
items += 4 * 4;
|
||||
SASUB_128(0) SASUB_128(1) items += 2 * 4;
|
||||
SASUB_128(0) SASUB_128(1) items += 2 * 4;
|
||||
}
|
||||
while (items != lim);
|
||||
}
|
||||
|
||||
|
||||
|
||||
#ifdef USE_AVX2
|
||||
#ifdef USE_LZFIND_SATUR_SUB_256
|
||||
|
||||
#include <immintrin.h> // avx
|
||||
/*
|
||||
clang :immintrin.h uses
|
||||
#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \
|
||||
defined(__AVX2__)
|
||||
#include <avx2intrin.h>
|
||||
#endif
|
||||
so we need <avxintrin.h> for clang-cl */
|
||||
|
||||
#define SASUB_256(i) *(__m256i *)(void *)(items + (i) * 8) = _mm256_sub_epi32(_mm256_max_epu32(*(const __m256i *)(const void *)(items + (i) * 8), sub2), sub2); // AVX2
|
||||
#if defined(__clang__)
|
||||
#include <avxintrin.h>
|
||||
#include <avx2intrin.h>
|
||||
#endif
|
||||
|
||||
MY_NO_INLINE
|
||||
// AVX2:
|
||||
#define SASUB_256(i) \
|
||||
*( __m256i *)( void *)(items + (i) * 8) = \
|
||||
_mm256_sub_epi32(_mm256_max_epu32( \
|
||||
*(const __m256i *)(const void *)(items + (i) * 8), sub2), sub2);
|
||||
|
||||
Z7_NO_INLINE
|
||||
static
|
||||
#ifdef ATTRIB_AVX2
|
||||
ATTRIB_AVX2
|
||||
#ifdef LZFIND_ATTRIB_AVX2
|
||||
LZFIND_ATTRIB_AVX2
|
||||
#endif
|
||||
void
|
||||
MY_FAST_CALL
|
||||
Z7_FASTCALL
|
||||
LzFind_SaturSub_256(UInt32 subValue, CLzRef *items, const CLzRef *lim)
|
||||
{
|
||||
__m256i sub2 = _mm256_set_epi32(
|
||||
const __m256i sub2 = _mm256_set_epi32(
|
||||
(Int32)subValue, (Int32)subValue, (Int32)subValue, (Int32)subValue,
|
||||
(Int32)subValue, (Int32)subValue, (Int32)subValue, (Int32)subValue);
|
||||
Z7_PRAGMA_OPT_DISABLE_LOOP_UNROLL_VECTORIZE
|
||||
do
|
||||
{
|
||||
SASUB_256(0)
|
||||
SASUB_256(1)
|
||||
items += 2 * 8;
|
||||
SASUB_256(0) SASUB_256(1) items += 2 * 8;
|
||||
SASUB_256(0) SASUB_256(1) items += 2 * 8;
|
||||
}
|
||||
while (items != lim);
|
||||
}
|
||||
#endif // USE_AVX2
|
||||
#endif // USE_LZFIND_SATUR_SUB_256
|
||||
|
||||
#ifndef FORCE_SATUR_SUB_128
|
||||
typedef void (MY_FAST_CALL *LZFIND_SATUR_SUB_CODE_FUNC)(
|
||||
#ifndef FORCE_LZFIND_SATUR_SUB_128
|
||||
typedef void (Z7_FASTCALL *LZFIND_SATUR_SUB_CODE_FUNC)(
|
||||
UInt32 subValue, CLzRef *items, const CLzRef *lim);
|
||||
static LZFIND_SATUR_SUB_CODE_FUNC g_LzFind_SaturSub;
|
||||
#endif // FORCE_SATUR_SUB_128
|
||||
#endif // FORCE_LZFIND_SATUR_SUB_128
|
||||
|
||||
#endif // USE_SATUR_SUB_128
|
||||
#endif // USE_LZFIND_SATUR_SUB_128
|
||||
|
||||
|
||||
// kEmptyHashValue must be zero
|
||||
// #define SASUB_32(i) v = items[i]; m = v - subValue; if (v < subValue) m = kEmptyHashValue; items[i] = m;
|
||||
#define SASUB_32(i) v = items[i]; if (v < subValue) v = subValue; items[i] = v - subValue;
|
||||
// #define SASUB_32(i) { UInt32 v = items[i]; UInt32 m = v - subValue; if (v < subValue) m = kEmptyHashValue; items[i] = m; }
|
||||
#define SASUB_32(i) { UInt32 v = items[i]; if (v < subValue) v = subValue; items[i] = v - subValue; }
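A small, self-contained illustration of what this saturating subtraction does during normalization (example values of mine, not from the commit): every stored position is shifted down by subValue, and anything older than subValue collapses to 0, which is kEmptyHashValue ("no match"). The SSE4.1 / AVX2 / NEON paths below compute the same max(v, sub) - sub, just 4 or 8 entries at a time.

#include <stdio.h>

typedef unsigned int UInt32;     /* stand-in for the SDK's UInt32 */

int main(void)
{
  UInt32 items[4] = { 0, 0x00001000u, 0x80000000u, 0xFFFFFFF0u };
  const UInt32 subValue = 0x7FFF0000u;   /* example reduction offset */
  int i;
  for (i = 0; i < 4; i++)
  {
    UInt32 v = items[i];
    if (v < subValue)
      v = subValue;              /* saturate: the entry becomes 0 (kEmptyHashValue) */
    items[i] = v - subValue;
    printf("items[%d] -> 0x%08X\n", i, items[i]);
  }
  return 0;
}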
|
||||
|
||||
#ifdef FORCE_SATUR_SUB_128
|
||||
#ifdef FORCE_LZFIND_SATUR_SUB_128
|
||||
|
||||
#define DEFAULT_SaturSub LzFind_SaturSub_128
|
||||
|
||||
|
@ -672,24 +764,19 @@ static LZFIND_SATUR_SUB_CODE_FUNC g_LzFind_SaturSub;
|
|||
|
||||
#define DEFAULT_SaturSub LzFind_SaturSub_32
|
||||
|
||||
MY_NO_INLINE
|
||||
Z7_NO_INLINE
|
||||
static
|
||||
void
|
||||
MY_FAST_CALL
|
||||
Z7_FASTCALL
|
||||
LzFind_SaturSub_32(UInt32 subValue, CLzRef *items, const CLzRef *lim)
|
||||
{
|
||||
Z7_PRAGMA_OPT_DISABLE_LOOP_UNROLL_VECTORIZE
|
||||
do
|
||||
{
|
||||
UInt32 v;
|
||||
SASUB_32(0)
|
||||
SASUB_32(1)
|
||||
SASUB_32(2)
|
||||
SASUB_32(3)
|
||||
SASUB_32(4)
|
||||
SASUB_32(5)
|
||||
SASUB_32(6)
|
||||
SASUB_32(7)
|
||||
items += 8;
|
||||
SASUB_32(0) SASUB_32(1) items += 2;
|
||||
SASUB_32(0) SASUB_32(1) items += 2;
|
||||
SASUB_32(0) SASUB_32(1) items += 2;
|
||||
SASUB_32(0) SASUB_32(1) items += 2;
|
||||
}
|
||||
while (items != lim);
|
||||
}
|
||||
|
@ -697,27 +784,23 @@ LzFind_SaturSub_32(UInt32 subValue, CLzRef *items, const CLzRef *lim)
|
|||
#endif
|
||||
|
||||
|
||||
MY_NO_INLINE
|
||||
Z7_NO_INLINE
|
||||
void MatchFinder_Normalize3(UInt32 subValue, CLzRef *items, size_t numItems)
|
||||
{
|
||||
#define K_NORM_ALIGN_BLOCK_SIZE (1 << 6)
|
||||
|
||||
CLzRef *lim;
|
||||
|
||||
for (; numItems != 0 && ((unsigned)(ptrdiff_t)items & (K_NORM_ALIGN_BLOCK_SIZE - 1)) != 0; numItems--)
|
||||
#define LZFIND_NORM_ALIGN_BLOCK_SIZE (1 << 7)
|
||||
Z7_PRAGMA_OPT_DISABLE_LOOP_UNROLL_VECTORIZE
|
||||
for (; numItems != 0 && ((unsigned)(ptrdiff_t)items & (LZFIND_NORM_ALIGN_BLOCK_SIZE - 1)) != 0; numItems--)
|
||||
{
|
||||
UInt32 v;
|
||||
SASUB_32(0);
|
||||
SASUB_32(0)
|
||||
items++;
|
||||
}
|
||||
|
||||
{
|
||||
#define K_NORM_ALIGN_MASK (K_NORM_ALIGN_BLOCK_SIZE / 4 - 1)
|
||||
lim = items + (numItems & ~(size_t)K_NORM_ALIGN_MASK);
|
||||
numItems &= K_NORM_ALIGN_MASK;
|
||||
const size_t k_Align_Mask = (LZFIND_NORM_ALIGN_BLOCK_SIZE / 4 - 1);
|
||||
CLzRef *lim = items + (numItems & ~(size_t)k_Align_Mask);
|
||||
numItems &= k_Align_Mask;
|
||||
if (items != lim)
|
||||
{
|
||||
#if defined(USE_SATUR_SUB_128) && !defined(FORCE_SATUR_SUB_128)
|
||||
#if defined(USE_LZFIND_SATUR_SUB_128) && !defined(FORCE_LZFIND_SATUR_SUB_128)
|
||||
if (g_LzFind_SaturSub)
|
||||
g_LzFind_SaturSub(subValue, items, lim);
|
||||
else
|
||||
|
@ -726,12 +809,10 @@ void MatchFinder_Normalize3(UInt32 subValue, CLzRef *items, size_t numItems)
|
|||
}
|
||||
items = lim;
|
||||
}
|
||||
|
||||
|
||||
Z7_PRAGMA_OPT_DISABLE_LOOP_UNROLL_VECTORIZE
|
||||
for (; numItems != 0; numItems--)
|
||||
{
|
||||
UInt32 v;
|
||||
SASUB_32(0);
|
||||
SASUB_32(0)
|
||||
items++;
|
||||
}
|
||||
}
|
||||
|
@ -740,7 +821,7 @@ void MatchFinder_Normalize3(UInt32 subValue, CLzRef *items, size_t numItems)
|
|||
|
||||
// call MatchFinder_CheckLimits() only after (p->pos++) update
|
||||
|
||||
MY_NO_INLINE
|
||||
Z7_NO_INLINE
|
||||
static void MatchFinder_CheckLimits(CMatchFinder *p)
|
||||
{
|
||||
if (// !p->streamEndWasReached && p->result == SZ_OK &&
|
||||
|
@ -768,11 +849,14 @@ static void MatchFinder_CheckLimits(CMatchFinder *p)
|
|||
const UInt32 subValue = (p->pos - p->historySize - 1) /* & ~(UInt32)(kNormalizeAlign - 1) */;
|
||||
// const UInt32 subValue = (1 << 15); // for debug
|
||||
// printf("\nMatchFinder_Normalize() subValue == 0x%x\n", subValue);
|
||||
size_t numSonRefs = p->cyclicBufferSize;
|
||||
if (p->btMode)
|
||||
numSonRefs <<= 1;
|
||||
Inline_MatchFinder_ReduceOffsets(p, subValue);
|
||||
MatchFinder_Normalize3(subValue, p->hash, (size_t)p->hashSizeSum + numSonRefs);
|
||||
MatchFinder_REDUCE_OFFSETS(p, subValue)
|
||||
MatchFinder_Normalize3(subValue, p->hash, (size_t)p->hashMask + 1 + p->fixedHashSize);
|
||||
{
|
||||
size_t numSonRefs = p->cyclicBufferSize;
|
||||
if (p->btMode)
|
||||
numSonRefs <<= 1;
|
||||
MatchFinder_Normalize3(subValue, p->son, numSonRefs);
|
||||
}
|
||||
}
|
||||
|
||||
if (p->cyclicBufferPos == p->cyclicBufferSize)
|
||||
|
@ -785,7 +869,7 @@ static void MatchFinder_CheckLimits(CMatchFinder *p)
|
|||
/*
|
||||
(lenLimit > maxLen)
|
||||
*/
|
||||
MY_FORCE_INLINE
|
||||
Z7_FORCE_INLINE
|
||||
static UInt32 * Hc_GetMatchesSpec(size_t lenLimit, UInt32 curMatch, UInt32 pos, const Byte *cur, CLzRef *son,
|
||||
size_t _cyclicBufferPos, UInt32 _cyclicBufferSize, UInt32 cutValue,
|
||||
UInt32 *d, unsigned maxLen)
|
||||
|
@ -867,7 +951,7 @@ static UInt32 * Hc_GetMatchesSpec(size_t lenLimit, UInt32 curMatch, UInt32 pos,
|
|||
}
|
||||
|
||||
|
||||
MY_FORCE_INLINE
|
||||
Z7_FORCE_INLINE
|
||||
UInt32 * GetMatchesSpec1(UInt32 lenLimit, UInt32 curMatch, UInt32 pos, const Byte *cur, CLzRef *son,
|
||||
size_t _cyclicBufferPos, UInt32 _cyclicBufferSize, UInt32 cutValue,
|
||||
UInt32 *d, UInt32 maxLen)
|
||||
|
@ -1004,7 +1088,7 @@ static void SkipMatchesSpec(UInt32 lenLimit, UInt32 curMatch, UInt32 pos, const
|
|||
|
||||
#define MOVE_POS_RET MOVE_POS return distances;
|
||||
|
||||
MY_NO_INLINE
|
||||
Z7_NO_INLINE
|
||||
static void MatchFinder_MovePos(CMatchFinder *p)
|
||||
{
|
||||
/* we go here at the end of stream data, when (avail < num_hash_bytes)
|
||||
|
@ -1015,11 +1099,11 @@ static void MatchFinder_MovePos(CMatchFinder *p)
|
|||
if (p->btMode)
|
||||
p->sons[(p->cyclicBufferPos << p->btMode) + 1] = 0; // kEmptyHashValue
|
||||
*/
|
||||
MOVE_POS;
|
||||
MOVE_POS
|
||||
}
|
||||
|
||||
#define GET_MATCHES_HEADER2(minLen, ret_op) \
|
||||
unsigned lenLimit; UInt32 hv; Byte *cur; UInt32 curMatch; \
|
||||
unsigned lenLimit; UInt32 hv; const Byte *cur; UInt32 curMatch; \
|
||||
lenLimit = (unsigned)p->lenLimit; { if (lenLimit < minLen) { MatchFinder_MovePos(p); ret_op; }} \
|
||||
cur = p->buffer;
|
||||
|
||||
|
@ -1028,11 +1112,11 @@ static void MatchFinder_MovePos(CMatchFinder *p)
|
|||
|
||||
#define MF_PARAMS(p) lenLimit, curMatch, p->pos, p->buffer, p->son, p->cyclicBufferPos, p->cyclicBufferSize, p->cutValue
|
||||
|
||||
#define SKIP_FOOTER SkipMatchesSpec(MF_PARAMS(p)); MOVE_POS; } while (--num);
|
||||
#define SKIP_FOOTER SkipMatchesSpec(MF_PARAMS(p)); MOVE_POS } while (--num);
|
||||
|
||||
#define GET_MATCHES_FOOTER_BASE(_maxLen_, func) \
|
||||
distances = func(MF_PARAMS(p), \
|
||||
distances, (UInt32)_maxLen_); MOVE_POS_RET;
|
||||
distances, (UInt32)_maxLen_); MOVE_POS_RET
|
||||
|
||||
#define GET_MATCHES_FOOTER_BT(_maxLen_) \
|
||||
GET_MATCHES_FOOTER_BASE(_maxLen_, GetMatchesSpec1)
|
||||
|
@ -1052,7 +1136,7 @@ static void MatchFinder_MovePos(CMatchFinder *p)
|
|||
static UInt32* Bt2_MatchFinder_GetMatches(CMatchFinder *p, UInt32 *distances)
|
||||
{
|
||||
GET_MATCHES_HEADER(2)
|
||||
HASH2_CALC;
|
||||
HASH2_CALC
|
||||
curMatch = p->hash[hv];
|
||||
p->hash[hv] = p->pos;
|
||||
GET_MATCHES_FOOTER_BT(1)
|
||||
|
@ -1061,7 +1145,7 @@ static UInt32* Bt2_MatchFinder_GetMatches(CMatchFinder *p, UInt32 *distances)
|
|||
UInt32* Bt3Zip_MatchFinder_GetMatches(CMatchFinder *p, UInt32 *distances)
|
||||
{
|
||||
GET_MATCHES_HEADER(3)
|
||||
HASH_ZIP_CALC;
|
||||
HASH_ZIP_CALC
|
||||
curMatch = p->hash[hv];
|
||||
p->hash[hv] = p->pos;
|
||||
GET_MATCHES_FOOTER_BT(2)
|
||||
|
@ -1082,7 +1166,7 @@ static UInt32* Bt3_MatchFinder_GetMatches(CMatchFinder *p, UInt32 *distances)
|
|||
UInt32 *hash;
|
||||
GET_MATCHES_HEADER(3)
|
||||
|
||||
HASH3_CALC;
|
||||
HASH3_CALC
|
||||
|
||||
hash = p->hash;
|
||||
pos = p->pos;
|
||||
|
@ -1107,7 +1191,7 @@ static UInt32* Bt3_MatchFinder_GetMatches(CMatchFinder *p, UInt32 *distances)
|
|||
if (maxLen == lenLimit)
|
||||
{
|
||||
SkipMatchesSpec(MF_PARAMS(p));
|
||||
MOVE_POS_RET;
|
||||
MOVE_POS_RET
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -1123,7 +1207,7 @@ static UInt32* Bt4_MatchFinder_GetMatches(CMatchFinder *p, UInt32 *distances)
|
|||
UInt32 *hash;
|
||||
GET_MATCHES_HEADER(4)
|
||||
|
||||
HASH4_CALC;
|
||||
HASH4_CALC
|
||||
|
||||
hash = p->hash;
|
||||
pos = p->pos;
|
||||
|
@ -1190,7 +1274,7 @@ static UInt32* Bt5_MatchFinder_GetMatches(CMatchFinder *p, UInt32 *distances)
|
|||
UInt32 *hash;
|
||||
GET_MATCHES_HEADER(5)
|
||||
|
||||
HASH5_CALC;
|
||||
HASH5_CALC
|
||||
|
||||
hash = p->hash;
|
||||
pos = p->pos;
|
||||
|
@ -1246,7 +1330,7 @@ static UInt32* Bt5_MatchFinder_GetMatches(CMatchFinder *p, UInt32 *distances)
|
|||
if (maxLen == lenLimit)
|
||||
{
|
||||
SkipMatchesSpec(MF_PARAMS(p));
|
||||
MOVE_POS_RET;
|
||||
MOVE_POS_RET
|
||||
}
|
||||
break;
|
||||
}
|
||||
|
@ -1263,7 +1347,7 @@ static UInt32* Hc4_MatchFinder_GetMatches(CMatchFinder *p, UInt32 *distances)
|
|||
UInt32 *hash;
|
||||
GET_MATCHES_HEADER(4)
|
||||
|
||||
HASH4_CALC;
|
||||
HASH4_CALC
|
||||
|
||||
hash = p->hash;
|
||||
pos = p->pos;
|
||||
|
@ -1314,12 +1398,12 @@ static UInt32* Hc4_MatchFinder_GetMatches(CMatchFinder *p, UInt32 *distances)
|
|||
if (maxLen == lenLimit)
|
||||
{
|
||||
p->son[p->cyclicBufferPos] = curMatch;
|
||||
MOVE_POS_RET;
|
||||
MOVE_POS_RET
|
||||
}
|
||||
break;
|
||||
}
|
||||
|
||||
GET_MATCHES_FOOTER_HC(maxLen);
|
||||
GET_MATCHES_FOOTER_HC(maxLen)
|
||||
}
|
||||
|
||||
|
||||
|
@ -1330,7 +1414,7 @@ static UInt32 * Hc5_MatchFinder_GetMatches(CMatchFinder *p, UInt32 *distances)
|
|||
UInt32 *hash;
|
||||
GET_MATCHES_HEADER(5)
|
||||
|
||||
HASH5_CALC;
|
||||
HASH5_CALC
|
||||
|
||||
hash = p->hash;
|
||||
pos = p->pos;
|
||||
|
@ -1386,19 +1470,19 @@ static UInt32 * Hc5_MatchFinder_GetMatches(CMatchFinder *p, UInt32 *distances)
|
|||
if (maxLen == lenLimit)
|
||||
{
|
||||
p->son[p->cyclicBufferPos] = curMatch;
|
||||
MOVE_POS_RET;
|
||||
MOVE_POS_RET
|
||||
}
|
||||
break;
|
||||
}
|
||||
|
||||
GET_MATCHES_FOOTER_HC(maxLen);
|
||||
GET_MATCHES_FOOTER_HC(maxLen)
|
||||
}
|
||||
|
||||
|
||||
UInt32* Hc3Zip_MatchFinder_GetMatches(CMatchFinder *p, UInt32 *distances)
|
||||
{
|
||||
GET_MATCHES_HEADER(3)
|
||||
HASH_ZIP_CALC;
|
||||
HASH_ZIP_CALC
|
||||
curMatch = p->hash[hv];
|
||||
p->hash[hv] = p->pos;
|
||||
GET_MATCHES_FOOTER_HC(2)
|
||||
|
@ -1409,7 +1493,7 @@ static void Bt2_MatchFinder_Skip(CMatchFinder *p, UInt32 num)
|
|||
{
|
||||
SKIP_HEADER(2)
|
||||
{
|
||||
HASH2_CALC;
|
||||
HASH2_CALC
|
||||
curMatch = p->hash[hv];
|
||||
p->hash[hv] = p->pos;
|
||||
}
|
||||
|
@ -1420,7 +1504,7 @@ void Bt3Zip_MatchFinder_Skip(CMatchFinder *p, UInt32 num)
|
|||
{
|
||||
SKIP_HEADER(3)
|
||||
{
|
||||
HASH_ZIP_CALC;
|
||||
HASH_ZIP_CALC
|
||||
curMatch = p->hash[hv];
|
||||
p->hash[hv] = p->pos;
|
||||
}
|
||||
|
@ -1433,7 +1517,7 @@ static void Bt3_MatchFinder_Skip(CMatchFinder *p, UInt32 num)
|
|||
{
|
||||
UInt32 h2;
|
||||
UInt32 *hash;
|
||||
HASH3_CALC;
|
||||
HASH3_CALC
|
||||
hash = p->hash;
|
||||
curMatch = (hash + kFix3HashSize)[hv];
|
||||
hash[h2] =
|
||||
|
@ -1448,7 +1532,7 @@ static void Bt4_MatchFinder_Skip(CMatchFinder *p, UInt32 num)
|
|||
{
|
||||
UInt32 h2, h3;
|
||||
UInt32 *hash;
|
||||
HASH4_CALC;
|
||||
HASH4_CALC
|
||||
hash = p->hash;
|
||||
curMatch = (hash + kFix4HashSize)[hv];
|
||||
hash [h2] =
|
||||
|
@ -1464,7 +1548,7 @@ static void Bt5_MatchFinder_Skip(CMatchFinder *p, UInt32 num)
|
|||
{
|
||||
UInt32 h2, h3;
|
||||
UInt32 *hash;
|
||||
HASH5_CALC;
|
||||
HASH5_CALC
|
||||
hash = p->hash;
|
||||
curMatch = (hash + kFix5HashSize)[hv];
|
||||
hash [h2] =
|
||||
|
@ -1478,7 +1562,7 @@ static void Bt5_MatchFinder_Skip(CMatchFinder *p, UInt32 num)
|
|||
|
||||
#define HC_SKIP_HEADER(minLen) \
|
||||
do { if (p->lenLimit < minLen) { MatchFinder_MovePos(p); num--; continue; } { \
|
||||
Byte *cur; \
|
||||
const Byte *cur; \
|
||||
UInt32 *hash; \
|
||||
UInt32 *son; \
|
||||
UInt32 pos = p->pos; \
|
||||
|
@ -1510,7 +1594,7 @@ static void Hc4_MatchFinder_Skip(CMatchFinder *p, UInt32 num)
|
|||
HC_SKIP_HEADER(4)
|
||||
|
||||
UInt32 h2, h3;
|
||||
HASH4_CALC;
|
||||
HASH4_CALC
|
||||
curMatch = (hash + kFix4HashSize)[hv];
|
||||
hash [h2] =
|
||||
(hash + kFix3HashSize)[h3] =
|
||||
|
@ -1540,7 +1624,7 @@ void Hc3Zip_MatchFinder_Skip(CMatchFinder *p, UInt32 num)
|
|||
{
|
||||
HC_SKIP_HEADER(3)
|
||||
|
||||
HASH_ZIP_CALC;
|
||||
HASH_ZIP_CALC
|
||||
curMatch = hash[hv];
|
||||
hash[hv] = pos;
|
||||
|
||||
|
@ -1590,17 +1674,17 @@ void MatchFinder_CreateVTable(CMatchFinder *p, IMatchFinder2 *vTable)
|
|||
|
||||
|
||||
|
||||
void LzFindPrepare()
|
||||
void LzFindPrepare(void)
|
||||
{
|
||||
#ifndef FORCE_SATUR_SUB_128
|
||||
#ifdef USE_SATUR_SUB_128
|
||||
#ifndef FORCE_LZFIND_SATUR_SUB_128
|
||||
#ifdef USE_LZFIND_SATUR_SUB_128
|
||||
LZFIND_SATUR_SUB_CODE_FUNC f = NULL;
|
||||
#ifdef MY_CPU_ARM_OR_ARM64
|
||||
{
|
||||
if (CPU_IsSupported_NEON())
|
||||
{
|
||||
// #pragma message ("=== LzFind NEON")
|
||||
_PRF(printf("\n=== LzFind NEON\n"));
|
||||
PRF(printf("\n=== LzFind NEON\n"));
|
||||
f = LzFind_SaturSub_128;
|
||||
}
|
||||
// f = 0; // for debug
|
||||
|
@ -1609,20 +1693,25 @@ void LzFindPrepare()
|
|||
if (CPU_IsSupported_SSE41())
|
||||
{
|
||||
// #pragma message ("=== LzFind SSE41")
|
||||
_PRF(printf("\n=== LzFind SSE41\n"));
|
||||
PRF(printf("\n=== LzFind SSE41\n"));
|
||||
f = LzFind_SaturSub_128;
|
||||
|
||||
#ifdef USE_AVX2
|
||||
#ifdef USE_LZFIND_SATUR_SUB_256
|
||||
if (CPU_IsSupported_AVX2())
|
||||
{
|
||||
// #pragma message ("=== LzFind AVX2")
|
||||
_PRF(printf("\n=== LzFind AVX2\n"));
|
||||
PRF(printf("\n=== LzFind AVX2\n"));
|
||||
f = LzFind_SaturSub_256;
|
||||
}
|
||||
#endif
|
||||
}
|
||||
#endif // MY_CPU_ARM_OR_ARM64
|
||||
g_LzFind_SaturSub = f;
|
||||
#endif // USE_SATUR_SUB_128
|
||||
#endif // FORCE_SATUR_SUB_128
|
||||
#endif // USE_LZFIND_SATUR_SUB_128
|
||||
#endif // FORCE_LZFIND_SATUR_SUB_128
|
||||
}
|
||||
|
||||
|
||||
#undef MOVE_POS
|
||||
#undef MOVE_POS_RET
|
||||
#undef PRF
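
Note: the LzFindPrepare() body above probes the CPU once (NEON on ARM, SSE4.1/AVX2 on x86) and publishes the fastest saturating-subtract routine through the g_LzFind_SaturSub function pointer. The stand-alone sketch below shows only the shape of that runtime-dispatch pattern; every name in it is hypothetical, and the scalar routine is a plain saturating subtract, not the SDK's SIMD code.

/* dispatch_sketch.c -- illustrative only. Mirrors the shape of LzFindPrepare()
   above: probe the CPU once, then publish the chosen routine via a function
   pointer. The real code keys off CPU_IsSupported_*() from CpuArch.h and
   stores the result in g_LzFind_SaturSub. */
#include <stdio.h>

typedef void (*SaturSubFunc)(unsigned sub, unsigned *items, const unsigned *lim);

/* scalar fallback: subtract with saturation at zero */
static void SaturSub_Scalar(unsigned sub, unsigned *items, const unsigned *lim)
{
  for (; items != lim; items++)
    *items = (*items < sub) ? 0 : (*items - sub);
}

static int Cpu_HasFastSimd(void) { return 0; }  /* hypothetical probe */

static SaturSubFunc g_SaturSub;

static void Prepare(void)
{
  SaturSubFunc f = SaturSub_Scalar;
  if (Cpu_HasFastSimd())
  {
    /* f = SaturSub_Simd; -- a vector version would be selected here */
  }
  g_SaturSub = f;
}

int main(void)
{
  unsigned a[4] = { 5, 100, 7, 3 };
  Prepare();
  g_SaturSub(10, a, a + 4);
  printf("%u %u %u %u\n", a[0], a[1], a[2], a[3]);  /* prints "0 90 0 0" */
  return 0;
}
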
|
||||
|
|
File diff suppressed because it is too large

|
@ -1,5 +1,5 @@
|
|||
/* LzFindOpt.c -- multithreaded Match finder for LZ algorithms
|
||||
2021-07-13 : Igor Pavlov : Public domain */
|
||||
2023-04-02 : Igor Pavlov : Public domain */
|
||||
|
||||
#include "Precomp.h"
|
||||
|
||||
|
@ -41,8 +41,8 @@ UInt64 g_NumIters_Bytes;
|
|||
// #define CYC_TO_POS_OFFSET 1 // for debug
|
||||
|
||||
/*
|
||||
MY_NO_INLINE
|
||||
UInt32 * MY_FAST_CALL GetMatchesSpecN_1(const Byte *lenLimit, size_t pos, const Byte *cur, CLzRef *son,
|
||||
Z7_NO_INLINE
|
||||
UInt32 * Z7_FASTCALL GetMatchesSpecN_1(const Byte *lenLimit, size_t pos, const Byte *cur, CLzRef *son,
|
||||
UInt32 _cutValue, UInt32 *d, size_t _maxLen, const UInt32 *hash, const UInt32 *limit, const UInt32 *size, UInt32 *posRes)
|
||||
{
|
||||
do
|
||||
|
@ -214,13 +214,13 @@ else
|
|||
to eliminate "movsx" BUG in old MSVC x64 compiler.
|
||||
*/
|
||||
|
||||
UInt32 * MY_FAST_CALL GetMatchesSpecN_2(const Byte *lenLimit, size_t pos, const Byte *cur, CLzRef *son,
|
||||
UInt32 * Z7_FASTCALL GetMatchesSpecN_2(const Byte *lenLimit, size_t pos, const Byte *cur, CLzRef *son,
|
||||
UInt32 _cutValue, UInt32 *d, size_t _maxLen, const UInt32 *hash, const UInt32 *limit, const UInt32 *size,
|
||||
size_t _cyclicBufferPos, UInt32 _cyclicBufferSize,
|
||||
UInt32 *posRes);
|
||||
|
||||
MY_NO_INLINE
|
||||
UInt32 * MY_FAST_CALL GetMatchesSpecN_2(const Byte *lenLimit, size_t pos, const Byte *cur, CLzRef *son,
|
||||
Z7_NO_INLINE
|
||||
UInt32 * Z7_FASTCALL GetMatchesSpecN_2(const Byte *lenLimit, size_t pos, const Byte *cur, CLzRef *son,
|
||||
UInt32 _cutValue, UInt32 *d, size_t _maxLen, const UInt32 *hash, const UInt32 *limit, const UInt32 *size,
|
||||
size_t _cyclicBufferPos, UInt32 _cyclicBufferSize,
|
||||
UInt32 *posRes)
|
||||
|
@ -404,7 +404,7 @@ else
|
|||
/*
|
||||
typedef UInt32 uint32plus; // size_t
|
||||
|
||||
UInt32 * MY_FAST_CALL GetMatchesSpecN_3(uint32plus lenLimit, size_t pos, const Byte *cur, CLzRef *son,
|
||||
UInt32 * Z7_FASTCALL GetMatchesSpecN_3(uint32plus lenLimit, size_t pos, const Byte *cur, CLzRef *son,
|
||||
UInt32 _cutValue, UInt32 *d, uint32plus _maxLen, const UInt32 *hash, const UInt32 *limit, const UInt32 *size,
|
||||
size_t _cyclicBufferPos, UInt32 _cyclicBufferSize,
|
||||
UInt32 *posRes)
|
||||
|
|
|
@ -1,5 +1,5 @@
|
|||
/* Lzma2Dec.c -- LZMA2 Decoder
|
||||
2021-02-09 : Igor Pavlov : Public domain */
|
||||
2023-03-03 : Igor Pavlov : Public domain */
|
||||
|
||||
/* #define SHOW_DEBUG_INFO */
|
||||
|
||||
|
@ -71,14 +71,14 @@ static SRes Lzma2Dec_GetOldProps(Byte prop, Byte *props)
|
|||
SRes Lzma2Dec_AllocateProbs(CLzma2Dec *p, Byte prop, ISzAllocPtr alloc)
|
||||
{
|
||||
Byte props[LZMA_PROPS_SIZE];
|
||||
RINOK(Lzma2Dec_GetOldProps(prop, props));
|
||||
RINOK(Lzma2Dec_GetOldProps(prop, props))
|
||||
return LzmaDec_AllocateProbs(&p->decoder, props, LZMA_PROPS_SIZE, alloc);
|
||||
}
|
||||
|
||||
SRes Lzma2Dec_Allocate(CLzma2Dec *p, Byte prop, ISzAllocPtr alloc)
|
||||
{
|
||||
Byte props[LZMA_PROPS_SIZE];
|
||||
RINOK(Lzma2Dec_GetOldProps(prop, props));
|
||||
RINOK(Lzma2Dec_GetOldProps(prop, props))
|
||||
return LzmaDec_Allocate(&p->decoder, props, LZMA_PROPS_SIZE, alloc);
|
||||
}
|
||||
|
||||
|
@ -474,8 +474,8 @@ SRes Lzma2Decode(Byte *dest, SizeT *destLen, const Byte *src, SizeT *srcLen,
|
|||
SizeT outSize = *destLen, inSize = *srcLen;
|
||||
*destLen = *srcLen = 0;
|
||||
*status = LZMA_STATUS_NOT_SPECIFIED;
|
||||
Lzma2Dec_Construct(&p);
|
||||
RINOK(Lzma2Dec_AllocateProbs(&p, prop, alloc));
|
||||
Lzma2Dec_CONSTRUCT(&p)
|
||||
RINOK(Lzma2Dec_AllocateProbs(&p, prop, alloc))
|
||||
p.decoder.dic = dest;
|
||||
p.decoder.dicBufSize = outSize;
|
||||
Lzma2Dec_Init(&p);
|
||||
|
@ -487,3 +487,5 @@ SRes Lzma2Decode(Byte *dest, SizeT *destLen, const Byte *src, SizeT *srcLen,
|
|||
Lzma2Dec_FreeProbs(&p, alloc);
|
||||
return res;
|
||||
}
|
||||
|
||||
#undef PRF
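
Note: a pattern that repeats through the rest of this update is visible in the Lzma2Dec.c hunks above: RINOK(...) call sites drop their trailing semicolons. In 23.01 the macro expands to a complete brace block, so a caller-supplied ';' would only add a stray empty statement (and can detach a following else). The sketch below is a hedged reconstruction of the idea; the real definition lives in 7zTypes.h and may differ in detail.

/* rinok_sketch.c -- illustrative only; RINOK_DEMO is not the SDK's macro. */
typedef int SRes;
#define SZ_OK 0
#define SZ_ERROR_DATA 1

/* hypothetical statement-style error check: expands to a full brace block */
#define RINOK_DEMO(x) { const SRes _r_ = (x); if (_r_ != SZ_OK) return _r_; }

static SRes Step(int ok) { return ok ? SZ_OK : SZ_ERROR_DATA; }

static SRes Run(int ok)
{
  RINOK_DEMO(Step(ok))   /* no trailing ';' -- the macro is already a statement */
  return SZ_OK;
}

int main(void)
{
  return Run(1);   /* returns 0 (SZ_OK) */
}
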
|
||||
|
|
|
@ -1,44 +1,44 @@
|
|||
/* Lzma2DecMt.c -- LZMA2 Decoder Multi-thread
|
||||
2021-04-01 : Igor Pavlov : Public domain */
|
||||
2023-04-13 : Igor Pavlov : Public domain */
|
||||
|
||||
#include "Precomp.h"
|
||||
|
||||
// #define SHOW_DEBUG_INFO
|
||||
|
||||
// #define _7ZIP_ST
|
||||
// #define Z7_ST
|
||||
|
||||
#ifdef SHOW_DEBUG_INFO
|
||||
#include <stdio.h>
|
||||
#endif
|
||||
|
||||
#ifndef _7ZIP_ST
|
||||
#ifdef SHOW_DEBUG_INFO
|
||||
#define PRF(x) x
|
||||
#else
|
||||
#define PRF(x)
|
||||
#endif
|
||||
#define PRF_STR(s) PRF(printf("\n" s "\n"))
|
||||
#define PRF_STR_INT_2(s, d1, d2) PRF(printf("\n" s " %d %d\n", (unsigned)d1, (unsigned)d2))
|
||||
#endif
|
||||
|
||||
#include "Alloc.h"
|
||||
|
||||
#include "Lzma2Dec.h"
|
||||
#include "Lzma2DecMt.h"
|
||||
|
||||
#ifndef _7ZIP_ST
|
||||
#ifndef Z7_ST
|
||||
#include "MtDec.h"
|
||||
|
||||
#define LZMA2DECMT_OUT_BLOCK_MAX_DEFAULT (1 << 28)
|
||||
#endif
|
||||
|
||||
|
||||
#ifndef Z7_ST
|
||||
#ifdef SHOW_DEBUG_INFO
|
||||
#define PRF(x) x
|
||||
#else
|
||||
#define PRF(x)
|
||||
#endif
|
||||
#define PRF_STR(s) PRF(printf("\n" s "\n");)
|
||||
#define PRF_STR_INT_2(s, d1, d2) PRF(printf("\n" s " %d %d\n", (unsigned)d1, (unsigned)d2);)
|
||||
#endif
|
||||
|
||||
|
||||
void Lzma2DecMtProps_Init(CLzma2DecMtProps *p)
|
||||
{
|
||||
p->inBufSize_ST = 1 << 20;
|
||||
p->outStep_ST = 1 << 20;
|
||||
|
||||
#ifndef _7ZIP_ST
|
||||
#ifndef Z7_ST
|
||||
p->numThreads = 1;
|
||||
p->inBufSize_MT = 1 << 18;
|
||||
p->outBlockMax = LZMA2DECMT_OUT_BLOCK_MAX_DEFAULT;
|
||||
|
@ -48,7 +48,7 @@ void Lzma2DecMtProps_Init(CLzma2DecMtProps *p)
|
|||
|
||||
|
||||
|
||||
#ifndef _7ZIP_ST
|
||||
#ifndef Z7_ST
|
||||
|
||||
/* ---------- CLzma2DecMtThread ---------- */
|
||||
|
||||
|
@ -81,7 +81,7 @@ typedef struct
|
|||
|
||||
/* ---------- CLzma2DecMt ---------- */
|
||||
|
||||
typedef struct
|
||||
struct CLzma2DecMt
|
||||
{
|
||||
// ISzAllocPtr alloc;
|
||||
ISzAllocPtr allocMid;
|
||||
|
@ -90,9 +90,9 @@ typedef struct
|
|||
CLzma2DecMtProps props;
|
||||
Byte prop;
|
||||
|
||||
ISeqInStream *inStream;
|
||||
ISeqOutStream *outStream;
|
||||
ICompressProgress *progress;
|
||||
ISeqInStreamPtr inStream;
|
||||
ISeqOutStreamPtr outStream;
|
||||
ICompressProgressPtr progress;
|
||||
|
||||
BoolInt finishMode;
|
||||
BoolInt outSize_Defined;
|
||||
|
@ -111,14 +111,13 @@ typedef struct
|
|||
size_t inPos;
|
||||
size_t inLim;
|
||||
|
||||
#ifndef _7ZIP_ST
|
||||
#ifndef Z7_ST
|
||||
UInt64 outProcessed_Parse;
|
||||
BoolInt mtc_WasConstructed;
|
||||
CMtDec mtc;
|
||||
CLzma2DecMtThread coders[MTDEC__THREADS_MAX];
|
||||
CLzma2DecMtThread coders[MTDEC_THREADS_MAX];
|
||||
#endif
|
||||
|
||||
} CLzma2DecMt;
|
||||
};
|
||||
|
||||
|
||||
|
||||
|
@ -142,11 +141,11 @@ CLzma2DecMtHandle Lzma2DecMt_Create(ISzAllocPtr alloc, ISzAllocPtr allocMid)
|
|||
|
||||
// Lzma2DecMtProps_Init(&p->props);
|
||||
|
||||
#ifndef _7ZIP_ST
|
||||
#ifndef Z7_ST
|
||||
p->mtc_WasConstructed = False;
|
||||
{
|
||||
unsigned i;
|
||||
for (i = 0; i < MTDEC__THREADS_MAX; i++)
|
||||
for (i = 0; i < MTDEC_THREADS_MAX; i++)
|
||||
{
|
||||
CLzma2DecMtThread *t = &p->coders[i];
|
||||
t->dec_created = False;
|
||||
|
@ -156,16 +155,16 @@ CLzma2DecMtHandle Lzma2DecMt_Create(ISzAllocPtr alloc, ISzAllocPtr allocMid)
|
|||
}
|
||||
#endif
|
||||
|
||||
return p;
|
||||
return (CLzma2DecMtHandle)(void *)p;
|
||||
}
|
||||
|
||||
|
||||
#ifndef _7ZIP_ST
|
||||
#ifndef Z7_ST
|
||||
|
||||
static void Lzma2DecMt_FreeOutBufs(CLzma2DecMt *p)
|
||||
{
|
||||
unsigned i;
|
||||
for (i = 0; i < MTDEC__THREADS_MAX; i++)
|
||||
for (i = 0; i < MTDEC_THREADS_MAX; i++)
|
||||
{
|
||||
CLzma2DecMtThread *t = &p->coders[i];
|
||||
if (t->outBuf)
|
||||
|
@ -196,13 +195,15 @@ static void Lzma2DecMt_FreeSt(CLzma2DecMt *p)
|
|||
}
|
||||
|
||||
|
||||
void Lzma2DecMt_Destroy(CLzma2DecMtHandle pp)
|
||||
// #define GET_CLzma2DecMt_p CLzma2DecMt *p = (CLzma2DecMt *)(void *)pp;
|
||||
|
||||
void Lzma2DecMt_Destroy(CLzma2DecMtHandle p)
|
||||
{
|
||||
CLzma2DecMt *p = (CLzma2DecMt *)pp;
|
||||
// GET_CLzma2DecMt_p
|
||||
|
||||
Lzma2DecMt_FreeSt(p);
|
||||
|
||||
#ifndef _7ZIP_ST
|
||||
#ifndef Z7_ST
|
||||
|
||||
if (p->mtc_WasConstructed)
|
||||
{
|
||||
|
@ -211,7 +212,7 @@ void Lzma2DecMt_Destroy(CLzma2DecMtHandle pp)
|
|||
}
|
||||
{
|
||||
unsigned i;
|
||||
for (i = 0; i < MTDEC__THREADS_MAX; i++)
|
||||
for (i = 0; i < MTDEC_THREADS_MAX; i++)
|
||||
{
|
||||
CLzma2DecMtThread *t = &p->coders[i];
|
||||
if (t->dec_created)
|
||||
|
@ -226,19 +227,19 @@ void Lzma2DecMt_Destroy(CLzma2DecMtHandle pp)
|
|||
|
||||
#endif
|
||||
|
||||
ISzAlloc_Free(p->alignOffsetAlloc.baseAlloc, pp);
|
||||
ISzAlloc_Free(p->alignOffsetAlloc.baseAlloc, p);
|
||||
}
|
||||
|
||||
|
||||
|
||||
#ifndef _7ZIP_ST
|
||||
#ifndef Z7_ST
|
||||
|
||||
static void Lzma2DecMt_MtCallback_Parse(void *obj, unsigned coderIndex, CMtDecCallbackInfo *cc)
|
||||
{
|
||||
CLzma2DecMt *me = (CLzma2DecMt *)obj;
|
||||
CLzma2DecMtThread *t = &me->coders[coderIndex];
|
||||
|
||||
PRF_STR_INT_2("Parse", coderIndex, cc->srcSize);
|
||||
PRF_STR_INT_2("Parse", coderIndex, cc->srcSize)
|
||||
|
||||
cc->state = MTDEC_PARSE_CONTINUE;
|
||||
|
||||
|
@ -246,7 +247,7 @@ static void Lzma2DecMt_MtCallback_Parse(void *obj, unsigned coderIndex, CMtDecCa
|
|||
{
|
||||
if (!t->dec_created)
|
||||
{
|
||||
Lzma2Dec_Construct(&t->dec);
|
||||
Lzma2Dec_CONSTRUCT(&t->dec)
|
||||
t->dec_created = True;
|
||||
AlignOffsetAlloc_CreateVTable(&t->alloc);
|
||||
{
|
||||
|
@ -297,7 +298,7 @@ static void Lzma2DecMt_MtCallback_Parse(void *obj, unsigned coderIndex, CMtDecCa
|
|||
// that must be finished at position <= outBlockMax.
|
||||
|
||||
{
|
||||
const SizeT srcOrig = cc->srcSize;
|
||||
const size_t srcOrig = cc->srcSize;
|
||||
SizeT srcSize_Point = 0;
|
||||
SizeT dicPos_Point = 0;
|
||||
|
||||
|
@ -306,10 +307,10 @@ static void Lzma2DecMt_MtCallback_Parse(void *obj, unsigned coderIndex, CMtDecCa
|
|||
|
||||
for (;;)
|
||||
{
|
||||
SizeT srcCur = srcOrig - cc->srcSize;
|
||||
SizeT srcCur = (SizeT)(srcOrig - cc->srcSize);
|
||||
|
||||
status = Lzma2Dec_Parse(&t->dec,
|
||||
limit - t->dec.decoder.dicPos,
|
||||
(SizeT)limit - t->dec.decoder.dicPos,
|
||||
cc->src + cc->srcSize, &srcCur,
|
||||
checkFinishBlock);
|
||||
|
||||
|
@ -333,7 +334,7 @@ static void Lzma2DecMt_MtCallback_Parse(void *obj, unsigned coderIndex, CMtDecCa
|
|||
if (t->dec.decoder.dicPos >= (1 << 14))
|
||||
break;
|
||||
dicPos_Point = t->dec.decoder.dicPos;
|
||||
srcSize_Point = cc->srcSize;
|
||||
srcSize_Point = (SizeT)cc->srcSize;
|
||||
continue;
|
||||
}
|
||||
|
||||
|
@ -391,7 +392,7 @@ static void Lzma2DecMt_MtCallback_Parse(void *obj, unsigned coderIndex, CMtDecCa
|
|||
if (unpackRem != 0)
|
||||
{
|
||||
/* we also reserve space for max possible number of output bytes of current LZMA chunk */
|
||||
SizeT rem = limit - dicPos;
|
||||
size_t rem = limit - dicPos;
|
||||
if (rem > unpackRem)
|
||||
rem = unpackRem;
|
||||
dicPos += rem;
|
||||
|
@ -444,7 +445,7 @@ static SRes Lzma2DecMt_MtCallback_PreCode(void *pp, unsigned coderIndex)
|
|||
}
|
||||
|
||||
t->dec.decoder.dic = dest;
|
||||
t->dec.decoder.dicBufSize = t->outPreSize;
|
||||
t->dec.decoder.dicBufSize = (SizeT)t->outPreSize;
|
||||
|
||||
t->needInit = True;
|
||||
|
||||
|
@ -462,7 +463,7 @@ static SRes Lzma2DecMt_MtCallback_Code(void *pp, unsigned coderIndex,
|
|||
|
||||
UNUSED_VAR(srcFinished)
|
||||
|
||||
PRF_STR_INT_2("Code", coderIndex, srcSize);
|
||||
PRF_STR_INT_2("Code", coderIndex, srcSize)
|
||||
|
||||
*inCodePos = t->inCodeSize;
|
||||
*outCodePos = 0;
|
||||
|
@ -476,13 +477,13 @@ static SRes Lzma2DecMt_MtCallback_Code(void *pp, unsigned coderIndex,
|
|||
|
||||
{
|
||||
ELzmaStatus status;
|
||||
size_t srcProcessed = srcSize;
|
||||
SizeT srcProcessed = (SizeT)srcSize;
|
||||
BoolInt blockWasFinished =
|
||||
((int)t->parseStatus == LZMA_STATUS_FINISHED_WITH_MARK
|
||||
|| t->parseStatus == LZMA2_PARSE_STATUS_NEW_BLOCK);
|
||||
|
||||
SRes res = Lzma2Dec_DecodeToDic(&t->dec,
|
||||
t->outPreSize,
|
||||
(SizeT)t->outPreSize,
|
||||
src, &srcProcessed,
|
||||
blockWasFinished ? LZMA_FINISH_END : LZMA_FINISH_ANY,
|
||||
&status);
|
||||
|
@ -540,7 +541,7 @@ static SRes Lzma2DecMt_MtCallback_Write(void *pp, unsigned coderIndex,
|
|||
UNUSED_VAR(srcSize)
|
||||
UNUSED_VAR(isCross)
|
||||
|
||||
PRF_STR_INT_2("Write", coderIndex, srcSize);
|
||||
PRF_STR_INT_2("Write", coderIndex, srcSize)
|
||||
|
||||
*needContinue = False;
|
||||
*canRecode = True;
|
||||
|
@ -588,7 +589,7 @@ static SRes Lzma2DecMt_MtCallback_Write(void *pp, unsigned coderIndex,
|
|||
*needContinue = needContinue2;
|
||||
return SZ_OK;
|
||||
}
|
||||
RINOK(MtProgress_ProgressAdd(&me->mtc.mtProgress, 0, 0));
|
||||
RINOK(MtProgress_ProgressAdd(&me->mtc.mtProgress, 0, 0))
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -611,11 +612,11 @@ static SRes Lzma2Dec_Prepare_ST(CLzma2DecMt *p)
|
|||
{
|
||||
if (!p->dec_created)
|
||||
{
|
||||
Lzma2Dec_Construct(&p->dec);
|
||||
Lzma2Dec_CONSTRUCT(&p->dec)
|
||||
p->dec_created = True;
|
||||
}
|
||||
|
||||
RINOK(Lzma2Dec_Allocate(&p->dec, p->prop, &p->alignOffsetAlloc.vt));
|
||||
RINOK(Lzma2Dec_Allocate(&p->dec, p->prop, &p->alignOffsetAlloc.vt))
|
||||
|
||||
if (!p->inBuf || p->inBufSize != p->props.inBufSize_ST)
|
||||
{
|
||||
|
@ -634,7 +635,7 @@ static SRes Lzma2Dec_Prepare_ST(CLzma2DecMt *p)
|
|||
|
||||
|
||||
static SRes Lzma2Dec_Decode_ST(CLzma2DecMt *p
|
||||
#ifndef _7ZIP_ST
|
||||
#ifndef Z7_ST
|
||||
, BoolInt tMode
|
||||
#endif
|
||||
)
|
||||
|
@ -646,7 +647,7 @@ static SRes Lzma2Dec_Decode_ST(CLzma2DecMt *p
|
|||
|
||||
CLzma2Dec *dec;
|
||||
|
||||
#ifndef _7ZIP_ST
|
||||
#ifndef Z7_ST
|
||||
if (tMode)
|
||||
{
|
||||
Lzma2DecMt_FreeOutBufs(p);
|
||||
|
@ -654,7 +655,7 @@ static SRes Lzma2Dec_Decode_ST(CLzma2DecMt *p
|
|||
}
|
||||
#endif
|
||||
|
||||
RINOK(Lzma2Dec_Prepare_ST(p));
|
||||
RINOK(Lzma2Dec_Prepare_ST(p))
|
||||
|
||||
dec = &p->dec;
|
||||
|
||||
|
@ -681,7 +682,7 @@ static SRes Lzma2Dec_Decode_ST(CLzma2DecMt *p
|
|||
|
||||
if (inPos == inLim)
|
||||
{
|
||||
#ifndef _7ZIP_ST
|
||||
#ifndef Z7_ST
|
||||
if (tMode)
|
||||
{
|
||||
inData = MtDec_Read(&p->mtc, &inLim);
|
||||
|
@ -710,7 +711,7 @@ static SRes Lzma2Dec_Decode_ST(CLzma2DecMt *p
|
|||
{
|
||||
SizeT next = dec->decoder.dicBufSize;
|
||||
if (next - wrPos > p->props.outStep_ST)
|
||||
next = wrPos + p->props.outStep_ST;
|
||||
next = wrPos + (SizeT)p->props.outStep_ST;
|
||||
size = next - dicPos;
|
||||
}
|
||||
|
||||
|
@ -726,7 +727,7 @@ static SRes Lzma2Dec_Decode_ST(CLzma2DecMt *p
|
|||
}
|
||||
}
|
||||
|
||||
inProcessed = inLim - inPos;
|
||||
inProcessed = (SizeT)(inLim - inPos);
|
||||
|
||||
res = Lzma2Dec_DecodeToDic(dec, dicPos + size, inData + inPos, &inProcessed, finishMode, &status);
|
||||
|
||||
|
@ -755,7 +756,7 @@ static SRes Lzma2Dec_Decode_ST(CLzma2DecMt *p
|
|||
dec->decoder.dicPos = 0;
|
||||
wrPos = dec->decoder.dicPos;
|
||||
|
||||
RINOK(res2);
|
||||
RINOK(res2)
|
||||
|
||||
if (needStop)
|
||||
{
|
||||
|
@ -788,7 +789,7 @@ static SRes Lzma2Dec_Decode_ST(CLzma2DecMt *p
|
|||
UInt64 outDelta = p->outProcessed - outPrev;
|
||||
if (inDelta >= (1 << 22) || outDelta >= (1 << 22))
|
||||
{
|
||||
RINOK(ICompressProgress_Progress(p->progress, p->inProcessed, p->outProcessed));
|
||||
RINOK(ICompressProgress_Progress(p->progress, p->inProcessed, p->outProcessed))
|
||||
inPrev = p->inProcessed;
|
||||
outPrev = p->outProcessed;
|
||||
}
|
||||
|
@ -798,20 +799,20 @@ static SRes Lzma2Dec_Decode_ST(CLzma2DecMt *p
|
|||
|
||||
|
||||
|
||||
SRes Lzma2DecMt_Decode(CLzma2DecMtHandle pp,
|
||||
SRes Lzma2DecMt_Decode(CLzma2DecMtHandle p,
|
||||
Byte prop,
|
||||
const CLzma2DecMtProps *props,
|
||||
ISeqOutStream *outStream, const UInt64 *outDataSize, int finishMode,
|
||||
ISeqOutStreamPtr outStream, const UInt64 *outDataSize, int finishMode,
|
||||
// Byte *outBuf, size_t *outBufSize,
|
||||
ISeqInStream *inStream,
|
||||
ISeqInStreamPtr inStream,
|
||||
// const Byte *inData, size_t inDataSize,
|
||||
UInt64 *inProcessed,
|
||||
// UInt64 *outProcessed,
|
||||
int *isMT,
|
||||
ICompressProgress *progress)
|
||||
ICompressProgressPtr progress)
|
||||
{
|
||||
CLzma2DecMt *p = (CLzma2DecMt *)pp;
|
||||
#ifndef _7ZIP_ST
|
||||
// GET_CLzma2DecMt_p
|
||||
#ifndef Z7_ST
|
||||
BoolInt tMode;
|
||||
#endif
|
||||
|
||||
|
@ -845,7 +846,7 @@ SRes Lzma2DecMt_Decode(CLzma2DecMtHandle pp,
|
|||
*isMT = False;
|
||||
|
||||
|
||||
#ifndef _7ZIP_ST
|
||||
#ifndef Z7_ST
|
||||
|
||||
tMode = False;
|
||||
|
||||
|
@ -939,7 +940,7 @@ SRes Lzma2DecMt_Decode(CLzma2DecMtHandle pp,
|
|||
p->readWasFinished = p->mtc.readWasFinished;
|
||||
p->inProcessed = p->mtc.inProcessed;
|
||||
|
||||
PRF_STR("----- decoding ST -----");
|
||||
PRF_STR("----- decoding ST -----")
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -950,7 +951,7 @@ SRes Lzma2DecMt_Decode(CLzma2DecMtHandle pp,
|
|||
|
||||
{
|
||||
SRes res = Lzma2Dec_Decode_ST(p
|
||||
#ifndef _7ZIP_ST
|
||||
#ifndef Z7_ST
|
||||
, tMode
|
||||
#endif
|
||||
);
|
||||
|
@ -967,7 +968,7 @@ SRes Lzma2DecMt_Decode(CLzma2DecMtHandle pp,
|
|||
res = p->readRes;
|
||||
|
||||
/*
|
||||
#ifndef _7ZIP_ST
|
||||
#ifndef Z7_ST
|
||||
if (res == SZ_OK && tMode && p->mtc.parseRes != SZ_OK)
|
||||
res = p->mtc.parseRes;
|
||||
#endif
|
||||
|
@ -980,13 +981,13 @@ SRes Lzma2DecMt_Decode(CLzma2DecMtHandle pp,
|
|||
|
||||
/* ---------- Read from CLzma2DecMtHandle Interface ---------- */
|
||||
|
||||
SRes Lzma2DecMt_Init(CLzma2DecMtHandle pp,
|
||||
SRes Lzma2DecMt_Init(CLzma2DecMtHandle p,
|
||||
Byte prop,
|
||||
const CLzma2DecMtProps *props,
|
||||
const UInt64 *outDataSize, int finishMode,
|
||||
ISeqInStream *inStream)
|
||||
ISeqInStreamPtr inStream)
|
||||
{
|
||||
CLzma2DecMt *p = (CLzma2DecMt *)pp;
|
||||
// GET_CLzma2DecMt_p
|
||||
|
||||
if (prop > 40)
|
||||
return SZ_ERROR_UNSUPPORTED;
|
||||
|
@ -1015,11 +1016,11 @@ SRes Lzma2DecMt_Init(CLzma2DecMtHandle pp,
|
|||
}
|
||||
|
||||
|
||||
SRes Lzma2DecMt_Read(CLzma2DecMtHandle pp,
|
||||
SRes Lzma2DecMt_Read(CLzma2DecMtHandle p,
|
||||
Byte *data, size_t *outSize,
|
||||
UInt64 *inStreamProcessed)
|
||||
{
|
||||
CLzma2DecMt *p = (CLzma2DecMt *)pp;
|
||||
// GET_CLzma2DecMt_p
|
||||
ELzmaFinishMode finishMode;
|
||||
SRes readRes;
|
||||
size_t size = *outSize;
|
||||
|
@ -1055,8 +1056,8 @@ SRes Lzma2DecMt_Read(CLzma2DecMtHandle pp,
|
|||
readRes = ISeqInStream_Read(p->inStream, p->inBuf, &p->inLim);
|
||||
}
|
||||
|
||||
inCur = p->inLim - p->inPos;
|
||||
outCur = size;
|
||||
inCur = (SizeT)(p->inLim - p->inPos);
|
||||
outCur = (SizeT)size;
|
||||
|
||||
res = Lzma2Dec_DecodeToBuf(&p->dec, data, &outCur,
|
||||
p->inBuf + p->inPos, &inCur, finishMode, &status);
|
||||
|
@ -1088,3 +1089,7 @@ SRes Lzma2DecMt_Read(CLzma2DecMtHandle pp,
|
|||
return readRes;
|
||||
}
|
||||
}
|
||||
|
||||
#undef PRF
|
||||
#undef PRF_STR
|
||||
#undef PRF_STR_INT_2
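
Note: two renames recur throughout Lzma2DecMt.c above. First, identifiers such as _7ZIP_ST and MTDEC__THREADS_MAX become Z7_ST and MTDEC_THREADS_MAX, presumably to steer clear of names that C and C++ reserve for the implementation (leading underscore, double underscore). Second, the anonymous "typedef struct { ... } CLzma2DecMt;" becomes a named "struct CLzma2DecMt { ... };", so the public handle can point at the incomplete struct type and the "(CLzma2DecMt *)pp" casts disappear. The sketch below shows that opaque-handle pattern with invented names; it is not the SDK's header.

/* handle_sketch.c -- illustrative only; all names are invented. */
#include <stdlib.h>

/* "header" part: callers only ever see an incomplete type */
typedef struct CFooDec CFooDec;
typedef CFooDec *CFooDecHandle;

CFooDecHandle FooDec_Create(void);
int  FooDec_GetState(CFooDecHandle p);
void FooDec_Destroy(CFooDecHandle p);

/* "implementation" part: the full definition stays private */
struct CFooDec
{
  int state;
};

CFooDecHandle FooDec_Create(void)
{
  CFooDec *p = (CFooDec *)malloc(sizeof(CFooDec));
  if (p)
    p->state = 0;
  return p;
}

int FooDec_GetState(CFooDecHandle p)
{
  return p->state;      /* no (CFooDec *)pp cast needed */
}

void FooDec_Destroy(CFooDecHandle p)
{
  free(p);
}

int main(void)
{
  CFooDecHandle h = FooDec_Create();
  int s = h ? FooDec_GetState(h) : -1;
  FooDec_Destroy(h);
  return s;
}
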
|
||||
|
|
|
@ -1,18 +1,18 @@
|
|||
/* Lzma2Enc.c -- LZMA2 Encoder
|
||||
2021-02-09 : Igor Pavlov : Public domain */
|
||||
2023-04-13 : Igor Pavlov : Public domain */
|
||||
|
||||
#include "Precomp.h"
|
||||
|
||||
#include <string.h>
|
||||
|
||||
/* #define _7ZIP_ST */
|
||||
/* #define Z7_ST */
|
||||
|
||||
#include "Lzma2Enc.h"
|
||||
|
||||
#ifndef _7ZIP_ST
|
||||
#ifndef Z7_ST
|
||||
#include "MtCoder.h"
|
||||
#else
|
||||
#define MTCODER__THREADS_MAX 1
|
||||
#define MTCODER_THREADS_MAX 1
|
||||
#endif
|
||||
|
||||
#define LZMA2_CONTROL_LZMA (1 << 7)
|
||||
|
@ -40,7 +40,7 @@
|
|||
typedef struct
|
||||
{
|
||||
ISeqInStream vt;
|
||||
ISeqInStream *realStream;
|
||||
ISeqInStreamPtr realStream;
|
||||
UInt64 limit;
|
||||
UInt64 processed;
|
||||
int finished;
|
||||
|
@ -53,15 +53,15 @@ static void LimitedSeqInStream_Init(CLimitedSeqInStream *p)
|
|||
p->finished = 0;
|
||||
}
|
||||
|
||||
static SRes LimitedSeqInStream_Read(const ISeqInStream *pp, void *data, size_t *size)
|
||||
static SRes LimitedSeqInStream_Read(ISeqInStreamPtr pp, void *data, size_t *size)
|
||||
{
|
||||
CLimitedSeqInStream *p = CONTAINER_FROM_VTBL(pp, CLimitedSeqInStream, vt);
|
||||
Z7_CONTAINER_FROM_VTBL_TO_DECL_VAR_pp_vt_p(CLimitedSeqInStream)
|
||||
size_t size2 = *size;
|
||||
SRes res = SZ_OK;
|
||||
|
||||
if (p->limit != (UInt64)(Int64)-1)
|
||||
{
|
||||
UInt64 rem = p->limit - p->processed;
|
||||
const UInt64 rem = p->limit - p->processed;
|
||||
if (size2 > rem)
|
||||
size2 = (size_t)rem;
|
||||
}
|
||||
|
@ -95,8 +95,8 @@ static SRes Lzma2EncInt_InitStream(CLzma2EncInt *p, const CLzma2EncProps *props)
|
|||
{
|
||||
SizeT propsSize = LZMA_PROPS_SIZE;
|
||||
Byte propsEncoded[LZMA_PROPS_SIZE];
|
||||
RINOK(LzmaEnc_SetProps(p->enc, &props->lzmaProps));
|
||||
RINOK(LzmaEnc_WriteProperties(p->enc, propsEncoded, &propsSize));
|
||||
RINOK(LzmaEnc_SetProps(p->enc, &props->lzmaProps))
|
||||
RINOK(LzmaEnc_WriteProperties(p->enc, propsEncoded, &propsSize))
|
||||
p->propsByte = propsEncoded[0];
|
||||
p->propsAreSet = True;
|
||||
}
|
||||
|
@ -111,23 +111,23 @@ static void Lzma2EncInt_InitBlock(CLzma2EncInt *p)
|
|||
}
|
||||
|
||||
|
||||
SRes LzmaEnc_PrepareForLzma2(CLzmaEncHandle pp, ISeqInStream *inStream, UInt32 keepWindowSize,
|
||||
SRes LzmaEnc_PrepareForLzma2(CLzmaEncHandle p, ISeqInStreamPtr inStream, UInt32 keepWindowSize,
|
||||
ISzAllocPtr alloc, ISzAllocPtr allocBig);
|
||||
SRes LzmaEnc_MemPrepare(CLzmaEncHandle pp, const Byte *src, SizeT srcLen,
|
||||
SRes LzmaEnc_MemPrepare(CLzmaEncHandle p, const Byte *src, SizeT srcLen,
|
||||
UInt32 keepWindowSize, ISzAllocPtr alloc, ISzAllocPtr allocBig);
|
||||
SRes LzmaEnc_CodeOneMemBlock(CLzmaEncHandle pp, BoolInt reInit,
|
||||
SRes LzmaEnc_CodeOneMemBlock(CLzmaEncHandle p, BoolInt reInit,
|
||||
Byte *dest, size_t *destLen, UInt32 desiredPackSize, UInt32 *unpackSize);
|
||||
const Byte *LzmaEnc_GetCurBuf(CLzmaEncHandle pp);
|
||||
void LzmaEnc_Finish(CLzmaEncHandle pp);
|
||||
void LzmaEnc_SaveState(CLzmaEncHandle pp);
|
||||
void LzmaEnc_RestoreState(CLzmaEncHandle pp);
|
||||
const Byte *LzmaEnc_GetCurBuf(CLzmaEncHandle p);
|
||||
void LzmaEnc_Finish(CLzmaEncHandle p);
|
||||
void LzmaEnc_SaveState(CLzmaEncHandle p);
|
||||
void LzmaEnc_RestoreState(CLzmaEncHandle p);
|
||||
|
||||
/*
|
||||
UInt32 LzmaEnc_GetNumAvailableBytes(CLzmaEncHandle pp);
|
||||
UInt32 LzmaEnc_GetNumAvailableBytes(CLzmaEncHandle p);
|
||||
*/
|
||||
|
||||
static SRes Lzma2EncInt_EncodeSubblock(CLzma2EncInt *p, Byte *outBuf,
|
||||
size_t *packSizeRes, ISeqOutStream *outStream)
|
||||
size_t *packSizeRes, ISeqOutStreamPtr outStream)
|
||||
{
|
||||
size_t packSizeLimit = *packSizeRes;
|
||||
size_t packSize = packSizeLimit;
|
||||
|
@ -167,7 +167,7 @@ static SRes Lzma2EncInt_EncodeSubblock(CLzma2EncInt *p, Byte *outBuf,
|
|||
|
||||
while (unpackSize > 0)
|
||||
{
|
||||
UInt32 u = (unpackSize < LZMA2_COPY_CHUNK_SIZE) ? unpackSize : LZMA2_COPY_CHUNK_SIZE;
|
||||
const UInt32 u = (unpackSize < LZMA2_COPY_CHUNK_SIZE) ? unpackSize : LZMA2_COPY_CHUNK_SIZE;
|
||||
if (packSizeLimit - destPos < u + 3)
|
||||
return SZ_ERROR_OUTPUT_EOF;
|
||||
outBuf[destPos++] = (Byte)(p->srcPos == 0 ? LZMA2_CONTROL_COPY_RESET_DIC : LZMA2_CONTROL_COPY_NO_RESET);
|
||||
|
@ -196,9 +196,9 @@ static SRes Lzma2EncInt_EncodeSubblock(CLzma2EncInt *p, Byte *outBuf,
|
|||
|
||||
{
|
||||
size_t destPos = 0;
|
||||
UInt32 u = unpackSize - 1;
|
||||
UInt32 pm = (UInt32)(packSize - 1);
|
||||
unsigned mode = (p->srcPos == 0) ? 3 : (p->needInitState ? (p->needInitProp ? 2 : 1) : 0);
|
||||
const UInt32 u = unpackSize - 1;
|
||||
const UInt32 pm = (UInt32)(packSize - 1);
|
||||
const unsigned mode = (p->srcPos == 0) ? 3 : (p->needInitState ? (p->needInitProp ? 2 : 1) : 0);
|
||||
|
||||
PRF(printf(" "));
|
||||
|
||||
|
@ -231,7 +231,7 @@ static SRes Lzma2EncInt_EncodeSubblock(CLzma2EncInt *p, Byte *outBuf,
|
|||
void Lzma2EncProps_Init(CLzma2EncProps *p)
|
||||
{
|
||||
LzmaEncProps_Init(&p->lzmaProps);
|
||||
p->blockSize = LZMA2_ENC_PROPS__BLOCK_SIZE__AUTO;
|
||||
p->blockSize = LZMA2_ENC_PROPS_BLOCK_SIZE_AUTO;
|
||||
p->numBlockThreads_Reduced = -1;
|
||||
p->numBlockThreads_Max = -1;
|
||||
p->numTotalThreads = -1;
|
||||
|
@ -251,8 +251,8 @@ void Lzma2EncProps_Normalize(CLzma2EncProps *p)
|
|||
t2 = p->numBlockThreads_Max;
|
||||
t3 = p->numTotalThreads;
|
||||
|
||||
if (t2 > MTCODER__THREADS_MAX)
|
||||
t2 = MTCODER__THREADS_MAX;
|
||||
if (t2 > MTCODER_THREADS_MAX)
|
||||
t2 = MTCODER_THREADS_MAX;
|
||||
|
||||
if (t3 <= 0)
|
||||
{
|
||||
|
@ -268,8 +268,8 @@ void Lzma2EncProps_Normalize(CLzma2EncProps *p)
|
|||
t1 = 1;
|
||||
t2 = t3;
|
||||
}
|
||||
if (t2 > MTCODER__THREADS_MAX)
|
||||
t2 = MTCODER__THREADS_MAX;
|
||||
if (t2 > MTCODER_THREADS_MAX)
|
||||
t2 = MTCODER_THREADS_MAX;
|
||||
}
|
||||
else if (t1 <= 0)
|
||||
{
|
||||
|
@ -286,8 +286,8 @@ void Lzma2EncProps_Normalize(CLzma2EncProps *p)
|
|||
|
||||
fileSize = p->lzmaProps.reduceSize;
|
||||
|
||||
if ( p->blockSize != LZMA2_ENC_PROPS__BLOCK_SIZE__SOLID
|
||||
&& p->blockSize != LZMA2_ENC_PROPS__BLOCK_SIZE__AUTO
|
||||
if ( p->blockSize != LZMA2_ENC_PROPS_BLOCK_SIZE_SOLID
|
||||
&& p->blockSize != LZMA2_ENC_PROPS_BLOCK_SIZE_AUTO
|
||||
&& (p->blockSize < fileSize || fileSize == (UInt64)(Int64)-1))
|
||||
p->lzmaProps.reduceSize = p->blockSize;
|
||||
|
||||
|
@ -297,19 +297,19 @@ void Lzma2EncProps_Normalize(CLzma2EncProps *p)
|
|||
|
||||
t1 = p->lzmaProps.numThreads;
|
||||
|
||||
if (p->blockSize == LZMA2_ENC_PROPS__BLOCK_SIZE__SOLID)
|
||||
if (p->blockSize == LZMA2_ENC_PROPS_BLOCK_SIZE_SOLID)
|
||||
{
|
||||
t2r = t2 = 1;
|
||||
t3 = t1;
|
||||
}
|
||||
else if (p->blockSize == LZMA2_ENC_PROPS__BLOCK_SIZE__AUTO && t2 <= 1)
|
||||
else if (p->blockSize == LZMA2_ENC_PROPS_BLOCK_SIZE_AUTO && t2 <= 1)
|
||||
{
|
||||
/* if there is no block multi-threading, we use SOLID block */
|
||||
p->blockSize = LZMA2_ENC_PROPS__BLOCK_SIZE__SOLID;
|
||||
p->blockSize = LZMA2_ENC_PROPS_BLOCK_SIZE_SOLID;
|
||||
}
|
||||
else
|
||||
{
|
||||
if (p->blockSize == LZMA2_ENC_PROPS__BLOCK_SIZE__AUTO)
|
||||
if (p->blockSize == LZMA2_ENC_PROPS_BLOCK_SIZE_AUTO)
|
||||
{
|
||||
const UInt32 kMinSize = (UInt32)1 << 20;
|
||||
const UInt32 kMaxSize = (UInt32)1 << 28;
|
||||
|
@ -344,7 +344,7 @@ void Lzma2EncProps_Normalize(CLzma2EncProps *p)
|
|||
}
|
||||
|
||||
|
||||
static SRes Progress(ICompressProgress *p, UInt64 inSize, UInt64 outSize)
|
||||
static SRes Progress(ICompressProgressPtr p, UInt64 inSize, UInt64 outSize)
|
||||
{
|
||||
return (p && ICompressProgress_Progress(p, inSize, outSize) != SZ_OK) ? SZ_ERROR_PROGRESS : SZ_OK;
|
||||
}
|
||||
|
@ -352,7 +352,7 @@ static SRes Progress(ICompressProgress *p, UInt64 inSize, UInt64 outSize)
|
|||
|
||||
/* ---------- Lzma2 ---------- */
|
||||
|
||||
typedef struct
|
||||
struct CLzma2Enc
|
||||
{
|
||||
Byte propEncoded;
|
||||
CLzma2EncProps props;
|
||||
|
@ -363,23 +363,22 @@ typedef struct
|
|||
ISzAllocPtr alloc;
|
||||
ISzAllocPtr allocBig;
|
||||
|
||||
CLzma2EncInt coders[MTCODER__THREADS_MAX];
|
||||
CLzma2EncInt coders[MTCODER_THREADS_MAX];
|
||||
|
||||
#ifndef _7ZIP_ST
|
||||
#ifndef Z7_ST
|
||||
|
||||
ISeqOutStream *outStream;
|
||||
ISeqOutStreamPtr outStream;
|
||||
Byte *outBuf;
|
||||
size_t outBuf_Rem; /* remainder in outBuf */
|
||||
|
||||
size_t outBufSize; /* size of allocated outBufs[i] */
|
||||
size_t outBufsDataSizes[MTCODER__BLOCKS_MAX];
|
||||
size_t outBufsDataSizes[MTCODER_BLOCKS_MAX];
|
||||
BoolInt mtCoder_WasConstructed;
|
||||
CMtCoder mtCoder;
|
||||
Byte *outBufs[MTCODER__BLOCKS_MAX];
|
||||
Byte *outBufs[MTCODER_BLOCKS_MAX];
|
||||
|
||||
#endif
|
||||
|
||||
} CLzma2Enc;
|
||||
};
|
||||
|
||||
|
||||
|
||||
|
@ -396,30 +395,30 @@ CLzma2EncHandle Lzma2Enc_Create(ISzAllocPtr alloc, ISzAllocPtr allocBig)
|
|||
p->allocBig = allocBig;
|
||||
{
|
||||
unsigned i;
|
||||
for (i = 0; i < MTCODER__THREADS_MAX; i++)
|
||||
for (i = 0; i < MTCODER_THREADS_MAX; i++)
|
||||
p->coders[i].enc = NULL;
|
||||
}
|
||||
|
||||
#ifndef _7ZIP_ST
|
||||
#ifndef Z7_ST
|
||||
p->mtCoder_WasConstructed = False;
|
||||
{
|
||||
unsigned i;
|
||||
for (i = 0; i < MTCODER__BLOCKS_MAX; i++)
|
||||
for (i = 0; i < MTCODER_BLOCKS_MAX; i++)
|
||||
p->outBufs[i] = NULL;
|
||||
p->outBufSize = 0;
|
||||
}
|
||||
#endif
|
||||
|
||||
return p;
|
||||
return (CLzma2EncHandle)p;
|
||||
}
|
||||
|
||||
|
||||
#ifndef _7ZIP_ST
|
||||
#ifndef Z7_ST
|
||||
|
||||
static void Lzma2Enc_FreeOutBufs(CLzma2Enc *p)
|
||||
{
|
||||
unsigned i;
|
||||
for (i = 0; i < MTCODER__BLOCKS_MAX; i++)
|
||||
for (i = 0; i < MTCODER_BLOCKS_MAX; i++)
|
||||
if (p->outBufs[i])
|
||||
{
|
||||
ISzAlloc_Free(p->alloc, p->outBufs[i]);
|
||||
|
@ -430,12 +429,13 @@ static void Lzma2Enc_FreeOutBufs(CLzma2Enc *p)
|
|||
|
||||
#endif
|
||||
|
||||
// #define GET_CLzma2Enc_p CLzma2Enc *p = (CLzma2Enc *)(void *)p;
|
||||
|
||||
void Lzma2Enc_Destroy(CLzma2EncHandle pp)
|
||||
void Lzma2Enc_Destroy(CLzma2EncHandle p)
|
||||
{
|
||||
CLzma2Enc *p = (CLzma2Enc *)pp;
|
||||
// GET_CLzma2Enc_p
|
||||
unsigned i;
|
||||
for (i = 0; i < MTCODER__THREADS_MAX; i++)
|
||||
for (i = 0; i < MTCODER_THREADS_MAX; i++)
|
||||
{
|
||||
CLzma2EncInt *t = &p->coders[i];
|
||||
if (t->enc)
|
||||
|
@ -446,7 +446,7 @@ void Lzma2Enc_Destroy(CLzma2EncHandle pp)
|
|||
}
|
||||
|
||||
|
||||
#ifndef _7ZIP_ST
|
||||
#ifndef Z7_ST
|
||||
if (p->mtCoder_WasConstructed)
|
||||
{
|
||||
MtCoder_Destruct(&p->mtCoder);
|
||||
|
@ -458,13 +458,13 @@ void Lzma2Enc_Destroy(CLzma2EncHandle pp)
|
|||
ISzAlloc_Free(p->alloc, p->tempBufLzma);
|
||||
p->tempBufLzma = NULL;
|
||||
|
||||
ISzAlloc_Free(p->alloc, pp);
|
||||
ISzAlloc_Free(p->alloc, p);
|
||||
}
|
||||
|
||||
|
||||
SRes Lzma2Enc_SetProps(CLzma2EncHandle pp, const CLzma2EncProps *props)
|
||||
SRes Lzma2Enc_SetProps(CLzma2EncHandle p, const CLzma2EncProps *props)
|
||||
{
|
||||
CLzma2Enc *p = (CLzma2Enc *)pp;
|
||||
// GET_CLzma2Enc_p
|
||||
CLzmaEncProps lzmaProps = props->lzmaProps;
|
||||
LzmaEncProps_Normalize(&lzmaProps);
|
||||
if (lzmaProps.lc + lzmaProps.lp > LZMA2_LCLP_MAX)
|
||||
|
@ -475,16 +475,16 @@ SRes Lzma2Enc_SetProps(CLzma2EncHandle pp, const CLzma2EncProps *props)
|
|||
}
|
||||
|
||||
|
||||
void Lzma2Enc_SetDataSize(CLzmaEncHandle pp, UInt64 expectedDataSiize)
|
||||
void Lzma2Enc_SetDataSize(CLzma2EncHandle p, UInt64 expectedDataSiize)
|
||||
{
|
||||
CLzma2Enc *p = (CLzma2Enc *)pp;
|
||||
// GET_CLzma2Enc_p
|
||||
p->expectedDataSize = expectedDataSiize;
|
||||
}
|
||||
|
||||
|
||||
Byte Lzma2Enc_WriteProperties(CLzma2EncHandle pp)
|
||||
Byte Lzma2Enc_WriteProperties(CLzma2EncHandle p)
|
||||
{
|
||||
CLzma2Enc *p = (CLzma2Enc *)pp;
|
||||
// GET_CLzma2Enc_p
|
||||
unsigned i;
|
||||
UInt32 dicSize = LzmaEncProps_GetDictSize(&p->props.lzmaProps);
|
||||
for (i = 0; i < 40; i++)
|
||||
|
@ -497,12 +497,12 @@ Byte Lzma2Enc_WriteProperties(CLzma2EncHandle pp)
|
|||
static SRes Lzma2Enc_EncodeMt1(
|
||||
CLzma2Enc *me,
|
||||
CLzma2EncInt *p,
|
||||
ISeqOutStream *outStream,
|
||||
ISeqOutStreamPtr outStream,
|
||||
Byte *outBuf, size_t *outBufSize,
|
||||
ISeqInStream *inStream,
|
||||
ISeqInStreamPtr inStream,
|
||||
const Byte *inData, size_t inDataSize,
|
||||
int finished,
|
||||
ICompressProgress *progress)
|
||||
ICompressProgressPtr progress)
|
||||
{
|
||||
UInt64 unpackTotal = 0;
|
||||
UInt64 packTotal = 0;
|
||||
|
@ -540,12 +540,12 @@ static SRes Lzma2Enc_EncodeMt1(
|
|||
}
|
||||
}
|
||||
|
||||
RINOK(Lzma2EncInt_InitStream(p, &me->props));
|
||||
RINOK(Lzma2EncInt_InitStream(p, &me->props))
|
||||
|
||||
for (;;)
|
||||
{
|
||||
SRes res = SZ_OK;
|
||||
size_t inSizeCur = 0;
|
||||
SizeT inSizeCur = 0;
|
||||
|
||||
Lzma2EncInt_InitBlock(p);
|
||||
|
||||
|
@ -559,7 +559,7 @@ static SRes Lzma2Enc_EncodeMt1(
|
|||
if (me->expectedDataSize != (UInt64)(Int64)-1
|
||||
&& me->expectedDataSize >= unpackTotal)
|
||||
expected = me->expectedDataSize - unpackTotal;
|
||||
if (me->props.blockSize != LZMA2_ENC_PROPS__BLOCK_SIZE__SOLID
|
||||
if (me->props.blockSize != LZMA2_ENC_PROPS_BLOCK_SIZE_SOLID
|
||||
&& expected > me->props.blockSize)
|
||||
expected = (size_t)me->props.blockSize;
|
||||
|
||||
|
@ -569,14 +569,14 @@ static SRes Lzma2Enc_EncodeMt1(
|
|||
&limitedInStream.vt,
|
||||
LZMA2_KEEP_WINDOW_SIZE,
|
||||
me->alloc,
|
||||
me->allocBig));
|
||||
me->allocBig))
|
||||
}
|
||||
else
|
||||
{
|
||||
inSizeCur = inDataSize - (size_t)unpackTotal;
|
||||
if (me->props.blockSize != LZMA2_ENC_PROPS__BLOCK_SIZE__SOLID
|
||||
inSizeCur = (SizeT)(inDataSize - (size_t)unpackTotal);
|
||||
if (me->props.blockSize != LZMA2_ENC_PROPS_BLOCK_SIZE_SOLID
|
||||
&& inSizeCur > me->props.blockSize)
|
||||
inSizeCur = (size_t)me->props.blockSize;
|
||||
inSizeCur = (SizeT)(size_t)me->props.blockSize;
|
||||
|
||||
// LzmaEnc_SetDataSize(p->enc, inSizeCur);
|
||||
|
||||
|
@ -584,7 +584,7 @@ static SRes Lzma2Enc_EncodeMt1(
|
|||
inData + (size_t)unpackTotal, inSizeCur,
|
||||
LZMA2_KEEP_WINDOW_SIZE,
|
||||
me->alloc,
|
||||
me->allocBig));
|
||||
me->allocBig))
|
||||
}
|
||||
|
||||
for (;;)
|
||||
|
@ -621,7 +621,7 @@ static SRes Lzma2Enc_EncodeMt1(
|
|||
|
||||
unpackTotal += p->srcPos;
|
||||
|
||||
RINOK(res);
|
||||
RINOK(res)
|
||||
|
||||
if (p->srcPos != (inStream ? limitedInStream.processed : inSizeCur))
|
||||
return SZ_ERROR_FAIL;
|
||||
|
@ -652,12 +652,12 @@ static SRes Lzma2Enc_EncodeMt1(
|
|||
|
||||
|
||||
|
||||
#ifndef _7ZIP_ST
|
||||
#ifndef Z7_ST
|
||||
|
||||
static SRes Lzma2Enc_MtCallback_Code(void *pp, unsigned coderIndex, unsigned outBufIndex,
|
||||
static SRes Lzma2Enc_MtCallback_Code(void *p, unsigned coderIndex, unsigned outBufIndex,
|
||||
const Byte *src, size_t srcSize, int finished)
|
||||
{
|
||||
CLzma2Enc *me = (CLzma2Enc *)pp;
|
||||
CLzma2Enc *me = (CLzma2Enc *)p;
|
||||
size_t destSize = me->outBufSize;
|
||||
SRes res;
|
||||
CMtProgressThunk progressThunk;
|
||||
|
@ -692,9 +692,9 @@ static SRes Lzma2Enc_MtCallback_Code(void *pp, unsigned coderIndex, unsigned out
|
|||
}
|
||||
|
||||
|
||||
static SRes Lzma2Enc_MtCallback_Write(void *pp, unsigned outBufIndex)
|
||||
static SRes Lzma2Enc_MtCallback_Write(void *p, unsigned outBufIndex)
|
||||
{
|
||||
CLzma2Enc *me = (CLzma2Enc *)pp;
|
||||
CLzma2Enc *me = (CLzma2Enc *)p;
|
||||
size_t size = me->outBufsDataSizes[outBufIndex];
|
||||
const Byte *data = me->outBufs[outBufIndex];
|
||||
|
||||
|
@ -713,14 +713,14 @@ static SRes Lzma2Enc_MtCallback_Write(void *pp, unsigned outBufIndex)
|
|||
|
||||
|
||||
|
||||
SRes Lzma2Enc_Encode2(CLzma2EncHandle pp,
|
||||
ISeqOutStream *outStream,
|
||||
SRes Lzma2Enc_Encode2(CLzma2EncHandle p,
|
||||
ISeqOutStreamPtr outStream,
|
||||
Byte *outBuf, size_t *outBufSize,
|
||||
ISeqInStream *inStream,
|
||||
ISeqInStreamPtr inStream,
|
||||
const Byte *inData, size_t inDataSize,
|
||||
ICompressProgress *progress)
|
||||
ICompressProgressPtr progress)
|
||||
{
|
||||
CLzma2Enc *p = (CLzma2Enc *)pp;
|
||||
// GET_CLzma2Enc_p
|
||||
|
||||
if (inStream && inData)
|
||||
return SZ_ERROR_PARAM;
|
||||
|
@ -730,11 +730,11 @@ SRes Lzma2Enc_Encode2(CLzma2EncHandle pp,
|
|||
|
||||
{
|
||||
unsigned i;
|
||||
for (i = 0; i < MTCODER__THREADS_MAX; i++)
|
||||
for (i = 0; i < MTCODER_THREADS_MAX; i++)
|
||||
p->coders[i].propsAreSet = False;
|
||||
}
|
||||
|
||||
#ifndef _7ZIP_ST
|
||||
#ifndef Z7_ST
|
||||
|
||||
if (p->props.numBlockThreads_Reduced > 1)
|
||||
{
|
||||
|
@ -772,7 +772,7 @@ SRes Lzma2Enc_Encode2(CLzma2EncHandle pp,
|
|||
return SZ_ERROR_PARAM; /* SZ_ERROR_MEM */
|
||||
|
||||
{
|
||||
size_t destBlockSize = p->mtCoder.blockSize + (p->mtCoder.blockSize >> 10) + 16;
|
||||
const size_t destBlockSize = p->mtCoder.blockSize + (p->mtCoder.blockSize >> 10) + 16;
|
||||
if (destBlockSize < p->mtCoder.blockSize)
|
||||
return SZ_ERROR_PARAM;
|
||||
if (p->outBufSize != destBlockSize)
|
||||
|
@ -784,7 +784,7 @@ SRes Lzma2Enc_Encode2(CLzma2EncHandle pp,
|
|||
p->mtCoder.expectedDataSize = p->expectedDataSize;
|
||||
|
||||
{
|
||||
SRes res = MtCoder_Code(&p->mtCoder);
|
||||
const SRes res = MtCoder_Code(&p->mtCoder);
|
||||
if (!outStream)
|
||||
*outBufSize = (size_t)(p->outBuf - outBuf);
|
||||
return res;
|
||||
|
@ -801,3 +801,5 @@ SRes Lzma2Enc_Encode2(CLzma2EncHandle pp,
|
|||
True, /* finished */
|
||||
progress);
|
||||
}
|
||||
|
||||
#undef PRF
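
Note: the encoder entry points touched above (Lzma2Enc_Create, Lzma2Enc_SetProps, Lzma2Enc_WriteProperties, Lzma2Enc_Encode2) keep their parameter lists apart from the *Ptr interface typedefs. The following is a hedged sketch of the buffer-to-buffer path, based only on the signatures visible in this diff; the g_Alloc/g_BigAlloc allocators and the 'level' field are assumptions taken from the wider SDK, and error handling is minimal.

/* lzma2enc_sketch.c -- hedged usage sketch, not taken from the SDK examples.
   Assumes Alloc.h exposes g_Alloc / g_BigAlloc and that CLzmaEncProps has a
   'level' field; neither appears in this diff. */
#include "Alloc.h"
#include "Lzma2Enc.h"

/* Compress inBuf into outBuf; on entry *outSize is the capacity, on success
   it holds the number of bytes written. *propByte receives the LZMA2 prop. */
static SRes CompressBuffer(const Byte *inBuf, size_t inSize,
                           Byte *outBuf, size_t *outSize, Byte *propByte)
{
  CLzma2EncProps props;
  SRes res;
  CLzma2EncHandle enc = Lzma2Enc_Create(&g_Alloc, &g_BigAlloc);
  if (!enc)
    return SZ_ERROR_MEM;

  Lzma2EncProps_Init(&props);
  props.lzmaProps.level = 5;            /* illustrative setting */
  res = Lzma2Enc_SetProps(enc, &props);
  if (res == SZ_OK)
  {
    *propByte = Lzma2Enc_WriteProperties(enc);
    res = Lzma2Enc_Encode2(enc,
        NULL,                 /* no ISeqOutStreamPtr: write into outBuf */
        outBuf, outSize,
        NULL,                 /* no ISeqInStreamPtr: read from inBuf */
        inBuf, inSize,
        NULL);                /* no progress callback */
  }
  Lzma2Enc_Destroy(enc);
  return res;
}
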
|
||||
|
|
|
@ -1,5 +1,5 @@
|
|||
/* Lzma86Dec.c -- LZMA + x86 (BCJ) Filter Decoder
|
||||
2016-05-16 : Igor Pavlov : Public domain */
|
||||
2023-03-03 : Igor Pavlov : Public domain */
|
||||
|
||||
#include "Precomp.h"
|
||||
|
||||
|
@ -46,9 +46,8 @@ SRes Lzma86_Decode(Byte *dest, SizeT *destLen, const Byte *src, SizeT *srcLen)
|
|||
return res;
|
||||
if (useFilter == 1)
|
||||
{
|
||||
UInt32 x86State;
|
||||
x86_Convert_Init(x86State);
|
||||
x86_Convert(dest, *destLen, 0, &x86State, 0);
|
||||
UInt32 x86State = Z7_BRANCH_CONV_ST_X86_STATE_INIT_VAL;
|
||||
z7_BranchConvSt_X86_Dec(dest, *destLen, 0, &x86State);
|
||||
}
|
||||
return SZ_OK;
|
||||
}
|
||||
|
|
|
@ -1,5 +1,5 @@
|
|||
/* Lzma86Enc.c -- LZMA + x86 (BCJ) Filter Encoder
|
||||
2018-07-04 : Igor Pavlov : Public domain */
|
||||
2023-03-03 : Igor Pavlov : Public domain */
|
||||
|
||||
#include "Precomp.h"
|
||||
|
||||
|
@ -46,9 +46,8 @@ int Lzma86_Encode(Byte *dest, size_t *destLen, const Byte *src, size_t srcLen,
|
|||
memcpy(filteredStream, src, srcLen);
|
||||
}
|
||||
{
|
||||
UInt32 x86State;
|
||||
x86_Convert_Init(x86State);
|
||||
x86_Convert(filteredStream, srcLen, 0, &x86State, 1);
|
||||
UInt32 x86State = Z7_BRANCH_CONV_ST_X86_STATE_INIT_VAL;
|
||||
z7_BranchConvSt_X86_Enc(filteredStream, srcLen, 0, &x86State);
|
||||
}
|
||||
}
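
Note: both Lzma86 call sites above replace the old x86_Convert_Init / x86_Convert pair (with its encode/decode flag argument) by an explicit state initializer plus direction-specific converters. The small wrapper below mirrors those two call sites; it assumes the declarations live in Bra.h, which this diff does not show.

/* bcj_sketch.c -- mirrors the two call sites above. Assumes Bra.h declares
   z7_BranchConvSt_X86_Enc/Dec and Z7_BRANCH_CONV_ST_X86_STATE_INIT_VAL. */
#include "7zTypes.h"
#include "Bra.h"

/* Apply the x86 BCJ filter in place; 'ip' is the stream offset of data[0]
   (both call sites in the diff pass 0). */
static void FilterX86(Byte *data, SizeT size, UInt32 ip, int encoding)
{
  UInt32 state = Z7_BRANCH_CONV_ST_X86_STATE_INIT_VAL;
  if (encoding)
    z7_BranchConvSt_X86_Enc(data, size, ip, &state);
  else
    z7_BranchConvSt_X86_Dec(data, size, ip, &state);
}
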
|
||||
|
||||
|
|
|
@ -1,5 +1,5 @@
|
|||
/* LzmaDec.c -- LZMA Decoder
|
||||
2021-04-01 : Igor Pavlov : Public domain */
|
||||
2023-04-07 : Igor Pavlov : Public domain */
|
||||
|
||||
#include "Precomp.h"
|
||||
|
||||
|
@ -8,15 +8,15 @@
|
|||
/* #include "CpuArch.h" */
|
||||
#include "LzmaDec.h"
|
||||
|
||||
#define kNumTopBits 24
|
||||
#define kTopValue ((UInt32)1 << kNumTopBits)
|
||||
// #define kNumTopBits 24
|
||||
#define kTopValue ((UInt32)1 << 24)
|
||||
|
||||
#define kNumBitModelTotalBits 11
|
||||
#define kBitModelTotal (1 << kNumBitModelTotalBits)
|
||||
|
||||
#define RC_INIT_SIZE 5
|
||||
|
||||
#ifndef _LZMA_DEC_OPT
|
||||
#ifndef Z7_LZMA_DEC_OPT
|
||||
|
||||
#define kNumMoveBits 5
|
||||
#define NORMALIZE if (range < kTopValue) { range <<= 8; code = (code << 8) | (*buf++); }
|
||||
|
@ -25,14 +25,14 @@
|
|||
#define UPDATE_0(p) range = bound; *(p) = (CLzmaProb)(ttt + ((kBitModelTotal - ttt) >> kNumMoveBits));
|
||||
#define UPDATE_1(p) range -= bound; code -= bound; *(p) = (CLzmaProb)(ttt - (ttt >> kNumMoveBits));
|
||||
#define GET_BIT2(p, i, A0, A1) IF_BIT_0(p) \
|
||||
{ UPDATE_0(p); i = (i + i); A0; } else \
|
||||
{ UPDATE_1(p); i = (i + i) + 1; A1; }
|
||||
{ UPDATE_0(p) i = (i + i); A0; } else \
|
||||
{ UPDATE_1(p) i = (i + i) + 1; A1; }
|
||||
|
||||
#define TREE_GET_BIT(probs, i) { GET_BIT2(probs + i, i, ;, ;); }
|
||||
|
||||
#define REV_BIT(p, i, A0, A1) IF_BIT_0(p + i) \
|
||||
{ UPDATE_0(p + i); A0; } else \
|
||||
{ UPDATE_1(p + i); A1; }
|
||||
{ UPDATE_0(p + i) A0; } else \
|
||||
{ UPDATE_1(p + i) A1; }
|
||||
#define REV_BIT_VAR( p, i, m) REV_BIT(p, i, i += m; m += m, m += m; i += m; )
|
||||
#define REV_BIT_CONST(p, i, m) REV_BIT(p, i, i += m; , i += m * 2; )
|
||||
#define REV_BIT_LAST( p, i, m) REV_BIT(p, i, i -= m , ; )
|
||||
|
@ -40,19 +40,19 @@
|
|||
#define TREE_DECODE(probs, limit, i) \
|
||||
{ i = 1; do { TREE_GET_BIT(probs, i); } while (i < limit); i -= limit; }
|
||||
|
||||
/* #define _LZMA_SIZE_OPT */
|
||||
/* #define Z7_LZMA_SIZE_OPT */
|
||||
|
||||
#ifdef _LZMA_SIZE_OPT
|
||||
#ifdef Z7_LZMA_SIZE_OPT
|
||||
#define TREE_6_DECODE(probs, i) TREE_DECODE(probs, (1 << 6), i)
|
||||
#else
|
||||
#define TREE_6_DECODE(probs, i) \
|
||||
{ i = 1; \
|
||||
TREE_GET_BIT(probs, i); \
|
||||
TREE_GET_BIT(probs, i); \
|
||||
TREE_GET_BIT(probs, i); \
|
||||
TREE_GET_BIT(probs, i); \
|
||||
TREE_GET_BIT(probs, i); \
|
||||
TREE_GET_BIT(probs, i); \
|
||||
TREE_GET_BIT(probs, i) \
|
||||
TREE_GET_BIT(probs, i) \
|
||||
TREE_GET_BIT(probs, i) \
|
||||
TREE_GET_BIT(probs, i) \
|
||||
TREE_GET_BIT(probs, i) \
|
||||
TREE_GET_BIT(probs, i) \
|
||||
i -= 0x40; }
|
||||
#endif
|
||||
|
||||
|
@ -64,25 +64,25 @@
|
|||
probLit = prob + (offs + bit + symbol); \
|
||||
GET_BIT2(probLit, symbol, offs ^= bit; , ;)
|
||||
|
||||
#endif // _LZMA_DEC_OPT
|
||||
#endif // Z7_LZMA_DEC_OPT
|
||||
|
||||
|
||||
#define NORMALIZE_CHECK if (range < kTopValue) { if (buf >= bufLimit) return DUMMY_INPUT_EOF; range <<= 8; code = (code << 8) | (*buf++); }
|
||||
|
||||
#define IF_BIT_0_CHECK(p) ttt = *(p); NORMALIZE_CHECK; bound = (range >> kNumBitModelTotalBits) * (UInt32)ttt; if (code < bound)
|
||||
#define IF_BIT_0_CHECK(p) ttt = *(p); NORMALIZE_CHECK bound = (range >> kNumBitModelTotalBits) * (UInt32)ttt; if (code < bound)
|
||||
#define UPDATE_0_CHECK range = bound;
|
||||
#define UPDATE_1_CHECK range -= bound; code -= bound;
|
||||
#define GET_BIT2_CHECK(p, i, A0, A1) IF_BIT_0_CHECK(p) \
|
||||
{ UPDATE_0_CHECK; i = (i + i); A0; } else \
|
||||
{ UPDATE_1_CHECK; i = (i + i) + 1; A1; }
|
||||
{ UPDATE_0_CHECK i = (i + i); A0; } else \
|
||||
{ UPDATE_1_CHECK i = (i + i) + 1; A1; }
|
||||
#define GET_BIT_CHECK(p, i) GET_BIT2_CHECK(p, i, ; , ;)
|
||||
#define TREE_DECODE_CHECK(probs, limit, i) \
|
||||
{ i = 1; do { GET_BIT_CHECK(probs + i, i) } while (i < limit); i -= limit; }
|
||||
|
||||
|
||||
#define REV_BIT_CHECK(p, i, m) IF_BIT_0_CHECK(p + i) \
|
||||
{ UPDATE_0_CHECK; i += m; m += m; } else \
|
||||
{ UPDATE_1_CHECK; m += m; i += m; }
|
||||
{ UPDATE_0_CHECK i += m; m += m; } else \
|
||||
{ UPDATE_1_CHECK m += m; i += m; }
|
||||
|
||||
|
||||
#define kNumPosBitsMax 4
|
||||
|
@ -224,14 +224,14 @@ Out:
|
|||
*/
|
||||
|
||||
|
||||
#ifdef _LZMA_DEC_OPT
|
||||
#ifdef Z7_LZMA_DEC_OPT
|
||||
|
||||
int MY_FAST_CALL LZMA_DECODE_REAL(CLzmaDec *p, SizeT limit, const Byte *bufLimit);
|
||||
int Z7_FASTCALL LZMA_DECODE_REAL(CLzmaDec *p, SizeT limit, const Byte *bufLimit);
|
||||
|
||||
#else
|
||||
|
||||
static
|
||||
int MY_FAST_CALL LZMA_DECODE_REAL(CLzmaDec *p, SizeT limit, const Byte *bufLimit)
|
||||
int Z7_FASTCALL LZMA_DECODE_REAL(CLzmaDec *p, SizeT limit, const Byte *bufLimit)
|
||||
{
|
||||
CLzmaProb *probs = GET_PROBS;
|
||||
unsigned state = (unsigned)p->state;
|
||||
|
@ -263,7 +263,7 @@ int MY_FAST_CALL LZMA_DECODE_REAL(CLzmaDec *p, SizeT limit, const Byte *bufLimit
|
|||
IF_BIT_0(prob)
|
||||
{
|
||||
unsigned symbol;
|
||||
UPDATE_0(prob);
|
||||
UPDATE_0(prob)
|
||||
prob = probs + Literal;
|
||||
if (processedPos != 0 || checkDicSize != 0)
|
||||
prob += (UInt32)3 * ((((processedPos << 8) + dic[(dicPos == 0 ? dicBufSize : dicPos) - 1]) & lpMask) << lc);
|
||||
|
@ -273,7 +273,7 @@ int MY_FAST_CALL LZMA_DECODE_REAL(CLzmaDec *p, SizeT limit, const Byte *bufLimit
|
|||
{
|
||||
state -= (state < 4) ? state : 3;
|
||||
symbol = 1;
|
||||
#ifdef _LZMA_SIZE_OPT
|
||||
#ifdef Z7_LZMA_SIZE_OPT
|
||||
do { NORMAL_LITER_DEC } while (symbol < 0x100);
|
||||
#else
|
||||
NORMAL_LITER_DEC
|
||||
|
@ -292,7 +292,7 @@ int MY_FAST_CALL LZMA_DECODE_REAL(CLzmaDec *p, SizeT limit, const Byte *bufLimit
|
|||
unsigned offs = 0x100;
|
||||
state -= (state < 10) ? 3 : 6;
|
||||
symbol = 1;
|
||||
#ifdef _LZMA_SIZE_OPT
|
||||
#ifdef Z7_LZMA_SIZE_OPT
|
||||
do
|
||||
{
|
||||
unsigned bit;
|
||||
|
@ -321,25 +321,25 @@ int MY_FAST_CALL LZMA_DECODE_REAL(CLzmaDec *p, SizeT limit, const Byte *bufLimit
|
|||
}
|
||||
|
||||
{
|
||||
UPDATE_1(prob);
|
||||
UPDATE_1(prob)
|
||||
prob = probs + IsRep + state;
|
||||
IF_BIT_0(prob)
|
||||
{
|
||||
UPDATE_0(prob);
|
||||
UPDATE_0(prob)
|
||||
state += kNumStates;
|
||||
prob = probs + LenCoder;
|
||||
}
|
||||
else
|
||||
{
|
||||
UPDATE_1(prob);
|
||||
UPDATE_1(prob)
|
||||
prob = probs + IsRepG0 + state;
|
||||
IF_BIT_0(prob)
|
||||
{
|
||||
UPDATE_0(prob);
|
||||
UPDATE_0(prob)
|
||||
prob = probs + IsRep0Long + COMBINED_PS_STATE;
|
||||
IF_BIT_0(prob)
|
||||
{
|
||||
UPDATE_0(prob);
|
||||
UPDATE_0(prob)
|
||||
|
||||
// that case was checked before with kBadRepCode
|
||||
// if (checkDicSize == 0 && processedPos == 0) { len = kMatchSpecLen_Error_Data + 1; break; }
|
||||
|
@ -353,30 +353,30 @@ int MY_FAST_CALL LZMA_DECODE_REAL(CLzmaDec *p, SizeT limit, const Byte *bufLimit
|
|||
state = state < kNumLitStates ? 9 : 11;
|
||||
continue;
|
||||
}
|
||||
UPDATE_1(prob);
|
||||
UPDATE_1(prob)
|
||||
}
|
||||
else
|
||||
{
|
||||
UInt32 distance;
|
||||
UPDATE_1(prob);
|
||||
UPDATE_1(prob)
|
||||
prob = probs + IsRepG1 + state;
|
||||
IF_BIT_0(prob)
|
||||
{
|
||||
UPDATE_0(prob);
|
||||
UPDATE_0(prob)
|
||||
distance = rep1;
|
||||
}
|
||||
else
|
||||
{
|
||||
UPDATE_1(prob);
|
||||
UPDATE_1(prob)
|
||||
prob = probs + IsRepG2 + state;
|
||||
IF_BIT_0(prob)
|
||||
{
|
||||
UPDATE_0(prob);
|
||||
UPDATE_0(prob)
|
||||
distance = rep2;
|
||||
}
|
||||
else
|
||||
{
|
||||
UPDATE_1(prob);
|
||||
UPDATE_1(prob)
|
||||
distance = rep3;
|
||||
rep3 = rep2;
|
||||
}
|
||||
|
@ -389,37 +389,37 @@ int MY_FAST_CALL LZMA_DECODE_REAL(CLzmaDec *p, SizeT limit, const Byte *bufLimit
|
|||
prob = probs + RepLenCoder;
|
||||
}
|
||||
|
||||
#ifdef _LZMA_SIZE_OPT
|
||||
#ifdef Z7_LZMA_SIZE_OPT
|
||||
{
|
||||
unsigned lim, offset;
|
||||
CLzmaProb *probLen = prob + LenChoice;
|
||||
IF_BIT_0(probLen)
|
||||
{
|
||||
UPDATE_0(probLen);
|
||||
UPDATE_0(probLen)
|
||||
probLen = prob + LenLow + GET_LEN_STATE;
|
||||
offset = 0;
|
||||
lim = (1 << kLenNumLowBits);
|
||||
}
|
||||
else
|
||||
{
|
||||
UPDATE_1(probLen);
|
||||
UPDATE_1(probLen)
|
||||
probLen = prob + LenChoice2;
|
||||
IF_BIT_0(probLen)
|
||||
{
|
||||
UPDATE_0(probLen);
|
||||
UPDATE_0(probLen)
|
||||
probLen = prob + LenLow + GET_LEN_STATE + (1 << kLenNumLowBits);
|
||||
offset = kLenNumLowSymbols;
|
||||
lim = (1 << kLenNumLowBits);
|
||||
}
|
||||
else
|
||||
{
|
||||
UPDATE_1(probLen);
|
||||
UPDATE_1(probLen)
|
||||
probLen = prob + LenHigh;
|
||||
offset = kLenNumLowSymbols * 2;
|
||||
lim = (1 << kLenNumHighBits);
|
||||
}
|
||||
}
|
||||
TREE_DECODE(probLen, lim, len);
|
||||
TREE_DECODE(probLen, lim, len)
|
||||
len += offset;
|
||||
}
|
||||
#else
|
||||
|
@ -427,32 +427,32 @@ int MY_FAST_CALL LZMA_DECODE_REAL(CLzmaDec *p, SizeT limit, const Byte *bufLimit
|
|||
CLzmaProb *probLen = prob + LenChoice;
|
||||
IF_BIT_0(probLen)
|
||||
{
|
||||
UPDATE_0(probLen);
|
||||
UPDATE_0(probLen)
|
||||
probLen = prob + LenLow + GET_LEN_STATE;
|
||||
len = 1;
|
||||
TREE_GET_BIT(probLen, len);
|
||||
TREE_GET_BIT(probLen, len);
|
||||
TREE_GET_BIT(probLen, len);
|
||||
TREE_GET_BIT(probLen, len)
|
||||
TREE_GET_BIT(probLen, len)
|
||||
TREE_GET_BIT(probLen, len)
|
||||
len -= 8;
|
||||
}
|
||||
else
|
||||
{
|
||||
UPDATE_1(probLen);
|
||||
UPDATE_1(probLen)
|
||||
probLen = prob + LenChoice2;
|
||||
IF_BIT_0(probLen)
|
||||
{
|
||||
UPDATE_0(probLen);
|
||||
UPDATE_0(probLen)
|
||||
probLen = prob + LenLow + GET_LEN_STATE + (1 << kLenNumLowBits);
|
||||
len = 1;
|
||||
TREE_GET_BIT(probLen, len);
|
||||
TREE_GET_BIT(probLen, len);
|
||||
TREE_GET_BIT(probLen, len);
|
||||
TREE_GET_BIT(probLen, len)
|
||||
TREE_GET_BIT(probLen, len)
|
||||
TREE_GET_BIT(probLen, len)
|
||||
}
|
||||
else
|
||||
{
|
||||
UPDATE_1(probLen);
|
||||
UPDATE_1(probLen)
|
||||
probLen = prob + LenHigh;
|
||||
TREE_DECODE(probLen, (1 << kLenNumHighBits), len);
|
||||
TREE_DECODE(probLen, (1 << kLenNumHighBits), len)
|
||||
len += kLenNumLowSymbols * 2;
|
||||
}
|
||||
}
|
||||
|
@ -464,7 +464,7 @@ int MY_FAST_CALL LZMA_DECODE_REAL(CLzmaDec *p, SizeT limit, const Byte *bufLimit
|
|||
UInt32 distance;
|
||||
prob = probs + PosSlot +
|
||||
((len < kNumLenToPosStates ? len : kNumLenToPosStates - 1) << kNumPosSlotBits);
|
||||
TREE_6_DECODE(prob, distance);
|
||||
TREE_6_DECODE(prob, distance)
|
||||
if (distance >= kStartPosModelIndex)
|
||||
{
|
||||
unsigned posSlot = (unsigned)distance;
|
||||
|
@ -479,7 +479,7 @@ int MY_FAST_CALL LZMA_DECODE_REAL(CLzmaDec *p, SizeT limit, const Byte *bufLimit
|
|||
distance++;
|
||||
do
|
||||
{
|
||||
REV_BIT_VAR(prob, distance, m);
|
||||
REV_BIT_VAR(prob, distance, m)
|
||||
}
|
||||
while (--numDirectBits);
|
||||
distance -= m;
|
||||
|
@ -514,10 +514,10 @@ int MY_FAST_CALL LZMA_DECODE_REAL(CLzmaDec *p, SizeT limit, const Byte *bufLimit
|
|||
distance <<= kNumAlignBits;
|
||||
{
|
||||
unsigned i = 1;
|
||||
REV_BIT_CONST(prob, i, 1);
|
||||
REV_BIT_CONST(prob, i, 2);
|
||||
REV_BIT_CONST(prob, i, 4);
|
||||
REV_BIT_LAST (prob, i, 8);
|
||||
REV_BIT_CONST(prob, i, 1)
|
||||
REV_BIT_CONST(prob, i, 2)
|
||||
REV_BIT_CONST(prob, i, 4)
|
||||
REV_BIT_LAST (prob, i, 8)
|
||||
distance |= i;
|
||||
}
|
||||
if (distance == (UInt32)0xFFFFFFFF)
|
||||
|
@ -592,7 +592,7 @@ int MY_FAST_CALL LZMA_DECODE_REAL(CLzmaDec *p, SizeT limit, const Byte *bufLimit
|
|||
}
|
||||
while (dicPos < limit && buf < bufLimit);
|
||||
|
||||
NORMALIZE;
|
||||
NORMALIZE
|
||||
|
||||
p->buf = buf;
|
||||
p->range = range;
|
||||
|
@ -613,7 +613,7 @@ int MY_FAST_CALL LZMA_DECODE_REAL(CLzmaDec *p, SizeT limit, const Byte *bufLimit
|
|||
|
||||
|
||||
|
||||
static void MY_FAST_CALL LzmaDec_WriteRem(CLzmaDec *p, SizeT limit)
|
||||
static void Z7_FASTCALL LzmaDec_WriteRem(CLzmaDec *p, SizeT limit)
|
||||
{
|
||||
unsigned len = (unsigned)p->remainLen;
|
||||
if (len == 0 /* || len >= kMatchSpecLenStart */)
|
||||
|
@ -683,7 +683,7 @@ and we support the following state of (p->checkDicSize):
|
|||
(p->checkDicSize == p->prop.dicSize)
|
||||
*/
|
||||
|
||||
static int MY_FAST_CALL LzmaDec_DecodeReal2(CLzmaDec *p, SizeT limit, const Byte *bufLimit)
|
||||
static int Z7_FASTCALL LzmaDec_DecodeReal2(CLzmaDec *p, SizeT limit, const Byte *bufLimit)
|
||||
{
|
||||
if (p->checkDicSize == 0)
|
||||
{
|
||||
|
@ -767,54 +767,54 @@ static ELzmaDummy LzmaDec_TryDummy(const CLzmaDec *p, const Byte *buf, const Byt
|
|||
else
|
||||
{
|
||||
unsigned len;
|
||||
UPDATE_1_CHECK;
|
||||
UPDATE_1_CHECK
|
||||
|
||||
prob = probs + IsRep + state;
|
||||
IF_BIT_0_CHECK(prob)
|
||||
{
|
||||
UPDATE_0_CHECK;
|
||||
UPDATE_0_CHECK
|
||||
state = 0;
|
||||
prob = probs + LenCoder;
|
||||
res = DUMMY_MATCH;
|
||||
}
|
||||
else
|
||||
{
|
||||
UPDATE_1_CHECK;
|
||||
UPDATE_1_CHECK
|
||||
res = DUMMY_REP;
|
||||
prob = probs + IsRepG0 + state;
|
||||
IF_BIT_0_CHECK(prob)
|
||||
{
|
||||
UPDATE_0_CHECK;
|
||||
UPDATE_0_CHECK
|
||||
prob = probs + IsRep0Long + COMBINED_PS_STATE;
|
||||
IF_BIT_0_CHECK(prob)
|
||||
{
|
||||
UPDATE_0_CHECK;
|
||||
UPDATE_0_CHECK
|
||||
break;
|
||||
}
|
||||
else
|
||||
{
|
||||
UPDATE_1_CHECK;
|
||||
UPDATE_1_CHECK
|
||||
}
|
||||
}
|
||||
else
|
||||
{
|
||||
UPDATE_1_CHECK;
|
||||
UPDATE_1_CHECK
|
||||
prob = probs + IsRepG1 + state;
|
||||
IF_BIT_0_CHECK(prob)
|
||||
{
|
||||
UPDATE_0_CHECK;
|
||||
UPDATE_0_CHECK
|
||||
}
|
||||
else
|
||||
{
|
||||
UPDATE_1_CHECK;
|
||||
UPDATE_1_CHECK
|
||||
prob = probs + IsRepG2 + state;
|
||||
IF_BIT_0_CHECK(prob)
|
||||
{
|
||||
UPDATE_0_CHECK;
|
||||
UPDATE_0_CHECK
|
||||
}
|
||||
else
|
||||
{
|
||||
UPDATE_1_CHECK;
|
||||
UPDATE_1_CHECK
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -826,31 +826,31 @@ static ELzmaDummy LzmaDec_TryDummy(const CLzmaDec *p, const Byte *buf, const Byt
|
|||
const CLzmaProb *probLen = prob + LenChoice;
|
||||
IF_BIT_0_CHECK(probLen)
|
||||
{
|
||||
UPDATE_0_CHECK;
|
||||
UPDATE_0_CHECK
|
||||
probLen = prob + LenLow + GET_LEN_STATE;
|
||||
offset = 0;
|
||||
limit = 1 << kLenNumLowBits;
|
||||
}
|
||||
else
|
||||
{
|
||||
UPDATE_1_CHECK;
|
||||
UPDATE_1_CHECK
|
||||
probLen = prob + LenChoice2;
|
||||
IF_BIT_0_CHECK(probLen)
|
||||
{
|
||||
UPDATE_0_CHECK;
|
||||
UPDATE_0_CHECK
|
||||
probLen = prob + LenLow + GET_LEN_STATE + (1 << kLenNumLowBits);
|
||||
offset = kLenNumLowSymbols;
|
||||
limit = 1 << kLenNumLowBits;
|
||||
}
|
||||
else
|
||||
{
|
||||
UPDATE_1_CHECK;
|
||||
UPDATE_1_CHECK
|
||||
probLen = prob + LenHigh;
|
||||
offset = kLenNumLowSymbols * 2;
|
||||
limit = 1 << kLenNumHighBits;
|
||||
}
|
||||
}
|
||||
TREE_DECODE_CHECK(probLen, limit, len);
|
||||
TREE_DECODE_CHECK(probLen, limit, len)
|
||||
len += offset;
|
||||
}
|
||||
|
||||
|
@ -860,7 +860,7 @@ static ELzmaDummy LzmaDec_TryDummy(const CLzmaDec *p, const Byte *buf, const Byt
|
|||
prob = probs + PosSlot +
|
||||
((len < kNumLenToPosStates - 1 ? len : kNumLenToPosStates - 1) <<
|
||||
kNumPosSlotBits);
|
||||
TREE_DECODE_CHECK(prob, 1 << kNumPosSlotBits, posSlot);
|
||||
TREE_DECODE_CHECK(prob, 1 << kNumPosSlotBits, posSlot)
|
||||
if (posSlot >= kStartPosModelIndex)
|
||||
{
|
||||
unsigned numDirectBits = ((posSlot >> 1) - 1);
|
||||
|
@ -888,7 +888,7 @@ static ELzmaDummy LzmaDec_TryDummy(const CLzmaDec *p, const Byte *buf, const Byt
|
|||
unsigned m = 1;
|
||||
do
|
||||
{
|
||||
REV_BIT_CHECK(prob, i, m);
|
||||
REV_BIT_CHECK(prob, i, m)
|
||||
}
|
||||
while (--numDirectBits);
|
||||
}
|
||||
|
@ -897,7 +897,7 @@ static ELzmaDummy LzmaDec_TryDummy(const CLzmaDec *p, const Byte *buf, const Byt
|
|||
}
|
||||
break;
|
||||
}
|
||||
NORMALIZE_CHECK;
|
||||
NORMALIZE_CHECK
|
||||
|
||||
*bufOut = buf;
|
||||
return res;
|
||||
|
@ -943,7 +943,7 @@ When the decoder lookahead, and the lookahead symbol is not end_marker, we have
|
|||
*/
|
||||
|
||||
|
||||
#define RETURN__NOT_FINISHED__FOR_FINISH \
|
||||
#define RETURN_NOT_FINISHED_FOR_FINISH \
|
||||
*status = LZMA_STATUS_NOT_FINISHED; \
|
||||
return SZ_ERROR_DATA; // for strict mode
|
||||
// return SZ_OK; // for relaxed mode
|
||||
|
@ -1029,7 +1029,7 @@ SRes LzmaDec_DecodeToDic(CLzmaDec *p, SizeT dicLimit, const Byte *src, SizeT *sr
|
|||
}
|
||||
if (p->remainLen != 0)
|
||||
{
|
||||
RETURN__NOT_FINISHED__FOR_FINISH;
|
||||
RETURN_NOT_FINISHED_FOR_FINISH
|
||||
}
|
||||
checkEndMarkNow = 1;
|
||||
}
|
||||
|
@ -1072,7 +1072,7 @@ SRes LzmaDec_DecodeToDic(CLzmaDec *p, SizeT dicLimit, const Byte *src, SizeT *sr
|
|||
for (i = 0; i < (unsigned)dummyProcessed; i++)
|
||||
p->tempBuf[i] = src[i];
|
||||
// p->remainLen = kMatchSpecLen_Error_Data;
|
||||
RETURN__NOT_FINISHED__FOR_FINISH;
|
||||
RETURN_NOT_FINISHED_FOR_FINISH
|
||||
}
|
||||
|
||||
bufLimit = src;
|
||||
|
@ -1150,7 +1150,7 @@ SRes LzmaDec_DecodeToDic(CLzmaDec *p, SizeT dicLimit, const Byte *src, SizeT *sr
|
|||
(*srcLen) += (unsigned)dummyProcessed - p->tempBufSize;
|
||||
p->tempBufSize = (unsigned)dummyProcessed;
|
||||
// p->remainLen = kMatchSpecLen_Error_Data;
|
||||
RETURN__NOT_FINISHED__FOR_FINISH;
|
||||
RETURN_NOT_FINISHED_FOR_FINISH
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -1299,8 +1299,8 @@ static SRes LzmaDec_AllocateProbs2(CLzmaDec *p, const CLzmaProps *propNew, ISzAl
|
|||
SRes LzmaDec_AllocateProbs(CLzmaDec *p, const Byte *props, unsigned propsSize, ISzAllocPtr alloc)
|
||||
{
|
||||
CLzmaProps propNew;
|
||||
RINOK(LzmaProps_Decode(&propNew, props, propsSize));
|
||||
RINOK(LzmaDec_AllocateProbs2(p, &propNew, alloc));
|
||||
RINOK(LzmaProps_Decode(&propNew, props, propsSize))
|
||||
RINOK(LzmaDec_AllocateProbs2(p, &propNew, alloc))
|
||||
p->prop = propNew;
|
||||
return SZ_OK;
|
||||
}
|
||||
|
@ -1309,14 +1309,14 @@ SRes LzmaDec_Allocate(CLzmaDec *p, const Byte *props, unsigned propsSize, ISzAll
|
|||
{
|
||||
CLzmaProps propNew;
|
||||
SizeT dicBufSize;
|
||||
RINOK(LzmaProps_Decode(&propNew, props, propsSize));
|
||||
RINOK(LzmaDec_AllocateProbs2(p, &propNew, alloc));
|
||||
RINOK(LzmaProps_Decode(&propNew, props, propsSize))
|
||||
RINOK(LzmaDec_AllocateProbs2(p, &propNew, alloc))
|
||||
|
||||
{
|
||||
UInt32 dictSize = propNew.dicSize;
|
||||
SizeT mask = ((UInt32)1 << 12) - 1;
|
||||
if (dictSize >= ((UInt32)1 << 30)) mask = ((UInt32)1 << 22) - 1;
|
||||
else if (dictSize >= ((UInt32)1 << 22)) mask = ((UInt32)1 << 20) - 1;;
|
||||
else if (dictSize >= ((UInt32)1 << 22)) mask = ((UInt32)1 << 20) - 1;
|
||||
dicBufSize = ((SizeT)dictSize + mask) & ~mask;
|
||||
if (dicBufSize < dictSize)
|
||||
dicBufSize = dictSize;
|
||||
|
@ -1348,8 +1348,8 @@ SRes LzmaDecode(Byte *dest, SizeT *destLen, const Byte *src, SizeT *srcLen,
|
|||
*status = LZMA_STATUS_NOT_SPECIFIED;
|
||||
if (inSize < RC_INIT_SIZE)
|
||||
return SZ_ERROR_INPUT_EOF;
|
||||
LzmaDec_Construct(&p);
|
||||
RINOK(LzmaDec_AllocateProbs(&p, propData, propSize, alloc));
|
||||
LzmaDec_CONSTRUCT(&p)
|
||||
RINOK(LzmaDec_AllocateProbs(&p, propData, propSize, alloc))
|
||||
p.dic = dest;
|
||||
p.dicBufSize = outSize;
|
||||
LzmaDec_Init(&p);
|
||||
|
|
|
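For reference, a minimal sketch of calling the one-call decoder whose body is patched above; it is not from the commit. The full LzmaDecode prototype and LZMA_PROPS_SIZE are assumed from LzmaDec.h of this SDK, g_Alloc comes from Alloc.h, and the caller is assumed to know the unpacked size in *destLen.

#include "Alloc.h"
#include "LzmaDec.h"

/* src holds LZMA_PROPS_SIZE (5) property bytes followed by the packed data. */
static SRes DecodeBuf(Byte *dest, SizeT *destLen, const Byte *src, SizeT srcLen)
{
  ELzmaStatus status;
  SizeT packedSize = srcLen - LZMA_PROPS_SIZE;
  return LzmaDecode(dest, destLen,
      src + LZMA_PROPS_SIZE, &packedSize,
      src, LZMA_PROPS_SIZE,
      LZMA_FINISH_END, &status, &g_Alloc);
}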
@ -1,5 +1,5 @@
|
|||
/* LzmaEnc.c -- LZMA Encoder
|
||||
2021-11-18: Igor Pavlov : Public domain */
|
||||
2023-04-13: Igor Pavlov : Public domain */
|
||||
|
||||
#include "Precomp.h"
|
||||
|
||||
|
@ -16,22 +16,22 @@
|
|||
#include "LzmaEnc.h"
|
||||
|
||||
#include "LzFind.h"
|
||||
#ifndef _7ZIP_ST
|
||||
#ifndef Z7_ST
|
||||
#include "LzFindMt.h"
|
||||
#endif
|
||||
|
||||
/* the following LzmaEnc_* declarations is internal LZMA interface for LZMA2 encoder */
|
||||
|
||||
SRes LzmaEnc_PrepareForLzma2(CLzmaEncHandle pp, ISeqInStream *inStream, UInt32 keepWindowSize,
|
||||
SRes LzmaEnc_PrepareForLzma2(CLzmaEncHandle p, ISeqInStreamPtr inStream, UInt32 keepWindowSize,
|
||||
ISzAllocPtr alloc, ISzAllocPtr allocBig);
|
||||
SRes LzmaEnc_MemPrepare(CLzmaEncHandle pp, const Byte *src, SizeT srcLen,
|
||||
SRes LzmaEnc_MemPrepare(CLzmaEncHandle p, const Byte *src, SizeT srcLen,
|
||||
UInt32 keepWindowSize, ISzAllocPtr alloc, ISzAllocPtr allocBig);
|
||||
SRes LzmaEnc_CodeOneMemBlock(CLzmaEncHandle pp, BoolInt reInit,
|
||||
SRes LzmaEnc_CodeOneMemBlock(CLzmaEncHandle p, BoolInt reInit,
|
||||
Byte *dest, size_t *destLen, UInt32 desiredPackSize, UInt32 *unpackSize);
|
||||
const Byte *LzmaEnc_GetCurBuf(CLzmaEncHandle pp);
|
||||
void LzmaEnc_Finish(CLzmaEncHandle pp);
|
||||
void LzmaEnc_SaveState(CLzmaEncHandle pp);
|
||||
void LzmaEnc_RestoreState(CLzmaEncHandle pp);
|
||||
const Byte *LzmaEnc_GetCurBuf(CLzmaEncHandle p);
|
||||
void LzmaEnc_Finish(CLzmaEncHandle p);
|
||||
void LzmaEnc_SaveState(CLzmaEncHandle p);
|
||||
void LzmaEnc_RestoreState(CLzmaEncHandle p);
|
||||
|
||||
#ifdef SHOW_STAT
|
||||
static unsigned g_STAT_OFFSET = 0;
|
||||
|
@ -40,8 +40,8 @@ static unsigned g_STAT_OFFSET = 0;
|
|||
/* for good normalization speed we still reserve 256 MB before 4 GB range */
|
||||
#define kLzmaMaxHistorySize ((UInt32)15 << 28)
|
||||
|
||||
#define kNumTopBits 24
|
||||
#define kTopValue ((UInt32)1 << kNumTopBits)
|
||||
// #define kNumTopBits 24
|
||||
#define kTopValue ((UInt32)1 << 24)
|
||||
|
||||
#define kNumBitModelTotalBits 11
|
||||
#define kBitModelTotal (1 << kNumBitModelTotalBits)
|
||||
|
@ -60,6 +60,7 @@ void LzmaEncProps_Init(CLzmaEncProps *p)
|
|||
p->dictSize = p->mc = 0;
|
||||
p->reduceSize = (UInt64)(Int64)-1;
|
||||
p->lc = p->lp = p->pb = p->algo = p->fb = p->btMode = p->numHashBytes = p->numThreads = -1;
|
||||
p->numHashOutBits = 0;
|
||||
p->writeEndMark = 0;
|
||||
p->affinity = 0;
|
||||
}
|
||||
|
@ -99,7 +100,7 @@ void LzmaEncProps_Normalize(CLzmaEncProps *p)
|
|||
|
||||
if (p->numThreads < 0)
|
||||
p->numThreads =
|
||||
#ifndef _7ZIP_ST
|
||||
#ifndef Z7_ST
|
||||
((p->btMode && p->algo) ? 2 : 1);
|
||||
#else
|
||||
1;
|
||||
|
@ -293,7 +294,7 @@ typedef struct
|
|||
#define kNumFullDistances (1 << (kEndPosModelIndex >> 1))
|
||||
|
||||
typedef
|
||||
#ifdef _LZMA_PROB32
|
||||
#ifdef Z7_LZMA_PROB32
|
||||
UInt32
|
||||
#else
|
||||
UInt16
|
||||
|
@ -350,7 +351,7 @@ typedef struct
|
|||
Byte *buf;
|
||||
Byte *bufLim;
|
||||
Byte *bufBase;
|
||||
ISeqOutStream *outStream;
|
||||
ISeqOutStreamPtr outStream;
|
||||
UInt64 processed;
|
||||
SRes res;
|
||||
} CRangeEnc;
|
||||
|
@ -383,7 +384,7 @@ typedef struct
|
|||
typedef UInt32 CProbPrice;
|
||||
|
||||
|
||||
typedef struct
|
||||
struct CLzmaEnc
|
||||
{
|
||||
void *matchFinderObj;
|
||||
IMatchFinder2 matchFinder;
|
||||
|
@ -426,7 +427,7 @@ typedef struct
|
|||
UInt32 dictSize;
|
||||
SRes result;
|
||||
|
||||
#ifndef _7ZIP_ST
|
||||
#ifndef Z7_ST
|
||||
BoolInt mtMode;
|
||||
// begin of CMatchFinderMt is used in LZ thread
|
||||
CMatchFinderMt matchFinderMt;
|
||||
|
@ -439,7 +440,7 @@ typedef struct
|
|||
|
||||
// we suppose that we have 8-bytes alignment after CMatchFinder
|
||||
|
||||
#ifndef _7ZIP_ST
|
||||
#ifndef Z7_ST
|
||||
Byte pad[128];
|
||||
#endif
|
||||
|
||||
|
@ -479,77 +480,59 @@ typedef struct
|
|||
CSaveState saveState;
|
||||
|
||||
// BoolInt mf_Failure;
|
||||
#ifndef _7ZIP_ST
|
||||
#ifndef Z7_ST
|
||||
Byte pad2[128];
|
||||
#endif
|
||||
} CLzmaEnc;
|
||||
};
|
||||
|
||||
|
||||
#define MFB (p->matchFinderBase)
|
||||
/*
|
||||
#ifndef _7ZIP_ST
|
||||
#ifndef Z7_ST
|
||||
#define MFB (p->matchFinderMt.MatchFinder)
|
||||
#endif
|
||||
*/
|
||||
|
||||
#define COPY_ARR(dest, src, arr) memcpy(dest->arr, src->arr, sizeof(src->arr));
|
||||
// #define GET_CLzmaEnc_p CLzmaEnc *p = (CLzmaEnc*)(void *)p;
|
||||
// #define GET_const_CLzmaEnc_p const CLzmaEnc *p = (const CLzmaEnc*)(const void *)p;
|
||||
|
||||
void LzmaEnc_SaveState(CLzmaEncHandle pp)
|
||||
#define COPY_ARR(dest, src, arr) memcpy((dest)->arr, (src)->arr, sizeof((src)->arr));
|
||||
|
||||
#define COPY_LZMA_ENC_STATE(d, s, p) \
|
||||
(d)->state = (s)->state; \
|
||||
COPY_ARR(d, s, reps) \
|
||||
COPY_ARR(d, s, posAlignEncoder) \
|
||||
COPY_ARR(d, s, isRep) \
|
||||
COPY_ARR(d, s, isRepG0) \
|
||||
COPY_ARR(d, s, isRepG1) \
|
||||
COPY_ARR(d, s, isRepG2) \
|
||||
COPY_ARR(d, s, isMatch) \
|
||||
COPY_ARR(d, s, isRep0Long) \
|
||||
COPY_ARR(d, s, posSlotEncoder) \
|
||||
COPY_ARR(d, s, posEncoders) \
|
||||
(d)->lenProbs = (s)->lenProbs; \
|
||||
(d)->repLenProbs = (s)->repLenProbs; \
|
||||
memcpy((d)->litProbs, (s)->litProbs, ((UInt32)0x300 << (p)->lclp) * sizeof(CLzmaProb));
|
||||
|
||||
void LzmaEnc_SaveState(CLzmaEncHandle p)
|
||||
{
|
||||
CLzmaEnc *p = (CLzmaEnc *)pp;
|
||||
CSaveState *dest = &p->saveState;
|
||||
|
||||
dest->state = p->state;
|
||||
|
||||
dest->lenProbs = p->lenProbs;
|
||||
dest->repLenProbs = p->repLenProbs;
|
||||
// GET_CLzmaEnc_p
|
||||
CSaveState *v = &p->saveState;
|
||||
COPY_LZMA_ENC_STATE(v, p, p)
|
||||
}
|
||||
|
||||
COPY_ARR(dest, p, reps);
|
||||
|
||||
COPY_ARR(dest, p, posAlignEncoder);
|
||||
COPY_ARR(dest, p, isRep);
|
||||
COPY_ARR(dest, p, isRepG0);
|
||||
COPY_ARR(dest, p, isRepG1);
|
||||
COPY_ARR(dest, p, isRepG2);
|
||||
COPY_ARR(dest, p, isMatch);
|
||||
COPY_ARR(dest, p, isRep0Long);
|
||||
COPY_ARR(dest, p, posSlotEncoder);
|
||||
COPY_ARR(dest, p, posEncoders);
|
||||
|
||||
memcpy(dest->litProbs, p->litProbs, ((UInt32)0x300 << p->lclp) * sizeof(CLzmaProb));
|
||||
void LzmaEnc_RestoreState(CLzmaEncHandle p)
|
||||
{
|
||||
// GET_CLzmaEnc_p
|
||||
const CSaveState *v = &p->saveState;
|
||||
COPY_LZMA_ENC_STATE(p, v, p)
|
||||
}
|
||||
|
||||
|
||||
void LzmaEnc_RestoreState(CLzmaEncHandle pp)
|
||||
Z7_NO_INLINE
|
||||
SRes LzmaEnc_SetProps(CLzmaEncHandle p, const CLzmaEncProps *props2)
|
||||
{
|
||||
CLzmaEnc *dest = (CLzmaEnc *)pp;
|
||||
const CSaveState *p = &dest->saveState;
|
||||
|
||||
dest->state = p->state;
|
||||
|
||||
dest->lenProbs = p->lenProbs;
|
||||
dest->repLenProbs = p->repLenProbs;
|
||||
|
||||
COPY_ARR(dest, p, reps);
|
||||
|
||||
COPY_ARR(dest, p, posAlignEncoder);
|
||||
COPY_ARR(dest, p, isRep);
|
||||
COPY_ARR(dest, p, isRepG0);
|
||||
COPY_ARR(dest, p, isRepG1);
|
||||
COPY_ARR(dest, p, isRepG2);
|
||||
COPY_ARR(dest, p, isMatch);
|
||||
COPY_ARR(dest, p, isRep0Long);
|
||||
COPY_ARR(dest, p, posSlotEncoder);
|
||||
COPY_ARR(dest, p, posEncoders);
|
||||
|
||||
memcpy(dest->litProbs, p->litProbs, ((UInt32)0x300 << dest->lclp) * sizeof(CLzmaProb));
|
||||
}
|
||||
|
||||
|
||||
|
||||
SRes LzmaEnc_SetProps(CLzmaEncHandle pp, const CLzmaEncProps *props2)
|
||||
{
|
||||
CLzmaEnc *p = (CLzmaEnc *)pp;
|
||||
// GET_CLzmaEnc_p
|
||||
CLzmaEncProps props = *props2;
|
||||
LzmaEncProps_Normalize(&props);
|
||||
|
||||
|
@ -585,6 +568,7 @@ SRes LzmaEnc_SetProps(CLzmaEncHandle pp, const CLzmaEncProps *props2)
|
|||
p->fastMode = (props.algo == 0);
|
||||
// p->_maxMode = True;
|
||||
MFB.btMode = (Byte)(props.btMode ? 1 : 0);
|
||||
// MFB.btMode = (Byte)(props.btMode);
|
||||
{
|
||||
unsigned numHashBytes = 4;
|
||||
if (props.btMode)
|
||||
|
@ -595,13 +579,15 @@ SRes LzmaEnc_SetProps(CLzmaEncHandle pp, const CLzmaEncProps *props2)
|
|||
if (props.numHashBytes >= 5) numHashBytes = 5;
|
||||
|
||||
MFB.numHashBytes = numHashBytes;
|
||||
// MFB.numHashBytes_Min = 2;
|
||||
MFB.numHashOutBits = (Byte)props.numHashOutBits;
|
||||
}
|
||||
|
||||
MFB.cutValue = props.mc;
|
||||
|
||||
p->writeEndMark = (BoolInt)props.writeEndMark;
|
||||
|
||||
#ifndef _7ZIP_ST
|
||||
#ifndef Z7_ST
|
||||
/*
|
||||
if (newMultiThread != _multiThread)
|
||||
{
|
||||
|
@ -618,9 +604,9 @@ SRes LzmaEnc_SetProps(CLzmaEncHandle pp, const CLzmaEncProps *props2)
|
|||
}
|
||||
|
||||
|
||||
void LzmaEnc_SetDataSize(CLzmaEncHandle pp, UInt64 expectedDataSiize)
|
||||
void LzmaEnc_SetDataSize(CLzmaEncHandle p, UInt64 expectedDataSiize)
|
||||
{
|
||||
CLzmaEnc *p = (CLzmaEnc *)pp;
|
||||
// GET_CLzmaEnc_p
|
||||
MFB.expectedDataSize = expectedDataSiize;
|
||||
}
|
||||
|
||||
|
@ -684,7 +670,7 @@ static void RangeEnc_Init(CRangeEnc *p)
|
|||
p->res = SZ_OK;
|
||||
}
|
||||
|
||||
MY_NO_INLINE static void RangeEnc_FlushStream(CRangeEnc *p)
|
||||
Z7_NO_INLINE static void RangeEnc_FlushStream(CRangeEnc *p)
|
||||
{
|
||||
const size_t num = (size_t)(p->buf - p->bufBase);
|
||||
if (p->res == SZ_OK)
|
||||
|
@ -696,7 +682,7 @@ MY_NO_INLINE static void RangeEnc_FlushStream(CRangeEnc *p)
|
|||
p->buf = p->bufBase;
|
||||
}
|
||||
|
||||
MY_NO_INLINE static void MY_FAST_CALL RangeEnc_ShiftLow(CRangeEnc *p)
|
||||
Z7_NO_INLINE static void Z7_FASTCALL RangeEnc_ShiftLow(CRangeEnc *p)
|
||||
{
|
||||
UInt32 low = (UInt32)p->low;
|
||||
unsigned high = (unsigned)(p->low >> 32);
|
||||
|
@ -741,9 +727,9 @@ static void RangeEnc_FlushData(CRangeEnc *p)
|
|||
ttt = *(prob); \
|
||||
newBound = (range >> kNumBitModelTotalBits) * ttt;
|
||||
|
||||
// #define _LZMA_ENC_USE_BRANCH
|
||||
// #define Z7_LZMA_ENC_USE_BRANCH
|
||||
|
||||
#ifdef _LZMA_ENC_USE_BRANCH
|
||||
#ifdef Z7_LZMA_ENC_USE_BRANCH
|
||||
|
||||
#define RC_BIT(p, prob, bit) { \
|
||||
RC_BIT_PRE(p, prob) \
|
||||
|
@ -811,7 +797,7 @@ static void LitEnc_Encode(CRangeEnc *p, CLzmaProb *probs, UInt32 sym)
|
|||
CLzmaProb *prob = probs + (sym >> 8);
|
||||
UInt32 bit = (sym >> 7) & 1;
|
||||
sym <<= 1;
|
||||
RC_BIT(p, prob, bit);
|
||||
RC_BIT(p, prob, bit)
|
||||
}
|
||||
while (sym < 0x10000);
|
||||
p->range = range;
|
||||
|
@ -833,7 +819,7 @@ static void LitEnc_EncodeMatched(CRangeEnc *p, CLzmaProb *probs, UInt32 sym, UIn
|
|||
bit = (sym >> 7) & 1;
|
||||
sym <<= 1;
|
||||
offs &= ~(matchByte ^ sym);
|
||||
RC_BIT(p, prob, bit);
|
||||
RC_BIT(p, prob, bit)
|
||||
}
|
||||
while (sym < 0x10000);
|
||||
p->range = range;
|
||||
|
@ -867,10 +853,10 @@ static void LzmaEnc_InitPriceTables(CProbPrice *ProbPrices)
|
|||
|
||||
|
||||
#define GET_PRICE(prob, bit) \
|
||||
p->ProbPrices[((prob) ^ (unsigned)(((-(int)(bit))) & (kBitModelTotal - 1))) >> kNumMoveReducingBits];
|
||||
p->ProbPrices[((prob) ^ (unsigned)(((-(int)(bit))) & (kBitModelTotal - 1))) >> kNumMoveReducingBits]
|
||||
|
||||
#define GET_PRICEa(prob, bit) \
|
||||
ProbPrices[((prob) ^ (unsigned)((-((int)(bit))) & (kBitModelTotal - 1))) >> kNumMoveReducingBits];
|
||||
ProbPrices[((prob) ^ (unsigned)((-((int)(bit))) & (kBitModelTotal - 1))) >> kNumMoveReducingBits]
|
||||
|
||||
#define GET_PRICE_0(prob) p->ProbPrices[(prob) >> kNumMoveReducingBits]
|
||||
#define GET_PRICE_1(prob) p->ProbPrices[((prob) ^ (kBitModelTotal - 1)) >> kNumMoveReducingBits]
|
||||
|
@ -921,7 +907,7 @@ static void RcTree_ReverseEncode(CRangeEnc *rc, CLzmaProb *probs, unsigned numBi
|
|||
unsigned bit = sym & 1;
|
||||
// RangeEnc_EncodeBit(rc, probs + m, bit);
|
||||
sym >>= 1;
|
||||
RC_BIT(rc, probs + m, bit);
|
||||
RC_BIT(rc, probs + m, bit)
|
||||
m = (m << 1) | bit;
|
||||
}
|
||||
while (--numBits);
|
||||
|
@ -944,15 +930,15 @@ static void LenEnc_Encode(CLenEnc *p, CRangeEnc *rc, unsigned sym, unsigned posS
|
|||
UInt32 range, ttt, newBound;
|
||||
CLzmaProb *probs = p->low;
|
||||
range = rc->range;
|
||||
RC_BIT_PRE(rc, probs);
|
||||
RC_BIT_PRE(rc, probs)
|
||||
if (sym >= kLenNumLowSymbols)
|
||||
{
|
||||
RC_BIT_1(rc, probs);
|
||||
RC_BIT_1(rc, probs)
|
||||
probs += kLenNumLowSymbols;
|
||||
RC_BIT_PRE(rc, probs);
|
||||
RC_BIT_PRE(rc, probs)
|
||||
if (sym >= kLenNumLowSymbols * 2)
|
||||
{
|
||||
RC_BIT_1(rc, probs);
|
||||
RC_BIT_1(rc, probs)
|
||||
rc->range = range;
|
||||
// RcTree_Encode(rc, p->high, kLenNumHighBits, sym - kLenNumLowSymbols * 2);
|
||||
LitEnc_Encode(rc, p->high, sym - kLenNumLowSymbols * 2);
|
||||
|
@ -965,11 +951,11 @@ static void LenEnc_Encode(CLenEnc *p, CRangeEnc *rc, unsigned sym, unsigned posS
|
|||
{
|
||||
unsigned m;
|
||||
unsigned bit;
|
||||
RC_BIT_0(rc, probs);
|
||||
RC_BIT_0(rc, probs)
|
||||
probs += (posState << (1 + kLenNumLowBits));
|
||||
bit = (sym >> 2) ; RC_BIT(rc, probs + 1, bit); m = (1 << 1) + bit;
|
||||
bit = (sym >> 1) & 1; RC_BIT(rc, probs + m, bit); m = (m << 1) + bit;
|
||||
bit = sym & 1; RC_BIT(rc, probs + m, bit);
|
||||
bit = (sym >> 2) ; RC_BIT(rc, probs + 1, bit) m = (1 << 1) + bit;
|
||||
bit = (sym >> 1) & 1; RC_BIT(rc, probs + m, bit) m = (m << 1) + bit;
|
||||
bit = sym & 1; RC_BIT(rc, probs + m, bit)
|
||||
rc->range = range;
|
||||
}
|
||||
}
|
||||
|
@ -990,7 +976,7 @@ static void SetPrices_3(const CLzmaProb *probs, UInt32 startPrice, UInt32 *price
|
|||
}
|
||||
|
||||
|
||||
MY_NO_INLINE static void MY_FAST_CALL LenPriceEnc_UpdateTables(
|
||||
Z7_NO_INLINE static void Z7_FASTCALL LenPriceEnc_UpdateTables(
|
||||
CLenPriceEnc *p,
|
||||
unsigned numPosStates,
|
||||
const CLenEnc *enc,
|
||||
|
@ -1152,7 +1138,7 @@ static unsigned ReadMatchDistances(CLzmaEnc *p, unsigned *numPairsRes)
|
|||
+ GET_PRICE_1(p->isRep[state]) \
|
||||
+ GET_PRICE_0(p->isRepG0[state])
|
||||
|
||||
MY_FORCE_INLINE
|
||||
Z7_FORCE_INLINE
|
||||
static UInt32 GetPrice_PureRep(const CLzmaEnc *p, unsigned repIndex, size_t state, size_t posState)
|
||||
{
|
||||
UInt32 price;
|
||||
|
@ -1331,7 +1317,7 @@ static unsigned GetOptimum(CLzmaEnc *p, UInt32 position)
|
|||
LitEnc_GetPrice(probs, curByte, p->ProbPrices));
|
||||
}
|
||||
|
||||
MakeAs_Lit(&p->opt[1]);
|
||||
MakeAs_Lit(&p->opt[1])
|
||||
|
||||
matchPrice = GET_PRICE_1(p->isMatch[p->state][posState]);
|
||||
repMatchPrice = matchPrice + GET_PRICE_1(p->isRep[p->state]);
|
||||
|
@ -1343,7 +1329,7 @@ static unsigned GetOptimum(CLzmaEnc *p, UInt32 position)
|
|||
if (shortRepPrice < p->opt[1].price)
|
||||
{
|
||||
p->opt[1].price = shortRepPrice;
|
||||
MakeAs_ShortRep(&p->opt[1]);
|
||||
MakeAs_ShortRep(&p->opt[1])
|
||||
}
|
||||
if (last < 2)
|
||||
{
|
||||
|
@ -1410,7 +1396,7 @@ static unsigned GetOptimum(CLzmaEnc *p, UInt32 position)
|
|||
else
|
||||
{
|
||||
unsigned slot;
|
||||
GetPosSlot2(dist, slot);
|
||||
GetPosSlot2(dist, slot)
|
||||
price += p->alignPrices[dist & kAlignMask];
|
||||
price += p->posSlotPrices[lenToPosState][slot];
|
||||
}
|
||||
|
@ -1486,7 +1472,7 @@ static unsigned GetOptimum(CLzmaEnc *p, UInt32 position)
|
|||
unsigned delta = best - cur;
|
||||
if (delta != 0)
|
||||
{
|
||||
MOVE_POS(p, delta);
|
||||
MOVE_POS(p, delta)
|
||||
}
|
||||
}
|
||||
cur = best;
|
||||
|
@ -1633,7 +1619,7 @@ static unsigned GetOptimum(CLzmaEnc *p, UInt32 position)
|
|||
{
|
||||
nextOpt->price = litPrice;
|
||||
nextOpt->len = 1;
|
||||
MakeAs_Lit(nextOpt);
|
||||
MakeAs_Lit(nextOpt)
|
||||
nextIsLit = True;
|
||||
}
|
||||
}
|
||||
|
@ -1667,7 +1653,7 @@ static unsigned GetOptimum(CLzmaEnc *p, UInt32 position)
|
|||
{
|
||||
nextOpt->price = shortRepPrice;
|
||||
nextOpt->len = 1;
|
||||
MakeAs_ShortRep(nextOpt);
|
||||
MakeAs_ShortRep(nextOpt)
|
||||
nextIsLit = False;
|
||||
}
|
||||
}
|
||||
|
@ -1871,7 +1857,7 @@ static unsigned GetOptimum(CLzmaEnc *p, UInt32 position)
|
|||
dist = MATCHES[(size_t)offs + 1];
|
||||
|
||||
// if (dist >= kNumFullDistances)
|
||||
GetPosSlot2(dist, posSlot);
|
||||
GetPosSlot2(dist, posSlot)
|
||||
|
||||
for (len = /*2*/ startLen; ; len++)
|
||||
{
|
||||
|
@ -1962,7 +1948,7 @@ static unsigned GetOptimum(CLzmaEnc *p, UInt32 position)
|
|||
break;
|
||||
dist = MATCHES[(size_t)offs + 1];
|
||||
// if (dist >= kNumFullDistances)
|
||||
GetPosSlot2(dist, posSlot);
|
||||
GetPosSlot2(dist, posSlot)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -2138,7 +2124,7 @@ static void WriteEndMarker(CLzmaEnc *p, unsigned posState)
|
|||
{
|
||||
UInt32 ttt, newBound;
|
||||
RC_BIT_PRE(p, probs + m)
|
||||
RC_BIT_1(&p->rc, probs + m);
|
||||
RC_BIT_1(&p->rc, probs + m)
|
||||
m = (m << 1) + 1;
|
||||
}
|
||||
while (m < (1 << kNumPosSlotBits));
|
||||
|
@ -2163,7 +2149,7 @@ static void WriteEndMarker(CLzmaEnc *p, unsigned posState)
|
|||
{
|
||||
UInt32 ttt, newBound;
|
||||
RC_BIT_PRE(p, probs + m)
|
||||
RC_BIT_1(&p->rc, probs + m);
|
||||
RC_BIT_1(&p->rc, probs + m)
|
||||
m = (m << 1) + 1;
|
||||
}
|
||||
while (m < kAlignTableSize);
|
||||
|
@ -2179,7 +2165,7 @@ static SRes CheckErrors(CLzmaEnc *p)
|
|||
if (p->rc.res != SZ_OK)
|
||||
p->result = SZ_ERROR_WRITE;
|
||||
|
||||
#ifndef _7ZIP_ST
|
||||
#ifndef Z7_ST
|
||||
if (
|
||||
// p->mf_Failure ||
|
||||
(p->mtMode &&
|
||||
|
@ -2187,7 +2173,7 @@ static SRes CheckErrors(CLzmaEnc *p)
|
|||
p->matchFinderMt.failure_LZ_BT))
|
||||
)
|
||||
{
|
||||
p->result = MY_HRES_ERROR__INTERNAL_ERROR;
|
||||
p->result = MY_HRES_ERROR_INTERNAL_ERROR;
|
||||
// printf("\nCheckErrors p->matchFinderMt.failureLZ\n");
|
||||
}
|
||||
#endif
|
||||
|
@ -2201,7 +2187,7 @@ static SRes CheckErrors(CLzmaEnc *p)
|
|||
}
|
||||
|
||||
|
||||
MY_NO_INLINE static SRes Flush(CLzmaEnc *p, UInt32 nowPos)
|
||||
Z7_NO_INLINE static SRes Flush(CLzmaEnc *p, UInt32 nowPos)
|
||||
{
|
||||
/* ReleaseMFStream(); */
|
||||
p->finished = True;
|
||||
|
@ -2213,7 +2199,7 @@ MY_NO_INLINE static SRes Flush(CLzmaEnc *p, UInt32 nowPos)
|
|||
}
|
||||
|
||||
|
||||
MY_NO_INLINE static void FillAlignPrices(CLzmaEnc *p)
|
||||
Z7_NO_INLINE static void FillAlignPrices(CLzmaEnc *p)
|
||||
{
|
||||
unsigned i;
|
||||
const CProbPrice *ProbPrices = p->ProbPrices;
|
||||
|
@ -2237,7 +2223,7 @@ MY_NO_INLINE static void FillAlignPrices(CLzmaEnc *p)
|
|||
}
|
||||
|
||||
|
||||
MY_NO_INLINE static void FillDistancesPrices(CLzmaEnc *p)
|
||||
Z7_NO_INLINE static void FillDistancesPrices(CLzmaEnc *p)
|
||||
{
|
||||
// int y; for (y = 0; y < 100; y++) {
|
||||
|
||||
|
@ -2337,7 +2323,7 @@ static void LzmaEnc_Construct(CLzmaEnc *p)
|
|||
RangeEnc_Construct(&p->rc);
|
||||
MatchFinder_Construct(&MFB);
|
||||
|
||||
#ifndef _7ZIP_ST
|
||||
#ifndef Z7_ST
|
||||
p->matchFinderMt.MatchFinder = &MFB;
|
||||
MatchFinderMt_Construct(&p->matchFinderMt);
|
||||
#endif
|
||||
|
@ -2345,7 +2331,7 @@ static void LzmaEnc_Construct(CLzmaEnc *p)
|
|||
{
|
||||
CLzmaEncProps props;
|
||||
LzmaEncProps_Init(&props);
|
||||
LzmaEnc_SetProps(p, &props);
|
||||
LzmaEnc_SetProps((CLzmaEncHandle)(void *)p, &props);
|
||||
}
|
||||
|
||||
#ifndef LZMA_LOG_BSR
|
||||
|
@ -2376,7 +2362,7 @@ static void LzmaEnc_FreeLits(CLzmaEnc *p, ISzAllocPtr alloc)
|
|||
|
||||
static void LzmaEnc_Destruct(CLzmaEnc *p, ISzAllocPtr alloc, ISzAllocPtr allocBig)
|
||||
{
|
||||
#ifndef _7ZIP_ST
|
||||
#ifndef Z7_ST
|
||||
MatchFinderMt_Destruct(&p->matchFinderMt, allocBig);
|
||||
#endif
|
||||
|
||||
|
@ -2387,21 +2373,22 @@ static void LzmaEnc_Destruct(CLzmaEnc *p, ISzAllocPtr alloc, ISzAllocPtr allocBi
|
|||
|
||||
void LzmaEnc_Destroy(CLzmaEncHandle p, ISzAllocPtr alloc, ISzAllocPtr allocBig)
|
||||
{
|
||||
LzmaEnc_Destruct((CLzmaEnc *)p, alloc, allocBig);
|
||||
// GET_CLzmaEnc_p
|
||||
LzmaEnc_Destruct(p, alloc, allocBig);
|
||||
ISzAlloc_Free(alloc, p);
|
||||
}
|
||||
|
||||
|
||||
MY_NO_INLINE
|
||||
Z7_NO_INLINE
|
||||
static SRes LzmaEnc_CodeOneBlock(CLzmaEnc *p, UInt32 maxPackSize, UInt32 maxUnpackSize)
|
||||
{
|
||||
UInt32 nowPos32, startPos32;
|
||||
if (p->needInit)
|
||||
{
|
||||
#ifndef _7ZIP_ST
|
||||
#ifndef Z7_ST
|
||||
if (p->mtMode)
|
||||
{
|
||||
RINOK(MatchFinderMt_InitMt(&p->matchFinderMt));
|
||||
RINOK(MatchFinderMt_InitMt(&p->matchFinderMt))
|
||||
}
|
||||
#endif
|
||||
p->matchFinder.Init(p->matchFinderObj);
|
||||
|
@ -2410,7 +2397,7 @@ static SRes LzmaEnc_CodeOneBlock(CLzmaEnc *p, UInt32 maxPackSize, UInt32 maxUnpa
|
|||
|
||||
if (p->finished)
|
||||
return p->result;
|
||||
RINOK(CheckErrors(p));
|
||||
RINOK(CheckErrors(p))
|
||||
|
||||
nowPos32 = (UInt32)p->nowPos64;
|
||||
startPos32 = nowPos32;
|
||||
|
@ -2473,7 +2460,7 @@ static SRes LzmaEnc_CodeOneBlock(CLzmaEnc *p, UInt32 maxPackSize, UInt32 maxUnpa
|
|||
const Byte *data;
|
||||
unsigned state;
|
||||
|
||||
RC_BIT_0(&p->rc, probs);
|
||||
RC_BIT_0(&p->rc, probs)
|
||||
p->rc.range = range;
|
||||
data = p->matchFinder.GetPointerToCurrentPos(p->matchFinderObj) - p->additionalOffset;
|
||||
probs = LIT_PROBS(nowPos32, *(data - 1));
|
||||
|
@ -2487,53 +2474,53 @@ static SRes LzmaEnc_CodeOneBlock(CLzmaEnc *p, UInt32 maxPackSize, UInt32 maxUnpa
|
|||
}
|
||||
else
|
||||
{
|
||||
RC_BIT_1(&p->rc, probs);
|
||||
RC_BIT_1(&p->rc, probs)
|
||||
probs = &p->isRep[p->state];
|
||||
RC_BIT_PRE(&p->rc, probs)
|
||||
|
||||
if (dist < LZMA_NUM_REPS)
|
||||
{
|
||||
RC_BIT_1(&p->rc, probs);
|
||||
RC_BIT_1(&p->rc, probs)
|
||||
probs = &p->isRepG0[p->state];
|
||||
RC_BIT_PRE(&p->rc, probs)
|
||||
if (dist == 0)
|
||||
{
|
||||
RC_BIT_0(&p->rc, probs);
|
||||
RC_BIT_0(&p->rc, probs)
|
||||
probs = &p->isRep0Long[p->state][posState];
|
||||
RC_BIT_PRE(&p->rc, probs)
|
||||
if (len != 1)
|
||||
{
|
||||
RC_BIT_1_BASE(&p->rc, probs);
|
||||
RC_BIT_1_BASE(&p->rc, probs)
|
||||
}
|
||||
else
|
||||
{
|
||||
RC_BIT_0_BASE(&p->rc, probs);
|
||||
RC_BIT_0_BASE(&p->rc, probs)
|
||||
p->state = kShortRepNextStates[p->state];
|
||||
}
|
||||
}
|
||||
else
|
||||
{
|
||||
RC_BIT_1(&p->rc, probs);
|
||||
RC_BIT_1(&p->rc, probs)
|
||||
probs = &p->isRepG1[p->state];
|
||||
RC_BIT_PRE(&p->rc, probs)
|
||||
if (dist == 1)
|
||||
{
|
||||
RC_BIT_0_BASE(&p->rc, probs);
|
||||
RC_BIT_0_BASE(&p->rc, probs)
|
||||
dist = p->reps[1];
|
||||
}
|
||||
else
|
||||
{
|
||||
RC_BIT_1(&p->rc, probs);
|
||||
RC_BIT_1(&p->rc, probs)
|
||||
probs = &p->isRepG2[p->state];
|
||||
RC_BIT_PRE(&p->rc, probs)
|
||||
if (dist == 2)
|
||||
{
|
||||
RC_BIT_0_BASE(&p->rc, probs);
|
||||
RC_BIT_0_BASE(&p->rc, probs)
|
||||
dist = p->reps[2];
|
||||
}
|
||||
else
|
||||
{
|
||||
RC_BIT_1_BASE(&p->rc, probs);
|
||||
RC_BIT_1_BASE(&p->rc, probs)
|
||||
dist = p->reps[3];
|
||||
p->reps[3] = p->reps[2];
|
||||
}
|
||||
|
@ -2557,7 +2544,7 @@ static SRes LzmaEnc_CodeOneBlock(CLzmaEnc *p, UInt32 maxPackSize, UInt32 maxUnpa
|
|||
else
|
||||
{
|
||||
unsigned posSlot;
|
||||
RC_BIT_0(&p->rc, probs);
|
||||
RC_BIT_0(&p->rc, probs)
|
||||
p->rc.range = range;
|
||||
p->state = kMatchNextStates[p->state];
|
||||
|
||||
|
@ -2571,7 +2558,7 @@ static SRes LzmaEnc_CodeOneBlock(CLzmaEnc *p, UInt32 maxPackSize, UInt32 maxUnpa
|
|||
p->reps[0] = dist + 1;
|
||||
|
||||
p->matchPriceCount++;
|
||||
GetPosSlot(dist, posSlot);
|
||||
GetPosSlot(dist, posSlot)
|
||||
// RcTree_Encode_PosSlot(&p->rc, p->posSlotEncoder[GetLenToPosState(len)], posSlot);
|
||||
{
|
||||
UInt32 sym = (UInt32)posSlot + (1 << kNumPosSlotBits);
|
||||
|
@ -2582,7 +2569,7 @@ static SRes LzmaEnc_CodeOneBlock(CLzmaEnc *p, UInt32 maxPackSize, UInt32 maxUnpa
|
|||
CLzmaProb *prob = probs + (sym >> kNumPosSlotBits);
|
||||
UInt32 bit = (sym >> (kNumPosSlotBits - 1)) & 1;
|
||||
sym <<= 1;
|
||||
RC_BIT(&p->rc, prob, bit);
|
||||
RC_BIT(&p->rc, prob, bit)
|
||||
}
|
||||
while (sym < (1 << kNumPosSlotBits * 2));
|
||||
p->rc.range = range;
|
||||
|
@ -2626,10 +2613,10 @@ static SRes LzmaEnc_CodeOneBlock(CLzmaEnc *p, UInt32 maxPackSize, UInt32 maxUnpa
|
|||
{
|
||||
unsigned m = 1;
|
||||
unsigned bit;
|
||||
bit = dist & 1; dist >>= 1; RC_BIT(&p->rc, p->posAlignEncoder + m, bit); m = (m << 1) + bit;
|
||||
bit = dist & 1; dist >>= 1; RC_BIT(&p->rc, p->posAlignEncoder + m, bit); m = (m << 1) + bit;
|
||||
bit = dist & 1; dist >>= 1; RC_BIT(&p->rc, p->posAlignEncoder + m, bit); m = (m << 1) + bit;
|
||||
bit = dist & 1; RC_BIT(&p->rc, p->posAlignEncoder + m, bit);
|
||||
bit = dist & 1; dist >>= 1; RC_BIT(&p->rc, p->posAlignEncoder + m, bit) m = (m << 1) + bit;
|
||||
bit = dist & 1; dist >>= 1; RC_BIT(&p->rc, p->posAlignEncoder + m, bit) m = (m << 1) + bit;
|
||||
bit = dist & 1; dist >>= 1; RC_BIT(&p->rc, p->posAlignEncoder + m, bit) m = (m << 1) + bit;
|
||||
bit = dist & 1; RC_BIT(&p->rc, p->posAlignEncoder + m, bit)
|
||||
p->rc.range = range;
|
||||
// p->alignPriceCount++;
|
||||
}
|
||||
|
@ -2704,7 +2691,7 @@ static SRes LzmaEnc_Alloc(CLzmaEnc *p, UInt32 keepWindowSize, ISzAllocPtr alloc,
|
|||
if (!RangeEnc_Alloc(&p->rc, alloc))
|
||||
return SZ_ERROR_MEM;
|
||||
|
||||
#ifndef _7ZIP_ST
|
||||
#ifndef Z7_ST
|
||||
p->mtMode = (p->multiThread && !p->fastMode && (MFB.btMode != 0));
|
||||
#endif
|
||||
|
||||
|
@ -2748,15 +2735,14 @@ static SRes LzmaEnc_Alloc(CLzmaEnc *p, UInt32 keepWindowSize, ISzAllocPtr alloc,
|
|||
(numFastBytes + LZMA_MATCH_LEN_MAX + 1)
|
||||
*/
|
||||
|
||||
#ifndef _7ZIP_ST
|
||||
#ifndef Z7_ST
|
||||
if (p->mtMode)
|
||||
{
|
||||
RINOK(MatchFinderMt_Create(&p->matchFinderMt, dictSize, beforeSize,
|
||||
p->numFastBytes, LZMA_MATCH_LEN_MAX + 1 /* 18.04 */
|
||||
, allocBig));
|
||||
, allocBig))
|
||||
p->matchFinderObj = &p->matchFinderMt;
|
||||
MFB.bigHash = (Byte)(
|
||||
(p->dictSize > kBigHashDicLimit && MFB.hashMask >= 0xFFFFFF) ? 1 : 0);
|
||||
MFB.bigHash = (Byte)(MFB.hashMask >= 0xFFFFFF ? 1 : 0);
|
||||
MatchFinderMt_CreateVTable(&p->matchFinderMt, &p->matchFinder);
|
||||
}
|
||||
else
|
||||
|
@ -2872,59 +2858,53 @@ static SRes LzmaEnc_AllocAndInit(CLzmaEnc *p, UInt32 keepWindowSize, ISzAllocPtr
|
|||
|
||||
p->finished = False;
|
||||
p->result = SZ_OK;
|
||||
RINOK(LzmaEnc_Alloc(p, keepWindowSize, alloc, allocBig));
|
||||
p->nowPos64 = 0;
|
||||
p->needInit = 1;
|
||||
RINOK(LzmaEnc_Alloc(p, keepWindowSize, alloc, allocBig))
|
||||
LzmaEnc_Init(p);
|
||||
LzmaEnc_InitPrices(p);
|
||||
p->nowPos64 = 0;
|
||||
return SZ_OK;
|
||||
}
|
||||
|
||||
static SRes LzmaEnc_Prepare(CLzmaEncHandle pp, ISeqOutStream *outStream, ISeqInStream *inStream,
|
||||
static SRes LzmaEnc_Prepare(CLzmaEncHandle p,
|
||||
ISeqOutStreamPtr outStream,
|
||||
ISeqInStreamPtr inStream,
|
||||
ISzAllocPtr alloc, ISzAllocPtr allocBig)
|
||||
{
|
||||
CLzmaEnc *p = (CLzmaEnc *)pp;
|
||||
MFB.stream = inStream;
|
||||
p->needInit = 1;
|
||||
// GET_CLzmaEnc_p
|
||||
MatchFinder_SET_STREAM(&MFB, inStream)
|
||||
p->rc.outStream = outStream;
|
||||
return LzmaEnc_AllocAndInit(p, 0, alloc, allocBig);
|
||||
}
|
||||
|
||||
SRes LzmaEnc_PrepareForLzma2(CLzmaEncHandle pp,
|
||||
ISeqInStream *inStream, UInt32 keepWindowSize,
|
||||
SRes LzmaEnc_PrepareForLzma2(CLzmaEncHandle p,
|
||||
ISeqInStreamPtr inStream, UInt32 keepWindowSize,
|
||||
ISzAllocPtr alloc, ISzAllocPtr allocBig)
|
||||
{
|
||||
CLzmaEnc *p = (CLzmaEnc *)pp;
|
||||
MFB.stream = inStream;
|
||||
p->needInit = 1;
|
||||
// GET_CLzmaEnc_p
|
||||
MatchFinder_SET_STREAM(&MFB, inStream)
|
||||
return LzmaEnc_AllocAndInit(p, keepWindowSize, alloc, allocBig);
|
||||
}
|
||||
|
||||
static void LzmaEnc_SetInputBuf(CLzmaEnc *p, const Byte *src, SizeT srcLen)
|
||||
SRes LzmaEnc_MemPrepare(CLzmaEncHandle p,
|
||||
const Byte *src, SizeT srcLen,
|
||||
UInt32 keepWindowSize,
|
||||
ISzAllocPtr alloc, ISzAllocPtr allocBig)
|
||||
{
|
||||
MFB.directInput = 1;
|
||||
MFB.bufferBase = (Byte *)src;
|
||||
MFB.directInputRem = srcLen;
|
||||
}
|
||||
|
||||
SRes LzmaEnc_MemPrepare(CLzmaEncHandle pp, const Byte *src, SizeT srcLen,
|
||||
UInt32 keepWindowSize, ISzAllocPtr alloc, ISzAllocPtr allocBig)
|
||||
{
|
||||
CLzmaEnc *p = (CLzmaEnc *)pp;
|
||||
LzmaEnc_SetInputBuf(p, src, srcLen);
|
||||
p->needInit = 1;
|
||||
|
||||
LzmaEnc_SetDataSize(pp, srcLen);
|
||||
// GET_CLzmaEnc_p
|
||||
MatchFinder_SET_DIRECT_INPUT_BUF(&MFB, src, srcLen)
|
||||
LzmaEnc_SetDataSize(p, srcLen);
|
||||
return LzmaEnc_AllocAndInit(p, keepWindowSize, alloc, allocBig);
|
||||
}
|
||||
|
||||
void LzmaEnc_Finish(CLzmaEncHandle pp)
|
||||
void LzmaEnc_Finish(CLzmaEncHandle p)
|
||||
{
|
||||
#ifndef _7ZIP_ST
|
||||
CLzmaEnc *p = (CLzmaEnc *)pp;
|
||||
#ifndef Z7_ST
|
||||
// GET_CLzmaEnc_p
|
||||
if (p->mtMode)
|
||||
MatchFinderMt_ReleaseStream(&p->matchFinderMt);
|
||||
#else
|
||||
UNUSED_VAR(pp);
|
||||
UNUSED_VAR(p)
|
||||
#endif
|
||||
}
|
||||
|
||||
|
@ -2933,13 +2913,13 @@ typedef struct
|
|||
{
|
||||
ISeqOutStream vt;
|
||||
Byte *data;
|
||||
SizeT rem;
|
||||
size_t rem;
|
||||
BoolInt overflow;
|
||||
} CLzmaEnc_SeqOutStreamBuf;
|
||||
|
||||
static size_t SeqOutStreamBuf_Write(const ISeqOutStream *pp, const void *data, size_t size)
|
||||
static size_t SeqOutStreamBuf_Write(ISeqOutStreamPtr pp, const void *data, size_t size)
|
||||
{
|
||||
CLzmaEnc_SeqOutStreamBuf *p = CONTAINER_FROM_VTBL(pp, CLzmaEnc_SeqOutStreamBuf, vt);
|
||||
Z7_CONTAINER_FROM_VTBL_TO_DECL_VAR_pp_vt_p(CLzmaEnc_SeqOutStreamBuf)
|
||||
if (p->rem < size)
|
||||
{
|
||||
size = p->rem;
|
||||
|
@ -2956,24 +2936,25 @@ static size_t SeqOutStreamBuf_Write(const ISeqOutStream *pp, const void *data, s
|
|||
|
||||
|
||||
/*
|
||||
UInt32 LzmaEnc_GetNumAvailableBytes(CLzmaEncHandle pp)
|
||||
UInt32 LzmaEnc_GetNumAvailableBytes(CLzmaEncHandle p)
|
||||
{
|
||||
const CLzmaEnc *p = (CLzmaEnc *)pp;
|
||||
GET_const_CLzmaEnc_p
|
||||
return p->matchFinder.GetNumAvailableBytes(p->matchFinderObj);
|
||||
}
|
||||
*/
|
||||
|
||||
const Byte *LzmaEnc_GetCurBuf(CLzmaEncHandle pp)
|
||||
const Byte *LzmaEnc_GetCurBuf(CLzmaEncHandle p)
|
||||
{
|
||||
const CLzmaEnc *p = (CLzmaEnc *)pp;
|
||||
// GET_const_CLzmaEnc_p
|
||||
return p->matchFinder.GetPointerToCurrentPos(p->matchFinderObj) - p->additionalOffset;
|
||||
}
|
||||
|
||||
|
||||
SRes LzmaEnc_CodeOneMemBlock(CLzmaEncHandle pp, BoolInt reInit,
|
||||
// (desiredPackSize == 0) is not allowed
|
||||
SRes LzmaEnc_CodeOneMemBlock(CLzmaEncHandle p, BoolInt reInit,
|
||||
Byte *dest, size_t *destLen, UInt32 desiredPackSize, UInt32 *unpackSize)
|
||||
{
|
||||
CLzmaEnc *p = (CLzmaEnc *)pp;
|
||||
// GET_CLzmaEnc_p
|
||||
UInt64 nowPos64;
|
||||
SRes res;
|
||||
CLzmaEnc_SeqOutStreamBuf outStream;
|
||||
|
@ -2990,14 +2971,10 @@ SRes LzmaEnc_CodeOneMemBlock(CLzmaEncHandle pp, BoolInt reInit,
|
|||
if (reInit)
|
||||
LzmaEnc_Init(p);
|
||||
LzmaEnc_InitPrices(p);
|
||||
|
||||
nowPos64 = p->nowPos64;
|
||||
RangeEnc_Init(&p->rc);
|
||||
p->rc.outStream = &outStream.vt;
|
||||
|
||||
if (desiredPackSize == 0)
|
||||
return SZ_ERROR_OUTPUT_EOF;
|
||||
|
||||
nowPos64 = p->nowPos64;
|
||||
|
||||
res = LzmaEnc_CodeOneBlock(p, desiredPackSize, *unpackSize);
|
||||
|
||||
*unpackSize = (UInt32)(p->nowPos64 - nowPos64);
|
||||
|
@ -3009,12 +2986,12 @@ SRes LzmaEnc_CodeOneMemBlock(CLzmaEncHandle pp, BoolInt reInit,
|
|||
}
|
||||
|
||||
|
||||
MY_NO_INLINE
|
||||
static SRes LzmaEnc_Encode2(CLzmaEnc *p, ICompressProgress *progress)
|
||||
Z7_NO_INLINE
|
||||
static SRes LzmaEnc_Encode2(CLzmaEnc *p, ICompressProgressPtr progress)
|
||||
{
|
||||
SRes res = SZ_OK;
|
||||
|
||||
#ifndef _7ZIP_ST
|
||||
#ifndef Z7_ST
|
||||
Byte allocaDummy[0x300];
|
||||
allocaDummy[0] = 0;
|
||||
allocaDummy[1] = allocaDummy[0];
|
||||
|
@ -3036,7 +3013,7 @@ static SRes LzmaEnc_Encode2(CLzmaEnc *p, ICompressProgress *progress)
|
|||
}
|
||||
}
|
||||
|
||||
LzmaEnc_Finish(p);
|
||||
LzmaEnc_Finish((CLzmaEncHandle)(void *)p);
|
||||
|
||||
/*
|
||||
if (res == SZ_OK && !Inline_MatchFinder_IsFinishedOK(&MFB))
|
||||
|
@ -3048,21 +3025,22 @@ static SRes LzmaEnc_Encode2(CLzmaEnc *p, ICompressProgress *progress)
|
|||
}
|
||||
|
||||
|
||||
SRes LzmaEnc_Encode(CLzmaEncHandle pp, ISeqOutStream *outStream, ISeqInStream *inStream, ICompressProgress *progress,
|
||||
SRes LzmaEnc_Encode(CLzmaEncHandle p, ISeqOutStreamPtr outStream, ISeqInStreamPtr inStream, ICompressProgressPtr progress,
|
||||
ISzAllocPtr alloc, ISzAllocPtr allocBig)
|
||||
{
|
||||
RINOK(LzmaEnc_Prepare(pp, outStream, inStream, alloc, allocBig));
|
||||
return LzmaEnc_Encode2((CLzmaEnc *)pp, progress);
|
||||
// GET_CLzmaEnc_p
|
||||
RINOK(LzmaEnc_Prepare(p, outStream, inStream, alloc, allocBig))
|
||||
return LzmaEnc_Encode2(p, progress);
|
||||
}
|
||||
|
||||
|
||||
SRes LzmaEnc_WriteProperties(CLzmaEncHandle pp, Byte *props, SizeT *size)
|
||||
SRes LzmaEnc_WriteProperties(CLzmaEncHandle p, Byte *props, SizeT *size)
|
||||
{
|
||||
if (*size < LZMA_PROPS_SIZE)
|
||||
return SZ_ERROR_PARAM;
|
||||
*size = LZMA_PROPS_SIZE;
|
||||
{
|
||||
const CLzmaEnc *p = (const CLzmaEnc *)pp;
|
||||
// GET_CLzmaEnc_p
|
||||
const UInt32 dictSize = p->dictSize;
|
||||
UInt32 v;
|
||||
props[0] = (Byte)((p->pb * 5 + p->lp) * 9 + p->lc);
|
||||
|
@ -3086,23 +3064,24 @@ SRes LzmaEnc_WriteProperties(CLzmaEncHandle pp, Byte *props, SizeT *size)
|
|||
while (v < dictSize);
|
||||
}
|
||||
|
||||
SetUi32(props + 1, v);
|
||||
SetUi32(props + 1, v)
|
||||
return SZ_OK;
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
unsigned LzmaEnc_IsWriteEndMark(CLzmaEncHandle pp)
|
||||
unsigned LzmaEnc_IsWriteEndMark(CLzmaEncHandle p)
|
||||
{
|
||||
return (unsigned)((CLzmaEnc *)pp)->writeEndMark;
|
||||
// GET_CLzmaEnc_p
|
||||
return (unsigned)p->writeEndMark;
|
||||
}
|
||||
|
||||
|
||||
SRes LzmaEnc_MemEncode(CLzmaEncHandle pp, Byte *dest, SizeT *destLen, const Byte *src, SizeT srcLen,
|
||||
int writeEndMark, ICompressProgress *progress, ISzAllocPtr alloc, ISzAllocPtr allocBig)
|
||||
SRes LzmaEnc_MemEncode(CLzmaEncHandle p, Byte *dest, SizeT *destLen, const Byte *src, SizeT srcLen,
|
||||
int writeEndMark, ICompressProgressPtr progress, ISzAllocPtr alloc, ISzAllocPtr allocBig)
|
||||
{
|
||||
SRes res;
|
||||
CLzmaEnc *p = (CLzmaEnc *)pp;
|
||||
// GET_CLzmaEnc_p
|
||||
|
||||
CLzmaEnc_SeqOutStreamBuf outStream;
|
||||
|
||||
|
@ -3114,7 +3093,7 @@ SRes LzmaEnc_MemEncode(CLzmaEncHandle pp, Byte *dest, SizeT *destLen, const Byte
|
|||
p->writeEndMark = writeEndMark;
|
||||
p->rc.outStream = &outStream.vt;
|
||||
|
||||
res = LzmaEnc_MemPrepare(pp, src, srcLen, 0, alloc, allocBig);
|
||||
res = LzmaEnc_MemPrepare(p, src, srcLen, 0, alloc, allocBig);
|
||||
|
||||
if (res == SZ_OK)
|
||||
{
|
||||
|
@ -3123,7 +3102,7 @@ SRes LzmaEnc_MemEncode(CLzmaEncHandle pp, Byte *dest, SizeT *destLen, const Byte
|
|||
res = SZ_ERROR_FAIL;
|
||||
}
|
||||
|
||||
*destLen -= outStream.rem;
|
||||
*destLen -= (SizeT)outStream.rem;
|
||||
if (outStream.overflow)
|
||||
return SZ_ERROR_OUTPUT_EOF;
|
||||
return res;
|
||||
|
@ -3132,9 +3111,9 @@ SRes LzmaEnc_MemEncode(CLzmaEncHandle pp, Byte *dest, SizeT *destLen, const Byte
|
|||
|
||||
SRes LzmaEncode(Byte *dest, SizeT *destLen, const Byte *src, SizeT srcLen,
|
||||
const CLzmaEncProps *props, Byte *propsEncoded, SizeT *propsSize, int writeEndMark,
|
||||
ICompressProgress *progress, ISzAllocPtr alloc, ISzAllocPtr allocBig)
|
||||
ICompressProgressPtr progress, ISzAllocPtr alloc, ISzAllocPtr allocBig)
|
||||
{
|
||||
CLzmaEnc *p = (CLzmaEnc *)LzmaEnc_Create(alloc);
|
||||
CLzmaEncHandle p = LzmaEnc_Create(alloc);
|
||||
SRes res;
|
||||
if (!p)
|
||||
return SZ_ERROR_MEM;
|
||||
|
@ -3154,10 +3133,10 @@ SRes LzmaEncode(Byte *dest, SizeT *destLen, const Byte *src, SizeT srcLen,
|
|||
|
||||
|
||||
/*
|
||||
#ifndef _7ZIP_ST
|
||||
void LzmaEnc_GetLzThreads(CLzmaEncHandle pp, HANDLE lz_threads[2])
|
||||
#ifndef Z7_ST
|
||||
void LzmaEnc_GetLzThreads(CLzmaEncHandle p, HANDLE lz_threads[2])
|
||||
{
|
||||
const CLzmaEnc *p = (CLzmaEnc *)pp;
|
||||
GET_const_CLzmaEnc_p
|
||||
lz_threads[0] = p->matchFinderMt.hashSync.thread;
|
||||
lz_threads[1] = p->matchFinderMt.btSync.thread;
|
||||
}
|
||||
|
|
|
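For reference, a minimal sketch of driving the one-call encoder defined in this file; it is not from the commit. The LzmaEncode and LzmaEncProps_Init names match the declarations visible in the diff above; g_Alloc and g_BigAlloc are assumed from Alloc.h, and the 1 MiB dictionary size is a placeholder.

#include "Alloc.h"
#include "LzmaEnc.h"

static SRes EncodeBuf(Byte *dest, SizeT *destLen, const Byte *src, SizeT srcLen,
    Byte propsEncoded[LZMA_PROPS_SIZE], SizeT *propsSize)
{
  CLzmaEncProps props;
  LzmaEncProps_Init(&props);         /* defaults, as set up in LzmaEncProps_Init() above */
  props.dictSize = (UInt32)1 << 20;  /* placeholder dictionary size */
  *propsSize = LZMA_PROPS_SIZE;      /* LzmaEncode stores the 5 property bytes here */
  return LzmaEncode(dest, destLen, src, srcLen,
      &props, propsEncoded, propsSize, 0 /* writeEndMark */,
      NULL /* progress */, &g_Alloc, &g_BigAlloc);
}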
@ -1,12 +1,14 @@
|
|||
/* LzmaLib.c -- LZMA library wrapper
|
||||
2015-06-13 : Igor Pavlov : Public domain */
|
||||
2023-04-02 : Igor Pavlov : Public domain */
|
||||
|
||||
#include "Precomp.h"
|
||||
|
||||
#include "Alloc.h"
|
||||
#include "LzmaDec.h"
|
||||
#include "LzmaEnc.h"
|
||||
#include "LzmaLib.h"
|
||||
|
||||
MY_STDAPI LzmaCompress(unsigned char *dest, size_t *destLen, const unsigned char *src, size_t srcLen,
|
||||
Z7_STDAPI LzmaCompress(unsigned char *dest, size_t *destLen, const unsigned char *src, size_t srcLen,
|
||||
unsigned char *outProps, size_t *outPropsSize,
|
||||
int level, /* 0 <= level <= 9, default = 5 */
|
||||
unsigned dictSize, /* use (1 << N) or (3 << N). 4 KB < dictSize <= 128 MB */
|
||||
|
@ -32,7 +34,7 @@ MY_STDAPI LzmaCompress(unsigned char *dest, size_t *destLen, const unsigned char
|
|||
}
|
||||
|
||||
|
||||
MY_STDAPI LzmaUncompress(unsigned char *dest, size_t *destLen, const unsigned char *src, size_t *srcLen,
|
||||
Z7_STDAPI LzmaUncompress(unsigned char *dest, size_t *destLen, const unsigned char *src, size_t *srcLen,
|
||||
const unsigned char *props, size_t propsSize)
|
||||
{
|
||||
ELzmaStatus status;
|
||||
|
|
|
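For reference, a small round-trip sketch through the wrapper entry points declared above; it is not from the commit. LZMA_PROPS_SIZE, SZ_OK and SZ_ERROR_MEM are assumed from LzmaLib.h / 7zTypes.h; the output-size estimate and the all-default parameter values are placeholders.

#include <stdlib.h>
#include "LzmaLib.h"

static int RoundTrip(const unsigned char *data, size_t dataSize)
{
  unsigned char props[LZMA_PROPS_SIZE];
  size_t propsSize = LZMA_PROPS_SIZE;
  size_t packedSize = dataSize + dataSize / 3 + 128;   /* rough upper bound */
  size_t unpackedSize = dataSize;
  unsigned char *packed = (unsigned char *)malloc(packedSize);
  unsigned char *unpacked = (unsigned char *)malloc(dataSize ? dataSize : 1);
  int res = (packed && unpacked) ? SZ_OK : SZ_ERROR_MEM;
  if (res == SZ_OK)
    res = LzmaCompress(packed, &packedSize, data, dataSize,
        props, &propsSize,
        5, 1 << 24, 3, 0, 2, 32, 2);   /* level, dictSize, lc, lp, pb, fb, numThreads */
  if (res == SZ_OK)
    res = LzmaUncompress(unpacked, &unpackedSize, packed, &packedSize,
        props, propsSize);
  free(unpacked);
  free(packed);
  return res;
}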
@ -0,0 +1,571 @@
|
|||
/* MtCoder.c -- Multi-thread Coder
|
||||
2023-04-13 : Igor Pavlov : Public domain */
|
||||
|
||||
#include "Precomp.h"
|
||||
|
||||
#include "MtCoder.h"
|
||||
|
||||
#ifndef Z7_ST
|
||||
|
||||
static SRes MtProgressThunk_Progress(ICompressProgressPtr pp, UInt64 inSize, UInt64 outSize)
|
||||
{
|
||||
Z7_CONTAINER_FROM_VTBL_TO_DECL_VAR_pp_vt_p(CMtProgressThunk)
|
||||
UInt64 inSize2 = 0;
|
||||
UInt64 outSize2 = 0;
|
||||
if (inSize != (UInt64)(Int64)-1)
|
||||
{
|
||||
inSize2 = inSize - p->inSize;
|
||||
p->inSize = inSize;
|
||||
}
|
||||
if (outSize != (UInt64)(Int64)-1)
|
||||
{
|
||||
outSize2 = outSize - p->outSize;
|
||||
p->outSize = outSize;
|
||||
}
|
||||
return MtProgress_ProgressAdd(p->mtProgress, inSize2, outSize2);
|
||||
}
|
||||
|
||||
|
||||
void MtProgressThunk_CreateVTable(CMtProgressThunk *p)
|
||||
{
|
||||
p->vt.Progress = MtProgressThunk_Progress;
|
||||
}
|
||||
|
||||
|
||||
|
||||
#define RINOK_THREAD(x) { if ((x) != 0) return SZ_ERROR_THREAD; }
|
||||
|
||||
|
||||
static THREAD_FUNC_DECL ThreadFunc(void *pp);
|
||||
|
||||
|
||||
static SRes MtCoderThread_CreateAndStart(CMtCoderThread *t)
|
||||
{
|
||||
WRes wres = AutoResetEvent_OptCreate_And_Reset(&t->startEvent);
|
||||
if (wres == 0)
|
||||
{
|
||||
t->stop = False;
|
||||
if (!Thread_WasCreated(&t->thread))
|
||||
wres = Thread_Create(&t->thread, ThreadFunc, t);
|
||||
if (wres == 0)
|
||||
wres = Event_Set(&t->startEvent);
|
||||
}
|
||||
if (wres == 0)
|
||||
return SZ_OK;
|
||||
return MY_SRes_HRESULT_FROM_WRes(wres);
|
||||
}
|
||||
|
||||
|
||||
static void MtCoderThread_Destruct(CMtCoderThread *t)
|
||||
{
|
||||
if (Thread_WasCreated(&t->thread))
|
||||
{
|
||||
t->stop = 1;
|
||||
Event_Set(&t->startEvent);
|
||||
Thread_Wait_Close(&t->thread);
|
||||
}
|
||||
|
||||
Event_Close(&t->startEvent);
|
||||
|
||||
if (t->inBuf)
|
||||
{
|
||||
ISzAlloc_Free(t->mtCoder->allocBig, t->inBuf);
|
||||
t->inBuf = NULL;
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
|
||||
|
||||
/*
|
||||
ThreadFunc2() returns:
|
||||
SZ_OK - in all normal cases (even for stream error or memory allocation error)
|
||||
SZ_ERROR_THREAD - in case of failure in system synch function
|
||||
*/
|
||||
|
||||
static SRes ThreadFunc2(CMtCoderThread *t)
|
||||
{
|
||||
CMtCoder *mtc = t->mtCoder;
|
||||
|
||||
for (;;)
|
||||
{
|
||||
unsigned bi;
|
||||
SRes res;
|
||||
SRes res2;
|
||||
BoolInt finished;
|
||||
unsigned bufIndex;
|
||||
size_t size;
|
||||
const Byte *inData;
|
||||
UInt64 readProcessed = 0;
|
||||
|
||||
RINOK_THREAD(Event_Wait(&mtc->readEvent))
|
||||
|
||||
/* after Event_Wait(&mtc->readEvent) we must call Event_Set(&mtc->readEvent) in any case to unlock another threads */
|
||||
|
||||
if (mtc->stopReading)
|
||||
{
|
||||
return Event_Set(&mtc->readEvent) == 0 ? SZ_OK : SZ_ERROR_THREAD;
|
||||
}
|
||||
|
||||
res = MtProgress_GetError(&mtc->mtProgress);
|
||||
|
||||
size = 0;
|
||||
inData = NULL;
|
||||
finished = True;
|
||||
|
||||
if (res == SZ_OK)
|
||||
{
|
||||
size = mtc->blockSize;
|
||||
if (mtc->inStream)
|
||||
{
|
||||
if (!t->inBuf)
|
||||
{
|
||||
t->inBuf = (Byte *)ISzAlloc_Alloc(mtc->allocBig, mtc->blockSize);
|
||||
if (!t->inBuf)
|
||||
res = SZ_ERROR_MEM;
|
||||
}
|
||||
if (res == SZ_OK)
|
||||
{
|
||||
res = SeqInStream_ReadMax(mtc->inStream, t->inBuf, &size);
|
||||
readProcessed = mtc->readProcessed + size;
|
||||
mtc->readProcessed = readProcessed;
|
||||
}
|
||||
if (res != SZ_OK)
|
||||
{
|
||||
mtc->readRes = res;
|
||||
/* after reading error - we can stop encoding of previous blocks */
|
||||
MtProgress_SetError(&mtc->mtProgress, res);
|
||||
}
|
||||
else
|
||||
finished = (size != mtc->blockSize);
|
||||
}
|
||||
else
|
||||
{
|
||||
size_t rem;
|
||||
readProcessed = mtc->readProcessed;
|
||||
rem = mtc->inDataSize - (size_t)readProcessed;
|
||||
if (size > rem)
|
||||
size = rem;
|
||||
inData = mtc->inData + (size_t)readProcessed;
|
||||
readProcessed += size;
|
||||
mtc->readProcessed = readProcessed;
|
||||
finished = (mtc->inDataSize == (size_t)readProcessed);
|
||||
}
|
||||
}
|
||||
|
||||
/* we must get some block from blocksSemaphore before Event_Set(&mtc->readEvent) */
|
||||
|
||||
res2 = SZ_OK;
|
||||
|
||||
if (Semaphore_Wait(&mtc->blocksSemaphore) != 0)
|
||||
{
|
||||
res2 = SZ_ERROR_THREAD;
|
||||
if (res == SZ_OK)
|
||||
{
|
||||
res = res2;
|
||||
// MtProgress_SetError(&mtc->mtProgress, res);
|
||||
}
|
||||
}
|
||||
|
||||
bi = mtc->blockIndex;
|
||||
|
||||
if (++mtc->blockIndex >= mtc->numBlocksMax)
|
||||
mtc->blockIndex = 0;
|
||||
|
||||
bufIndex = (unsigned)(int)-1;
|
||||
|
||||
if (res == SZ_OK)
|
||||
res = MtProgress_GetError(&mtc->mtProgress);
|
||||
|
||||
if (res != SZ_OK)
|
||||
finished = True;
|
||||
|
||||
if (!finished)
|
||||
{
|
||||
if (mtc->numStartedThreads < mtc->numStartedThreadsLimit
|
||||
&& mtc->expectedDataSize != readProcessed)
|
||||
{
|
||||
res = MtCoderThread_CreateAndStart(&mtc->threads[mtc->numStartedThreads]);
|
||||
if (res == SZ_OK)
|
||||
mtc->numStartedThreads++;
|
||||
else
|
||||
{
|
||||
MtProgress_SetError(&mtc->mtProgress, res);
|
||||
finished = True;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if (finished)
|
||||
mtc->stopReading = True;
|
||||
|
||||
RINOK_THREAD(Event_Set(&mtc->readEvent))
|
||||
|
||||
if (res2 != SZ_OK)
|
||||
return res2;
|
||||
|
||||
if (res == SZ_OK)
|
||||
{
|
||||
CriticalSection_Enter(&mtc->cs);
|
||||
bufIndex = mtc->freeBlockHead;
|
||||
mtc->freeBlockHead = mtc->freeBlockList[bufIndex];
|
||||
CriticalSection_Leave(&mtc->cs);
|
||||
|
||||
res = mtc->mtCallback->Code(mtc->mtCallbackObject, t->index, bufIndex,
|
||||
mtc->inStream ? t->inBuf : inData, size, finished);
|
||||
|
||||
// MtProgress_Reinit(&mtc->mtProgress, t->index);
|
||||
|
||||
if (res != SZ_OK)
|
||||
MtProgress_SetError(&mtc->mtProgress, res);
|
||||
}
|
||||
|
||||
{
|
||||
CMtCoderBlock *block = &mtc->blocks[bi];
|
||||
block->res = res;
|
||||
block->bufIndex = bufIndex;
|
||||
block->finished = finished;
|
||||
}
|
||||
|
||||
#ifdef MTCODER_USE_WRITE_THREAD
|
||||
RINOK_THREAD(Event_Set(&mtc->writeEvents[bi]))
|
||||
#else
|
||||
{
|
||||
unsigned wi;
|
||||
{
|
||||
CriticalSection_Enter(&mtc->cs);
|
||||
wi = mtc->writeIndex;
|
||||
if (wi == bi)
|
||||
mtc->writeIndex = (unsigned)(int)-1;
|
||||
else
|
||||
mtc->ReadyBlocks[bi] = True;
|
||||
CriticalSection_Leave(&mtc->cs);
|
||||
}
|
||||
|
||||
if (wi != bi)
|
||||
{
|
||||
if (res != SZ_OK || finished)
|
||||
return 0;
|
||||
continue;
|
||||
}
|
||||
|
||||
if (mtc->writeRes != SZ_OK)
|
||||
res = mtc->writeRes;
|
||||
|
||||
for (;;)
|
||||
{
|
||||
if (res == SZ_OK && bufIndex != (unsigned)(int)-1)
|
||||
{
|
||||
res = mtc->mtCallback->Write(mtc->mtCallbackObject, bufIndex);
|
||||
if (res != SZ_OK)
|
||||
{
|
||||
mtc->writeRes = res;
|
||||
MtProgress_SetError(&mtc->mtProgress, res);
|
||||
}
|
||||
}
|
||||
|
||||
if (++wi >= mtc->numBlocksMax)
|
||||
wi = 0;
|
||||
{
|
||||
BoolInt isReady;
|
||||
|
||||
CriticalSection_Enter(&mtc->cs);
|
||||
|
||||
if (bufIndex != (unsigned)(int)-1)
|
||||
{
|
||||
mtc->freeBlockList[bufIndex] = mtc->freeBlockHead;
|
||||
mtc->freeBlockHead = bufIndex;
|
||||
}
|
||||
|
||||
isReady = mtc->ReadyBlocks[wi];
|
||||
|
||||
if (isReady)
|
||||
mtc->ReadyBlocks[wi] = False;
|
||||
else
|
||||
mtc->writeIndex = wi;
|
||||
|
||||
CriticalSection_Leave(&mtc->cs);
|
||||
|
||||
RINOK_THREAD(Semaphore_Release1(&mtc->blocksSemaphore))
|
||||
|
||||
if (!isReady)
|
||||
break;
|
||||
}
|
||||
|
||||
{
|
||||
CMtCoderBlock *block = &mtc->blocks[wi];
|
||||
if (res == SZ_OK && block->res != SZ_OK)
|
||||
res = block->res;
|
||||
bufIndex = block->bufIndex;
|
||||
finished = block->finished;
|
||||
}
|
||||
}
|
||||
}
|
||||
#endif
|
||||
|
||||
if (finished || res != SZ_OK)
|
||||
return 0;
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
static THREAD_FUNC_DECL ThreadFunc(void *pp)
|
||||
{
|
||||
CMtCoderThread *t = (CMtCoderThread *)pp;
|
||||
for (;;)
|
||||
{
|
||||
if (Event_Wait(&t->startEvent) != 0)
|
||||
return (THREAD_FUNC_RET_TYPE)SZ_ERROR_THREAD;
|
||||
if (t->stop)
|
||||
return 0;
|
||||
{
|
||||
SRes res = ThreadFunc2(t);
|
||||
CMtCoder *mtc = t->mtCoder;
|
||||
if (res != SZ_OK)
|
||||
{
|
||||
MtProgress_SetError(&mtc->mtProgress, res);
|
||||
}
|
||||
|
||||
#ifndef MTCODER_USE_WRITE_THREAD
|
||||
{
|
||||
unsigned numFinished = (unsigned)InterlockedIncrement(&mtc->numFinishedThreads);
|
||||
if (numFinished == mtc->numStartedThreads)
|
||||
if (Event_Set(&mtc->finishedEvent) != 0)
|
||||
return (THREAD_FUNC_RET_TYPE)SZ_ERROR_THREAD;
|
||||
}
|
||||
#endif
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
|
||||
void MtCoder_Construct(CMtCoder *p)
|
||||
{
|
||||
unsigned i;
|
||||
|
||||
p->blockSize = 0;
|
||||
p->numThreadsMax = 0;
|
||||
p->expectedDataSize = (UInt64)(Int64)-1;
|
||||
|
||||
p->inStream = NULL;
|
||||
p->inData = NULL;
|
||||
p->inDataSize = 0;
|
||||
|
||||
p->progress = NULL;
|
||||
p->allocBig = NULL;
|
||||
|
||||
p->mtCallback = NULL;
|
||||
p->mtCallbackObject = NULL;
|
||||
|
||||
p->allocatedBufsSize = 0;
|
||||
|
||||
Event_Construct(&p->readEvent);
|
||||
Semaphore_Construct(&p->blocksSemaphore);
|
||||
|
||||
for (i = 0; i < MTCODER_THREADS_MAX; i++)
|
||||
{
|
||||
CMtCoderThread *t = &p->threads[i];
|
||||
t->mtCoder = p;
|
||||
t->index = i;
|
||||
t->inBuf = NULL;
|
||||
t->stop = False;
|
||||
Event_Construct(&t->startEvent);
|
||||
Thread_CONSTRUCT(&t->thread)
|
||||
}
|
||||
|
||||
#ifdef MTCODER_USE_WRITE_THREAD
|
||||
for (i = 0; i < MTCODER_BLOCKS_MAX; i++)
|
||||
Event_Construct(&p->writeEvents[i]);
|
||||
#else
|
||||
Event_Construct(&p->finishedEvent);
|
||||
#endif
|
||||
|
||||
CriticalSection_Init(&p->cs);
|
||||
CriticalSection_Init(&p->mtProgress.cs);
|
||||
}
|
||||
|
||||
|
||||
|
||||
|
||||
static void MtCoder_Free(CMtCoder *p)
|
||||
{
|
||||
unsigned i;
|
||||
|
||||
/*
|
||||
p->stopReading = True;
|
||||
if (Event_IsCreated(&p->readEvent))
|
||||
Event_Set(&p->readEvent);
|
||||
*/
|
||||
|
||||
for (i = 0; i < MTCODER_THREADS_MAX; i++)
|
||||
MtCoderThread_Destruct(&p->threads[i]);
|
||||
|
||||
Event_Close(&p->readEvent);
|
||||
Semaphore_Close(&p->blocksSemaphore);
|
||||
|
||||
#ifdef MTCODER_USE_WRITE_THREAD
|
||||
for (i = 0; i < MTCODER_BLOCKS_MAX; i++)
|
||||
Event_Close(&p->writeEvents[i]);
|
||||
#else
|
||||
Event_Close(&p->finishedEvent);
|
||||
#endif
|
||||
}
|
||||
|
||||
|
||||
void MtCoder_Destruct(CMtCoder *p)
|
||||
{
|
||||
MtCoder_Free(p);
|
||||
|
||||
CriticalSection_Delete(&p->cs);
|
||||
CriticalSection_Delete(&p->mtProgress.cs);
|
||||
}
|
||||
|
||||
|
||||
SRes MtCoder_Code(CMtCoder *p)
|
||||
{
|
||||
unsigned numThreads = p->numThreadsMax;
|
||||
unsigned numBlocksMax;
|
||||
unsigned i;
|
||||
SRes res = SZ_OK;
|
||||
|
||||
if (numThreads > MTCODER_THREADS_MAX)
|
||||
numThreads = MTCODER_THREADS_MAX;
|
||||
numBlocksMax = MTCODER_GET_NUM_BLOCKS_FROM_THREADS(numThreads);
|
||||
|
||||
if (p->blockSize < ((UInt32)1 << 26)) numBlocksMax++;
|
||||
if (p->blockSize < ((UInt32)1 << 24)) numBlocksMax++;
|
||||
if (p->blockSize < ((UInt32)1 << 22)) numBlocksMax++;
|
||||
|
||||
if (numBlocksMax > MTCODER_BLOCKS_MAX)
|
||||
numBlocksMax = MTCODER_BLOCKS_MAX;
|
||||
|
||||
if (p->blockSize != p->allocatedBufsSize)
|
||||
{
|
||||
for (i = 0; i < MTCODER_THREADS_MAX; i++)
|
||||
{
|
||||
CMtCoderThread *t = &p->threads[i];
|
||||
if (t->inBuf)
|
||||
{
|
||||
ISzAlloc_Free(p->allocBig, t->inBuf);
|
||||
t->inBuf = NULL;
|
||||
}
|
||||
}
|
||||
p->allocatedBufsSize = p->blockSize;
|
||||
}
|
||||
|
||||
p->readRes = SZ_OK;
|
||||
|
||||
MtProgress_Init(&p->mtProgress, p->progress);
|
||||
|
||||
#ifdef MTCODER_USE_WRITE_THREAD
|
||||
for (i = 0; i < numBlocksMax; i++)
|
||||
{
|
||||
RINOK_THREAD(AutoResetEvent_OptCreate_And_Reset(&p->writeEvents[i]))
|
||||
}
|
||||
#else
|
||||
RINOK_THREAD(AutoResetEvent_OptCreate_And_Reset(&p->finishedEvent))
|
||||
#endif
|
||||
|
||||
{
|
||||
RINOK_THREAD(AutoResetEvent_OptCreate_And_Reset(&p->readEvent))
|
||||
RINOK_THREAD(Semaphore_OptCreateInit(&p->blocksSemaphore, numBlocksMax, numBlocksMax))
|
||||
}
|
||||
|
||||
for (i = 0; i < MTCODER_BLOCKS_MAX - 1; i++)
|
||||
p->freeBlockList[i] = i + 1;
|
||||
p->freeBlockList[MTCODER_BLOCKS_MAX - 1] = (unsigned)(int)-1;
|
||||
p->freeBlockHead = 0;
|
||||
|
||||
p->readProcessed = 0;
|
||||
p->blockIndex = 0;
|
||||
p->numBlocksMax = numBlocksMax;
|
||||
p->stopReading = False;
|
||||
|
||||
#ifndef MTCODER_USE_WRITE_THREAD
|
||||
p->writeIndex = 0;
|
||||
p->writeRes = SZ_OK;
|
||||
for (i = 0; i < MTCODER_BLOCKS_MAX; i++)
|
||||
p->ReadyBlocks[i] = False;
|
||||
p->numFinishedThreads = 0;
|
||||
#endif
|
||||
|
||||
p->numStartedThreadsLimit = numThreads;
|
||||
p->numStartedThreads = 0;
|
||||
|
||||
// for (i = 0; i < numThreads; i++)
|
||||
{
|
||||
CMtCoderThread *nextThread = &p->threads[p->numStartedThreads++];
|
||||
RINOK(MtCoderThread_CreateAndStart(nextThread))
|
||||
}
|
||||
|
||||
RINOK_THREAD(Event_Set(&p->readEvent))
|
||||
|
||||
#ifdef MTCODER_USE_WRITE_THREAD
|
||||
{
|
||||
unsigned bi = 0;
|
||||
|
||||
for (;; bi++)
|
||||
{
|
||||
if (bi >= numBlocksMax)
|
||||
bi = 0;
|
||||
|
||||
RINOK_THREAD(Event_Wait(&p->writeEvents[bi]))
|
||||
|
||||
{
|
||||
const CMtCoderBlock *block = &p->blocks[bi];
|
||||
unsigned bufIndex = block->bufIndex;
|
||||
BoolInt finished = block->finished;
|
||||
if (res == SZ_OK && block->res != SZ_OK)
|
||||
res = block->res;
|
||||
|
||||
if (bufIndex != (unsigned)(int)-1)
|
||||
{
|
||||
if (res == SZ_OK)
|
||||
{
|
||||
res = p->mtCallback->Write(p->mtCallbackObject, bufIndex);
|
||||
if (res != SZ_OK)
|
||||
MtProgress_SetError(&p->mtProgress, res);
|
||||
}
|
||||
|
||||
CriticalSection_Enter(&p->cs);
|
||||
{
|
||||
p->freeBlockList[bufIndex] = p->freeBlockHead;
|
||||
p->freeBlockHead = bufIndex;
|
||||
}
|
||||
CriticalSection_Leave(&p->cs);
|
||||
}
|
||||
|
||||
RINOK_THREAD(Semaphore_Release1(&p->blocksSemaphore))
|
||||
|
||||
if (finished)
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
#else
|
||||
{
|
||||
WRes wres = Event_Wait(&p->finishedEvent);
|
||||
res = MY_SRes_HRESULT_FROM_WRes(wres);
|
||||
}
|
||||
#endif
|
||||
|
||||
if (res == SZ_OK)
|
||||
res = p->readRes;
|
||||
|
||||
if (res == SZ_OK)
|
||||
res = p->mtProgress.res;
|
||||
|
||||
#ifndef MTCODER_USE_WRITE_THREAD
|
||||
if (res == SZ_OK)
|
||||
res = p->writeRes;
|
||||
#endif
|
||||
|
||||
if (res != SZ_OK)
|
||||
MtCoder_Free(p);
|
||||
return res;
|
||||
}
|
||||
|
||||
#endif
|
||||
|
||||
#undef RINOK_THREAD
|
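For reference, a standalone sketch of the index-based free list that MtCoder_Code() initializes and ThreadFunc2() pops and pushes under the critical section above; it is not from the commit. Array slots act as the next links and (unsigned)(int)-1 marks the end of the list, as in the code; the names and BLOCKS_MAX are local to the sketch.

#include <stdio.h>

#define BLOCKS_MAX 4

static unsigned freeBlockList[BLOCKS_MAX];
static unsigned freeBlockHead;

static void FreeList_Init(void)
{
  unsigned i;
  for (i = 0; i < BLOCKS_MAX - 1; i++)
    freeBlockList[i] = i + 1;
  freeBlockList[BLOCKS_MAX - 1] = (unsigned)(int)-1;
  freeBlockHead = 0;
}

/* pop the head index; the caller must know the list is non-empty */
static unsigned FreeList_Pop(void)
{
  const unsigned bufIndex = freeBlockHead;
  freeBlockHead = freeBlockList[bufIndex];
  return bufIndex;
}

/* push an index back onto the head of the list */
static void FreeList_Push(unsigned bufIndex)
{
  freeBlockList[bufIndex] = freeBlockHead;
  freeBlockHead = bufIndex;
}

int main(void)
{
  unsigned a, b;
  FreeList_Init();
  a = FreeList_Pop();                          /* 0 */
  b = FreeList_Pop();                          /* 1 */
  FreeList_Push(a);
  printf("%u %u %u\n", a, b, FreeList_Pop());  /* prints "0 1 0" */
  return 0;
}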
File diff suppressed because it is too large
|
@ -1,5 +1,5 @@
|
|||
/* Ppmd7.c -- PPMdH codec
|
||||
2021-04-13 : Igor Pavlov : Public domain
|
||||
2023-04-02 : Igor Pavlov : Public domain
|
||||
This code is based on PPMd var.H (2001): Dmitry Shkarin : Public domain */
|
||||
|
||||
#include "Precomp.h"
|
||||
|
@ -14,7 +14,7 @@ This code is based on PPMd var.H (2001): Dmitry Shkarin : Public domain */
|
|||
MY_ALIGN(16)
|
||||
static const Byte PPMD7_kExpEscape[16] = { 25, 14, 9, 7, 5, 5, 4, 4, 4, 3, 3, 3, 2, 2, 2, 2 };
|
||||
MY_ALIGN(16)
|
||||
static const UInt16 kInitBinEsc[] = { 0x3CDD, 0x1F3F, 0x59BF, 0x48F3, 0x64A1, 0x5ABC, 0x6632, 0x6051};
|
||||
static const UInt16 PPMD7_kInitBinEsc[] = { 0x3CDD, 0x1F3F, 0x59BF, 0x48F3, 0x64A1, 0x5ABC, 0x6632, 0x6051};
|
||||
|
||||
#define MAX_FREQ 124
|
||||
#define UNIT_SIZE 12
|
||||
|
@ -33,7 +33,7 @@ static const UInt16 kInitBinEsc[] = { 0x3CDD, 0x1F3F, 0x59BF, 0x48F3, 0x64A1, 0x
|
|||
#define ONE_STATE(ctx) Ppmd7Context_OneState(ctx)
|
||||
#define SUFFIX(ctx) CTX((ctx)->Suffix)
|
||||
|
||||
typedef CPpmd7_Context * CTX_PTR;
|
||||
typedef CPpmd7_Context * PPMD7_CTX_PTR;
|
||||
|
||||
struct CPpmd7_Node_;
|
||||
|
||||
|
@ -107,14 +107,14 @@ BoolInt Ppmd7_Alloc(CPpmd7 *p, UInt32 size, ISzAllocPtr alloc)
|
|||
// ---------- Internal Memory Allocator ----------
|
||||
|
||||
/* We can use CPpmd7_Node in list of free units (as in Ppmd8)
|
||||
But we still need one additional list walk pass in GlueFreeBlocks().
|
||||
So we use simple CPpmd_Void_Ref instead of CPpmd7_Node in InsertNode() / RemoveNode()
|
||||
But we still need one additional list walk pass in Ppmd7_GlueFreeBlocks().
|
||||
So we use simple CPpmd_Void_Ref instead of CPpmd7_Node in Ppmd7_InsertNode() / Ppmd7_RemoveNode()
|
||||
*/
|
||||
|
||||
#define EMPTY_NODE 0
|
||||
|
||||
|
||||
static void InsertNode(CPpmd7 *p, void *node, unsigned indx)
|
||||
static void Ppmd7_InsertNode(CPpmd7 *p, void *node, unsigned indx)
|
||||
{
|
||||
*((CPpmd_Void_Ref *)node) = p->FreeList[indx];
|
||||
// ((CPpmd7_Node *)node)->Next = (CPpmd7_Node_Ref)p->FreeList[indx];
|
||||
|
@ -124,7 +124,7 @@ static void InsertNode(CPpmd7 *p, void *node, unsigned indx)
|
|||
}
|
||||
|
||||
|
||||
static void *RemoveNode(CPpmd7 *p, unsigned indx)
|
||||
static void *Ppmd7_RemoveNode(CPpmd7 *p, unsigned indx)
|
||||
{
|
||||
CPpmd_Void_Ref *node = (CPpmd_Void_Ref *)Ppmd7_GetPtr(p, p->FreeList[indx]);
|
||||
p->FreeList[indx] = *node;
|
||||
|
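For reference, a standalone sketch of the allocator layout described in the comment in the previous hunk: a free unit stores the link to the next free unit in its own first bytes, so Ppmd7_InsertNode() / Ppmd7_RemoveNode() need no side table. It is not from the commit; plain pointers stand in for the 32-bit CPpmd_Void_Ref offsets, and the number of size classes is a placeholder.

#include <stddef.h>

#define NUM_INDEXES 8   /* the real code uses PPMD_NUM_INDEXES size classes */

typedef struct
{
  void *freeList[NUM_INDEXES];   /* one singly linked list per size class */
} CPool;

/* blocks handed to the pool are assumed to be at least sizeof(void *) bytes */
static void Pool_InsertNode(CPool *p, void *node, unsigned indx)
{
  *(void **)node = p->freeList[indx];   /* next link lives inside the free block */
  p->freeList[indx] = node;
}

static void *Pool_RemoveNode(CPool *p, unsigned indx)   /* list must be non-empty */
{
  void *node = p->freeList[indx];
  p->freeList[indx] = *(void **)node;
  return node;
}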
@ -134,32 +134,32 @@ static void *RemoveNode(CPpmd7 *p, unsigned indx)
|
|||
}
|
||||
|
||||
|
||||
static void SplitBlock(CPpmd7 *p, void *ptr, unsigned oldIndx, unsigned newIndx)
|
||||
static void Ppmd7_SplitBlock(CPpmd7 *p, void *ptr, unsigned oldIndx, unsigned newIndx)
|
||||
{
|
||||
unsigned i, nu = I2U(oldIndx) - I2U(newIndx);
|
||||
ptr = (Byte *)ptr + U2B(I2U(newIndx));
|
||||
if (I2U(i = U2I(nu)) != nu)
|
||||
{
|
||||
unsigned k = I2U(--i);
|
||||
InsertNode(p, ((Byte *)ptr) + U2B(k), nu - k - 1);
|
||||
Ppmd7_InsertNode(p, ((Byte *)ptr) + U2B(k), nu - k - 1);
|
||||
}
|
||||
InsertNode(p, ptr, i);
|
||||
Ppmd7_InsertNode(p, ptr, i);
|
||||
}
|
||||
|
||||
|
||||
/* we use CPpmd7_Node_Union union to solve XLC -O2 strict pointer aliasing problem */
|
||||
|
||||
typedef union _CPpmd7_Node_Union
|
||||
typedef union
|
||||
{
|
||||
CPpmd7_Node Node;
|
||||
CPpmd7_Node_Ref NextRef;
|
||||
} CPpmd7_Node_Union;
|
||||
|
||||
/* Original PPmdH (Ppmd7) code uses doubly linked list in GlueFreeBlocks()
|
||||
/* Original PPmdH (Ppmd7) code uses doubly linked list in Ppmd7_GlueFreeBlocks()
|
||||
we use single linked list similar to Ppmd8 code */
|
||||
|
||||
|
||||
static void GlueFreeBlocks(CPpmd7 *p)
|
||||
static void Ppmd7_GlueFreeBlocks(CPpmd7 *p)
|
||||
{
|
||||
/*
|
||||
we use first UInt16 field of 12-bytes UNITs as record type stamp
|
||||
|
@ -239,27 +239,27 @@ static void GlueFreeBlocks(CPpmd7 *p)
|
|||
if (nu == 0)
|
||||
continue;
|
||||
for (; nu > 128; nu -= 128, node += 128)
|
||||
InsertNode(p, node, PPMD_NUM_INDEXES - 1);
|
||||
Ppmd7_InsertNode(p, node, PPMD_NUM_INDEXES - 1);
|
||||
if (I2U(i = U2I(nu)) != nu)
|
||||
{
|
||||
unsigned k = I2U(--i);
|
||||
InsertNode(p, node + k, (unsigned)nu - k - 1);
|
||||
Ppmd7_InsertNode(p, node + k, (unsigned)nu - k - 1);
|
||||
}
|
||||
InsertNode(p, node, i);
|
||||
Ppmd7_InsertNode(p, node, i);
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
MY_NO_INLINE
|
||||
static void *AllocUnitsRare(CPpmd7 *p, unsigned indx)
|
||||
Z7_NO_INLINE
|
||||
static void *Ppmd7_AllocUnitsRare(CPpmd7 *p, unsigned indx)
|
||||
{
|
||||
unsigned i;
|
||||
|
||||
if (p->GlueCount == 0)
|
||||
{
|
||||
GlueFreeBlocks(p);
|
||||
Ppmd7_GlueFreeBlocks(p);
|
||||
if (p->FreeList[indx] != 0)
|
||||
return RemoveNode(p, indx);
|
||||
return Ppmd7_RemoveNode(p, indx);
|
||||
}
|
||||
|
||||
i = indx;
|
||||
|
@ -277,17 +277,17 @@ static void *AllocUnitsRare(CPpmd7 *p, unsigned indx)
|
|||
while (p->FreeList[i] == 0);
|
||||
|
||||
{
|
||||
void *block = RemoveNode(p, i);
|
||||
SplitBlock(p, block, i, indx);
|
||||
void *block = Ppmd7_RemoveNode(p, i);
|
||||
Ppmd7_SplitBlock(p, block, i, indx);
|
||||
return block;
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
static void *AllocUnits(CPpmd7 *p, unsigned indx)
|
||||
static void *Ppmd7_AllocUnits(CPpmd7 *p, unsigned indx)
|
||||
{
|
||||
if (p->FreeList[indx] != 0)
|
||||
return RemoveNode(p, indx);
|
||||
return Ppmd7_RemoveNode(p, indx);
|
||||
{
|
||||
UInt32 numBytes = U2B(I2U(indx));
|
||||
Byte *lo = p->LoUnit;
|
||||
|
@ -297,11 +297,11 @@ static void *AllocUnits(CPpmd7 *p, unsigned indx)
|
|||
return lo;
|
||||
}
|
||||
}
|
||||
return AllocUnitsRare(p, indx);
|
||||
return Ppmd7_AllocUnitsRare(p, indx);
|
||||
}
|
||||
|
||||
|
||||
#define MyMem12Cpy(dest, src, num) \
|
||||
#define MEM_12_CPY(dest, src, num) \
|
||||
{ UInt32 *d = (UInt32 *)dest; const UInt32 *z = (const UInt32 *)src; UInt32 n = num; \
|
||||
do { d[0] = z[0]; d[1] = z[1]; d[2] = z[2]; z += 3; d += 3; } while (--n); }
|
||||
|
||||
|
@ -315,12 +315,12 @@ static void *ShrinkUnits(CPpmd7 *p, void *oldPtr, unsigned oldNU, unsigned newNU
|
|||
return oldPtr;
|
||||
if (p->FreeList[i1] != 0)
|
||||
{
|
||||
void *ptr = RemoveNode(p, i1);
|
||||
MyMem12Cpy(ptr, oldPtr, newNU);
|
||||
InsertNode(p, oldPtr, i0);
|
||||
void *ptr = Ppmd7_RemoveNode(p, i1);
|
||||
MEM_12_CPY(ptr, oldPtr, newNU)
|
||||
Ppmd7_InsertNode(p, oldPtr, i0);
|
||||
return ptr;
|
||||
}
|
||||
SplitBlock(p, oldPtr, i0, i1);
|
||||
Ppmd7_SplitBlock(p, oldPtr, i0, i1);
|
||||
return oldPtr;
|
||||
}
|
||||
*/
|
||||
|
@ -329,14 +329,14 @@ static void *ShrinkUnits(CPpmd7 *p, void *oldPtr, unsigned oldNU, unsigned newNU
|
|||
#define SUCCESSOR(p) Ppmd_GET_SUCCESSOR(p)
|
||||
static void SetSuccessor(CPpmd_State *p, CPpmd_Void_Ref v)
|
||||
{
|
||||
Ppmd_SET_SUCCESSOR(p, v);
|
||||
Ppmd_SET_SUCCESSOR(p, v)
|
||||
}
|
||||
|
||||
|
||||
|
||||
MY_NO_INLINE
|
||||
Z7_NO_INLINE
|
||||
static
|
||||
void RestartModel(CPpmd7 *p)
|
||||
void Ppmd7_RestartModel(CPpmd7 *p)
|
||||
{
|
||||
unsigned i, k;
|
||||
|
||||
|
@ -352,8 +352,8 @@ void RestartModel(CPpmd7 *p)
|
|||
p->PrevSuccess = 0;
|
||||
|
||||
{
|
||||
CPpmd7_Context *mc = (CTX_PTR)(void *)(p->HiUnit -= UNIT_SIZE); /* AllocContext(p); */
|
||||
CPpmd_State *s = (CPpmd_State *)p->LoUnit; /* AllocUnits(p, PPMD_NUM_INDEXES - 1); */
|
||||
CPpmd7_Context *mc = (PPMD7_CTX_PTR)(void *)(p->HiUnit -= UNIT_SIZE); /* AllocContext(p); */
|
||||
CPpmd_State *s = (CPpmd_State *)p->LoUnit; /* Ppmd7_AllocUnits(p, PPMD_NUM_INDEXES - 1); */
|
||||
|
||||
p->LoUnit += U2B(256 / 2);
|
||||
p->MaxContext = p->MinContext = mc;
|
||||
|
@ -391,7 +391,7 @@ void RestartModel(CPpmd7 *p)
|
|||
{
|
||||
unsigned m;
|
||||
UInt16 *dest = p->BinSumm[i] + k;
|
||||
UInt16 val = (UInt16)(PPMD_BIN_SCALE - kInitBinEsc[k] / (i + 2));
|
||||
const UInt16 val = (UInt16)(PPMD_BIN_SCALE - PPMD7_kInitBinEsc[k] / (i + 2));
|
||||
for (m = 0; m < 64; m += 8)
|
||||
dest[m] = val;
|
||||
}
|
||||
|
@ -423,13 +423,13 @@ void Ppmd7_Init(CPpmd7 *p, unsigned maxOrder)
|
|||
{
|
||||
p->MaxOrder = maxOrder;
|
||||
|
||||
RestartModel(p);
|
||||
Ppmd7_RestartModel(p);
|
||||
}
|
||||
|
||||
|
||||
|
||||
/*
|
||||
CreateSuccessors()
|
||||
Ppmd7_CreateSuccessors()
|
||||
It's called when (FoundState->Successor) is RAW-Successor,
|
||||
that is the link to position in Raw text.
|
||||
So we create Context records and write the links to
|
||||
|
@ -445,10 +445,10 @@ void Ppmd7_Init(CPpmd7 *p, unsigned maxOrder)
|
|||
also it can return pointer to real context of same order,
|
||||
*/
|
||||
|
||||
MY_NO_INLINE
|
||||
static CTX_PTR CreateSuccessors(CPpmd7 *p)
|
||||
Z7_NO_INLINE
|
||||
static PPMD7_CTX_PTR Ppmd7_CreateSuccessors(CPpmd7 *p)
|
||||
{
|
||||
CTX_PTR c = p->MinContext;
|
||||
PPMD7_CTX_PTR c = p->MinContext;
|
||||
CPpmd_Byte_Ref upBranch = (CPpmd_Byte_Ref)SUCCESSOR(p->FoundState);
|
||||
Byte newSym, newFreq;
|
||||
unsigned numPs = 0;
|
||||
|
@ -522,15 +522,15 @@ static CTX_PTR CreateSuccessors(CPpmd7 *p)
|
|||
|
||||
do
|
||||
{
|
||||
CTX_PTR c1;
|
||||
PPMD7_CTX_PTR c1;
|
||||
/* = AllocContext(p); */
|
||||
if (p->HiUnit != p->LoUnit)
|
||||
c1 = (CTX_PTR)(void *)(p->HiUnit -= UNIT_SIZE);
|
||||
c1 = (PPMD7_CTX_PTR)(void *)(p->HiUnit -= UNIT_SIZE);
|
||||
else if (p->FreeList[0] != 0)
|
||||
c1 = (CTX_PTR)RemoveNode(p, 0);
|
||||
c1 = (PPMD7_CTX_PTR)Ppmd7_RemoveNode(p, 0);
|
||||
else
|
||||
{
|
||||
c1 = (CTX_PTR)AllocUnitsRare(p, 0);
|
||||
c1 = (PPMD7_CTX_PTR)Ppmd7_AllocUnitsRare(p, 0);
|
||||
if (!c1)
|
||||
return NULL;
|
||||
}
|
||||
|
@ -550,16 +550,16 @@ static CTX_PTR CreateSuccessors(CPpmd7 *p)
|
|||
|
||||
|
||||
|
||||
#define SwapStates(s) \
|
||||
#define SWAP_STATES(s) \
|
||||
{ CPpmd_State tmp = s[0]; s[0] = s[-1]; s[-1] = tmp; }
|
||||
|
||||
|
||||
void Ppmd7_UpdateModel(CPpmd7 *p);
|
||||
MY_NO_INLINE
|
||||
Z7_NO_INLINE
|
||||
void Ppmd7_UpdateModel(CPpmd7 *p)
|
||||
{
|
||||
CPpmd_Void_Ref maxSuccessor, minSuccessor;
|
||||
CTX_PTR c, mc;
|
||||
PPMD7_CTX_PTR c, mc;
|
||||
unsigned s0, ns;
|
||||
|
||||
|
||||
|
@ -592,7 +592,7 @@ void Ppmd7_UpdateModel(CPpmd7 *p)
|
|||
|
||||
if (s[0].Freq >= s[-1].Freq)
|
||||
{
|
||||
SwapStates(s);
|
||||
SWAP_STATES(s)
|
||||
s--;
|
||||
}
|
||||
}
|
||||
|
@ -610,10 +610,10 @@ void Ppmd7_UpdateModel(CPpmd7 *p)
|
|||
{
|
||||
/* MAX ORDER context */
|
||||
/* (FoundState->Successor) is RAW-Successor. */
|
||||
p->MaxContext = p->MinContext = CreateSuccessors(p);
|
||||
p->MaxContext = p->MinContext = Ppmd7_CreateSuccessors(p);
|
||||
if (!p->MinContext)
|
||||
{
|
||||
RestartModel(p);
|
||||
Ppmd7_RestartModel(p);
|
||||
return;
|
||||
}
|
||||
SetSuccessor(p->FoundState, REF(p->MinContext));
|
||||
|
@ -629,7 +629,7 @@ void Ppmd7_UpdateModel(CPpmd7 *p)
|
|||
p->Text = text;
|
||||
if (text >= p->UnitsStart)
|
||||
{
|
||||
RestartModel(p);
|
||||
Ppmd7_RestartModel(p);
|
||||
return;
|
||||
}
|
||||
maxSuccessor = REF(text);
|
||||
|
@ -645,10 +645,10 @@ void Ppmd7_UpdateModel(CPpmd7 *p)
|
|||
if (minSuccessor <= maxSuccessor)
|
||||
{
|
||||
// minSuccessor is RAW-Successor. So we will create real contexts records:
|
||||
CTX_PTR cs = CreateSuccessors(p);
|
||||
PPMD7_CTX_PTR cs = Ppmd7_CreateSuccessors(p);
|
||||
if (!cs)
|
||||
{
|
||||
RestartModel(p);
|
||||
Ppmd7_RestartModel(p);
|
||||
return;
|
||||
}
|
||||
minSuccessor = REF(cs);
|
||||
|
@ -715,16 +715,16 @@ void Ppmd7_UpdateModel(CPpmd7 *p)
|
|||
unsigned i = U2I(oldNU);
|
||||
if (i != U2I((size_t)oldNU + 1))
|
||||
{
|
||||
void *ptr = AllocUnits(p, i + 1);
|
||||
void *ptr = Ppmd7_AllocUnits(p, i + 1);
|
||||
void *oldPtr;
|
||||
if (!ptr)
|
||||
{
|
||||
RestartModel(p);
|
||||
Ppmd7_RestartModel(p);
|
||||
return;
|
||||
}
|
||||
oldPtr = STATS(c);
|
||||
MyMem12Cpy(ptr, oldPtr, oldNU);
|
||||
InsertNode(p, oldPtr, i);
|
||||
MEM_12_CPY(ptr, oldPtr, oldNU)
|
||||
Ppmd7_InsertNode(p, oldPtr, i);
|
||||
c->Union4.Stats = STATS_REF(ptr);
|
||||
}
|
||||
}
|
||||
|
@ -739,10 +739,10 @@ void Ppmd7_UpdateModel(CPpmd7 *p)
|
|||
else
|
||||
{
|
||||
// instead of One-symbol context we create 2-symbol context
|
||||
CPpmd_State *s = (CPpmd_State*)AllocUnits(p, 0);
|
||||
CPpmd_State *s = (CPpmd_State*)Ppmd7_AllocUnits(p, 0);
|
||||
if (!s)
|
||||
{
|
||||
RestartModel(p);
|
||||
Ppmd7_RestartModel(p);
|
||||
return;
|
||||
}
|
||||
{
|
||||
|
@ -795,8 +795,8 @@ void Ppmd7_UpdateModel(CPpmd7 *p)
|
|||
|
||||
|
||||
|
||||
MY_NO_INLINE
|
||||
static void Rescale(CPpmd7 *p)
|
||||
Z7_NO_INLINE
|
||||
static void Ppmd7_Rescale(CPpmd7 *p)
|
||||
{
|
||||
unsigned i, adder, sumFreq, escFreq;
|
||||
CPpmd_State *stats = STATS(p->MinContext);
|
||||
|
@ -885,7 +885,7 @@ static void Rescale(CPpmd7 *p)
|
|||
*s = *stats;
|
||||
s->Freq = (Byte)freq; // (freq <= 260 / 4)
|
||||
p->FoundState = s;
|
||||
InsertNode(p, stats, U2I(n0));
|
||||
Ppmd7_InsertNode(p, stats, U2I(n0));
|
||||
return;
|
||||
}
|
||||
|
||||
|
@ -899,13 +899,13 @@ static void Rescale(CPpmd7 *p)
|
|||
{
|
||||
if (p->FreeList[i1] != 0)
|
||||
{
|
||||
void *ptr = RemoveNode(p, i1);
|
||||
void *ptr = Ppmd7_RemoveNode(p, i1);
|
||||
p->MinContext->Union4.Stats = STATS_REF(ptr);
|
||||
MyMem12Cpy(ptr, (const void *)stats, n1);
|
||||
InsertNode(p, stats, i0);
|
||||
MEM_12_CPY(ptr, (const void *)stats, n1)
|
||||
Ppmd7_InsertNode(p, stats, i0);
|
||||
}
|
||||
else
|
||||
SplitBlock(p, stats, i0, i1);
|
||||
Ppmd7_SplitBlock(p, stats, i0, i1);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -948,9 +948,9 @@ CPpmd_See *Ppmd7_MakeEscFreq(CPpmd7 *p, unsigned numMasked, UInt32 *escFreq)
|
|||
}
|
||||
|
||||
|
||||
static void NextContext(CPpmd7 *p)
|
||||
static void Ppmd7_NextContext(CPpmd7 *p)
|
||||
{
|
||||
CTX_PTR c = CTX(SUCCESSOR(p->FoundState));
|
||||
PPMD7_CTX_PTR c = CTX(SUCCESSOR(p->FoundState));
|
||||
if (p->OrderFall == 0 && (const Byte *)c > p->Text)
|
||||
p->MaxContext = p->MinContext = c;
|
||||
else
|
||||
|
@ -967,12 +967,12 @@ void Ppmd7_Update1(CPpmd7 *p)
|
|||
s->Freq = (Byte)freq;
|
||||
if (freq > s[-1].Freq)
|
||||
{
|
||||
SwapStates(s);
|
||||
SWAP_STATES(s)
|
||||
p->FoundState = --s;
|
||||
if (freq > MAX_FREQ)
|
||||
Rescale(p);
|
||||
Ppmd7_Rescale(p);
|
||||
}
|
||||
NextContext(p);
|
||||
Ppmd7_NextContext(p);
|
||||
}
|
||||
|
||||
|
||||
|
@ -988,8 +988,8 @@ void Ppmd7_Update1_0(CPpmd7 *p)
|
|||
freq += 4;
|
||||
s->Freq = (Byte)freq;
|
||||
if (freq > MAX_FREQ)
|
||||
Rescale(p);
|
||||
NextContext(p);
|
||||
Ppmd7_Rescale(p);
|
||||
Ppmd7_NextContext(p);
|
||||
}
|
||||
|
||||
|
||||
|
@ -1000,7 +1000,7 @@ void Ppmd7_UpdateBin(CPpmd7 *p)
|
|||
p->FoundState->Freq = (Byte)(freq + (freq < 128));
|
||||
p->PrevSuccess = 1;
|
||||
p->RunLength++;
|
||||
NextContext(p);
|
||||
Ppmd7_NextContext(p);
|
||||
}
|
||||
*/
|
||||
|
||||
|
@ -1013,7 +1013,7 @@ void Ppmd7_Update2(CPpmd7 *p)
|
|||
p->MinContext->Union2.SummFreq = (UInt16)(p->MinContext->Union2.SummFreq + 4);
|
||||
s->Freq = (Byte)freq;
|
||||
if (freq > MAX_FREQ)
|
||||
Rescale(p);
|
||||
Ppmd7_Rescale(p);
|
||||
Ppmd7_UpdateModel(p);
|
||||
}
|
||||
|
||||
|
@ -1042,8 +1042,8 @@ Last UNIT of array at offset (Size - 12) is root order-0 CPpmd7_Context record.
The code can free UNITs memory blocks that were allocated to store CPpmd_State vectors.
The code doesn't free UNITs allocated for CPpmd7_Context records.

The code calls RestartModel(), when there is no free memory for allocation.
And RestartModel() changes the state to orignal start state, with full free block.
The code calls Ppmd7_RestartModel(), when there is no free memory for allocation.
And Ppmd7_RestartModel() changes the state to orignal start state, with full free block.


The code allocates UNITs with the following order:
@ -1051,14 +1051,14 @@ The code allocates UNITs with the following order:
Allocation of 1 UNIT for Context record
  - from free space (HiUnit) down to (LoUnit)
  - from FreeList[0]
  - AllocUnitsRare()
  - Ppmd7_AllocUnitsRare()

AllocUnits() for CPpmd_State vectors:
Ppmd7_AllocUnits() for CPpmd_State vectors:
  - from FreeList[i]
  - from free space (LoUnit) up to (HiUnit)
  - AllocUnitsRare()
  - Ppmd7_AllocUnitsRare()

AllocUnitsRare()
Ppmd7_AllocUnitsRare()
  - if (GlueCount == 0)
      { Glue lists, GlueCount = 255, allocate from FreeList[i]] }
  - loop for all higher sized FreeList[...] lists
@ -1093,8 +1093,8 @@ The PPMd code tries to fulfill the condition:
We have (Sum(Stats[].Freq) <= 256 * 124), because of (MAX_FREQ = 124)
So (4 = 128 - 124) is average reserve for Escape_Freq for each symbol.
If (CPpmd_State::Freq) is not aligned for 4, the reserve can be 5, 6 or 7.
SummFreq and Escape_Freq can be changed in Rescale() and *Update*() functions.
Rescale() can remove symbols only from max-order contexts. So Escape_Freq can increase after multiple calls of Rescale() for
SummFreq and Escape_Freq can be changed in Ppmd7_Rescale() and *Update*() functions.
Ppmd7_Rescale() can remove symbols only from max-order contexts. So Escape_Freq can increase after multiple calls of Ppmd7_Rescale() for
max-order context.

When the PPMd code still break (Total <= RC::Range) condition in range coder,
@ -1102,3 +1102,21 @@ we have two ways to resolve that problem:
1) we can report error, if we want to keep compatibility with original PPMd code that has no fix for such cases.
2) we can reduce (Total) value to (RC::Range) by reducing (Escape_Freq) part of (Total) value.
*/

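/* Illustrative sketch (not the library's code): the fallback order just
   described, for allocating one UNIT for a Context record. The struct and
   function names here are invented, and the free list is pointer-based,
   while the real allocator stores 32-bit references; the "rare" path
   (glue + split, possibly ending in Ppmd7_RestartModel) is only indicated
   by a comment. */
#define UNIT_SIZE_SKETCH 12   /* UNITs are 12 bytes, as stated above */

typedef struct
{
  Byte *LoUnit;       /* low end of the central free area  */
  Byte *HiUnit;       /* high end of the central free area */
  void *FreeList0;    /* head of FreeList[0] (singly linked free UNITs) */
} CAllocSketch;       /* hypothetical, much-reduced stand-in for CPpmd7 */

/* Allocation of 1 UNIT for a Context record, in the order listed above. */
static void *AllocContextUnit_Sketch(CAllocSketch *p)
{
  if (p->HiUnit != p->LoUnit)
    return (p->HiUnit -= UNIT_SIZE_SKETCH);   /* 1) carve from free space (HiUnit) */
  if (p->FreeList0)
  {
    void *node = p->FreeList0;                /* 2) pop the head of FreeList[0] */
    p->FreeList0 = *(void **)node;
    return node;
  }
  return NULL;   /* 3) real code: Ppmd7_AllocUnitsRare(p, 0) (glue + split) */
}
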
#undef MAX_FREQ
|
||||
#undef UNIT_SIZE
|
||||
#undef U2B
|
||||
#undef U2I
|
||||
#undef I2U
|
||||
#undef I2U_UInt16
|
||||
#undef REF
|
||||
#undef STATS_REF
|
||||
#undef CTX
|
||||
#undef STATS
|
||||
#undef ONE_STATE
|
||||
#undef SUFFIX
|
||||
#undef NODE
|
||||
#undef EMPTY_NODE
|
||||
#undef MEM_12_CPY
|
||||
#undef SUCCESSOR
|
||||
#undef SWAP_STATES
|
||||
@ -1,5 +1,5 @@
/* Ppmd7Dec.c -- Ppmd7z (PPMdH with 7z Range Coder) Decoder
2021-04-13 : Igor Pavlov : Public domain
2023-04-02 : Igor Pavlov : Public domain
This code is based on:
PPMd var.H (2001): Dmitry Shkarin : Public domain */

|
@ -8,7 +8,7 @@ This code is based on:
|
|||
|
||||
#include "Ppmd7.h"
|
||||
|
||||
#define kTopValue (1 << 24)
|
||||
#define kTopValue ((UInt32)1 << 24)
|
||||
|
||||
|
||||
#define READ_BYTE(p) IByteIn_Read((p)->Stream)
|
||||
|
@ -37,9 +37,9 @@ BoolInt Ppmd7z_RangeDec_Init(CPpmd7_RangeDec *p)
|
|||
|
||||
#define R (&p->rc.dec)
|
||||
|
||||
MY_FORCE_INLINE
|
||||
// MY_NO_INLINE
|
||||
static void RangeDec_Decode(CPpmd7 *p, UInt32 start, UInt32 size)
|
||||
Z7_FORCE_INLINE
|
||||
// Z7_NO_INLINE
|
||||
static void Ppmd7z_RD_Decode(CPpmd7 *p, UInt32 start, UInt32 size)
|
||||
{
|
||||
|
||||
|
||||
|
@ -48,18 +48,18 @@ static void RangeDec_Decode(CPpmd7 *p, UInt32 start, UInt32 size)
|
|||
RC_NORM_LOCAL(R)
|
||||
}
|
||||
|
||||
#define RC_Decode(start, size) RangeDec_Decode(p, start, size);
|
||||
#define RC_DecodeFinal(start, size) RC_Decode(start, size) RC_NORM_REMOTE(R)
|
||||
#define RC_GetThreshold(total) (R->Code / (R->Range /= (total)))
|
||||
#define RC_Decode(start, size) Ppmd7z_RD_Decode(p, start, size);
|
||||
#define RC_DecodeFinal(start, size) RC_Decode(start, size) RC_NORM_REMOTE(R)
|
||||
#define RC_GetThreshold(total) (R->Code / (R->Range /= (total)))
|
||||
|
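/* Illustrative sketch (not part of the library): the macros above wrap the
   usual range-decoder interval arithmetic. Names here are invented, and the
   renormalization / byte input handled by the RC_NORM_* macros is only noted
   in a comment; the body of Ppmd7z_RD_Decode is mostly elided in this hunk. */
typedef struct { UInt32 Range, Code; } CRangeDecSketch;

/* RC_GetThreshold(total): divide Range by the total frequency; Code / Range
   is then the cumulative-frequency value that selects the next symbol. */
static UInt32 RangeDec_GetThreshold_Sketch(CRangeDecSketch *r, UInt32 total)
{
  r->Range /= total;
  return r->Code / r->Range;
}

/* RC_Decode(start, size): narrow the interval to the chosen symbol's
   cumulative range [start, start + size). */
static void RangeDec_Decode_Sketch(CRangeDecSketch *r, UInt32 start, UInt32 size)
{
  r->Code -= start * r->Range;
  r->Range *= size;
  /* renormalization (refill Code while Range < kTopValue) omitted */
}
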
||||
|
||||
#define CTX(ref) ((CPpmd7_Context *)Ppmd7_GetContext(p, ref))
|
||||
typedef CPpmd7_Context * CTX_PTR;
|
||||
// typedef CPpmd7_Context * CTX_PTR;
|
||||
#define SUCCESSOR(p) Ppmd_GET_SUCCESSOR(p)
|
||||
void Ppmd7_UpdateModel(CPpmd7 *p);
|
||||
|
||||
#define MASK(sym) ((unsigned char *)charMask)[sym]
|
||||
// MY_FORCE_INLINE
|
||||
// Z7_FORCE_INLINE
|
||||
// static
|
||||
int Ppmd7z_DecodeSymbol(CPpmd7 *p)
|
||||
{
|
||||
|
@ -70,7 +70,7 @@ int Ppmd7z_DecodeSymbol(CPpmd7 *p)
|
|||
CPpmd_State *s = Ppmd7_GetStats(p, p->MinContext);
|
||||
unsigned i;
|
||||
UInt32 count, hiCnt;
|
||||
UInt32 summFreq = p->MinContext->Union2.SummFreq;
|
||||
const UInt32 summFreq = p->MinContext->Union2.SummFreq;
|
||||
|
||||
|
||||
|
||||
|
@ -81,7 +81,7 @@ int Ppmd7z_DecodeSymbol(CPpmd7 *p)
|
|||
if ((Int32)(count -= s->Freq) < 0)
|
||||
{
|
||||
Byte sym;
|
||||
RC_DecodeFinal(0, s->Freq);
|
||||
RC_DecodeFinal(0, s->Freq)
|
||||
p->FoundState = s;
|
||||
sym = s->Symbol;
|
||||
Ppmd7_Update1_0(p);
|
||||
|
@ -96,7 +96,7 @@ int Ppmd7z_DecodeSymbol(CPpmd7 *p)
|
|||
if ((Int32)(count -= (++s)->Freq) < 0)
|
||||
{
|
||||
Byte sym;
|
||||
RC_DecodeFinal((hiCnt - count) - s->Freq, s->Freq);
|
||||
RC_DecodeFinal((hiCnt - count) - s->Freq, s->Freq)
|
||||
p->FoundState = s;
|
||||
sym = s->Symbol;
|
||||
Ppmd7_Update1(p);
|
||||
|
@ -109,10 +109,10 @@ int Ppmd7z_DecodeSymbol(CPpmd7 *p)
|
|||
return PPMD7_SYM_ERROR;
|
||||
|
||||
hiCnt -= count;
|
||||
RC_Decode(hiCnt, summFreq - hiCnt);
|
||||
RC_Decode(hiCnt, summFreq - hiCnt)
|
||||
|
||||
p->HiBitsFlag = PPMD7_HiBitsFlag_3(p->FoundState->Symbol);
|
||||
PPMD_SetAllBitsIn256Bytes(charMask);
|
||||
PPMD_SetAllBitsIn256Bytes(charMask)
|
||||
// i = p->MinContext->NumStats - 1;
|
||||
// do { MASK((--s)->Symbol) = 0; } while (--i);
|
||||
{
|
||||
|
@ -152,7 +152,7 @@ int Ppmd7z_DecodeSymbol(CPpmd7 *p)
|
|||
// Ppmd7_UpdateBin(p);
|
||||
{
|
||||
unsigned freq = s->Freq;
|
||||
CTX_PTR c = CTX(SUCCESSOR(s));
|
||||
CPpmd7_Context *c = CTX(SUCCESSOR(s));
|
||||
sym = s->Symbol;
|
||||
p->FoundState = s;
|
||||
p->PrevSuccess = 1;
|
||||
|
@ -176,7 +176,7 @@ int Ppmd7z_DecodeSymbol(CPpmd7 *p)
|
|||
R->Range -= size0;
|
||||
RC_NORM_LOCAL(R)
|
||||
|
||||
PPMD_SetAllBitsIn256Bytes(charMask);
|
||||
PPMD_SetAllBitsIn256Bytes(charMask)
|
||||
MASK(Ppmd7Context_OneState(p->MinContext)->Symbol) = 0;
|
||||
p->PrevSuccess = 0;
|
||||
}
|
||||
|
@ -245,13 +245,13 @@ int Ppmd7z_DecodeSymbol(CPpmd7 *p)
|
|||
{
|
||||
count -= s->Freq & (unsigned)(MASK((s)->Symbol)); s++; if ((Int32)count < 0) break;
|
||||
// count -= s->Freq & (unsigned)(MASK((s)->Symbol)); s++; if ((Int32)count < 0) break;
|
||||
};
|
||||
}
|
||||
}
|
||||
s--;
|
||||
RC_DecodeFinal((hiCnt - count) - s->Freq, s->Freq);
|
||||
RC_DecodeFinal((hiCnt - count) - s->Freq, s->Freq)
|
||||
|
||||
// new (see->Summ) value can overflow over 16-bits in some rare cases
|
||||
Ppmd_See_Update(see);
|
||||
Ppmd_See_UPDATE(see)
|
||||
p->FoundState = s;
|
||||
sym = s->Symbol;
|
||||
Ppmd7_Update2(p);
|
||||
|
@ -261,7 +261,7 @@ int Ppmd7z_DecodeSymbol(CPpmd7 *p)
|
|||
if (count >= freqSum)
|
||||
return PPMD7_SYM_ERROR;
|
||||
|
||||
RC_Decode(hiCnt, freqSum - hiCnt);
|
||||
RC_Decode(hiCnt, freqSum - hiCnt)
|
||||
|
||||
// We increase (see->Summ) for sum of Freqs of all non_Masked symbols.
|
||||
// new (see->Summ) value can overflow over 16-bits in some rare cases
|
||||
|
@ -295,3 +295,18 @@ Byte *Ppmd7z_DecodeSymbols(CPpmd7 *p, Byte *buf, const Byte *lim)
|
|||
return buf;
|
||||
}
|
||||
*/
|
||||
|
||||
#undef kTopValue
|
||||
#undef READ_BYTE
|
||||
#undef RC_NORM_BASE
|
||||
#undef RC_NORM_1
|
||||
#undef RC_NORM
|
||||
#undef RC_NORM_LOCAL
|
||||
#undef RC_NORM_REMOTE
|
||||
#undef R
|
||||
#undef RC_Decode
|
||||
#undef RC_DecodeFinal
|
||||
#undef RC_GetThreshold
|
||||
#undef CTX
|
||||
#undef SUCCESSOR
|
||||
#undef MASK
|
||||
@ -1,5 +1,5 @@
/* Ppmd7Enc.c -- Ppmd7z (PPMdH with 7z Range Coder) Encoder
2021-04-13 : Igor Pavlov : Public domain
2023-04-02 : Igor Pavlov : Public domain
This code is based on:
PPMd var.H (2001): Dmitry Shkarin : Public domain */

|
@ -8,7 +8,7 @@ This code is based on:
|
|||
|
||||
#include "Ppmd7.h"
|
||||
|
||||
#define kTopValue (1 << 24)
|
||||
#define kTopValue ((UInt32)1 << 24)
|
||||
|
||||
#define R (&p->rc.enc)
|
||||
|
||||
|
@ -20,8 +20,8 @@ void Ppmd7z_Init_RangeEnc(CPpmd7 *p)
|
|||
R->CacheSize = 1;
|
||||
}
|
||||
|
||||
MY_NO_INLINE
|
||||
static void RangeEnc_ShiftLow(CPpmd7 *p)
|
||||
Z7_NO_INLINE
|
||||
static void Ppmd7z_RangeEnc_ShiftLow(CPpmd7 *p)
|
||||
{
|
||||
if ((UInt32)R->Low < (UInt32)0xFF000000 || (unsigned)(R->Low >> 32) != 0)
|
||||
{
|
||||
|
@ -38,53 +38,53 @@ static void RangeEnc_ShiftLow(CPpmd7 *p)
|
|||
R->Low = (UInt32)((UInt32)R->Low << 8);
|
||||
}
|
||||
|
||||
#define RC_NORM_BASE(p) if (R->Range < kTopValue) { R->Range <<= 8; RangeEnc_ShiftLow(p);
|
||||
#define RC_NORM_1(p) RC_NORM_BASE(p) }
|
||||
#define RC_NORM(p) RC_NORM_BASE(p) RC_NORM_BASE(p) }}
|
||||
#define RC_NORM_BASE(p) if (R->Range < kTopValue) { R->Range <<= 8; Ppmd7z_RangeEnc_ShiftLow(p);
|
||||
#define RC_NORM_1(p) RC_NORM_BASE(p) }
|
||||
#define RC_NORM(p) RC_NORM_BASE(p) RC_NORM_BASE(p) }}
|
||||
|
||||
// we must use only one type of Normalization from two: LOCAL or REMOTE
|
||||
#define RC_NORM_LOCAL(p) // RC_NORM(p)
|
||||
#define RC_NORM_REMOTE(p) RC_NORM(p)
|
||||
|
||||
/*
|
||||
#define RangeEnc_Encode(p, start, _size_) \
|
||||
#define Ppmd7z_RangeEnc_Encode(p, start, _size_) \
|
||||
{ UInt32 size = _size_; \
|
||||
R->Low += start * R->Range; \
|
||||
R->Range *= size; \
|
||||
RC_NORM_LOCAL(p); }
|
||||
*/
|
||||
|
||||
MY_FORCE_INLINE
|
||||
// MY_NO_INLINE
|
||||
static void RangeEnc_Encode(CPpmd7 *p, UInt32 start, UInt32 size)
|
||||
Z7_FORCE_INLINE
|
||||
// Z7_NO_INLINE
|
||||
static void Ppmd7z_RangeEnc_Encode(CPpmd7 *p, UInt32 start, UInt32 size)
|
||||
{
|
||||
R->Low += start * R->Range;
|
||||
R->Range *= size;
|
||||
RC_NORM_LOCAL(p);
|
||||
RC_NORM_LOCAL(p)
|
||||
}
|
||||
|
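/* Illustrative counterpart sketch (not part of the library): what the encode
   step above does to the coder state. Names are invented; byte output and
   carry propagation (Ppmd7z_RangeEnc_ShiftLow) are only noted in a comment. */
typedef struct { UInt64 Low; UInt32 Range; } CRangeEncSketch;

static void RangeEnc_Encode_Sketch(CRangeEncSketch *r, UInt32 start, UInt32 size)
{
  r->Low += start * r->Range;   /* move to the symbol's sub-interval [start, start + size) */
  r->Range *= size;             /* shrink the interval to the symbol's width */
  /* RC_NORM: while (Range < kTopValue) { Range <<= 8; ShiftLow(); }
     where ShiftLow() emits the top byte of Low (handling carry) and shifts Low. */
}
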
||||
void Ppmd7z_Flush_RangeEnc(CPpmd7 *p)
|
||||
{
|
||||
unsigned i;
|
||||
for (i = 0; i < 5; i++)
|
||||
RangeEnc_ShiftLow(p);
|
||||
Ppmd7z_RangeEnc_ShiftLow(p);
|
||||
}
|
||||
|
||||
|
||||
|
||||
#define RC_Encode(start, size) RangeEnc_Encode(p, start, size);
|
||||
#define RC_EncodeFinal(start, size) RC_Encode(start, size); RC_NORM_REMOTE(p);
|
||||
#define RC_Encode(start, size) Ppmd7z_RangeEnc_Encode(p, start, size);
|
||||
#define RC_EncodeFinal(start, size) RC_Encode(start, size) RC_NORM_REMOTE(p)
|
||||
|
||||
#define CTX(ref) ((CPpmd7_Context *)Ppmd7_GetContext(p, ref))
|
||||
#define SUFFIX(ctx) CTX((ctx)->Suffix)
|
||||
typedef CPpmd7_Context * CTX_PTR;
|
||||
// typedef CPpmd7_Context * CTX_PTR;
|
||||
#define SUCCESSOR(p) Ppmd_GET_SUCCESSOR(p)
|
||||
|
||||
void Ppmd7_UpdateModel(CPpmd7 *p);
|
||||
|
||||
#define MASK(sym) ((unsigned char *)charMask)[sym]
|
||||
|
||||
MY_FORCE_INLINE
|
||||
Z7_FORCE_INLINE
|
||||
static
|
||||
void Ppmd7z_EncodeSymbol(CPpmd7 *p, int symbol)
|
||||
{
|
||||
|
@ -104,7 +104,7 @@ void Ppmd7z_EncodeSymbol(CPpmd7 *p, int symbol)
|
|||
if (s->Symbol == symbol)
|
||||
{
|
||||
// R->Range /= p->MinContext->Union2.SummFreq;
|
||||
RC_EncodeFinal(0, s->Freq);
|
||||
RC_EncodeFinal(0, s->Freq)
|
||||
p->FoundState = s;
|
||||
Ppmd7_Update1_0(p);
|
||||
return;
|
||||
|
@ -117,7 +117,7 @@ void Ppmd7z_EncodeSymbol(CPpmd7 *p, int symbol)
|
|||
if ((++s)->Symbol == symbol)
|
||||
{
|
||||
// R->Range /= p->MinContext->Union2.SummFreq;
|
||||
RC_EncodeFinal(sum, s->Freq);
|
||||
RC_EncodeFinal(sum, s->Freq)
|
||||
p->FoundState = s;
|
||||
Ppmd7_Update1(p);
|
||||
return;
|
||||
|
@ -127,10 +127,10 @@ void Ppmd7z_EncodeSymbol(CPpmd7 *p, int symbol)
|
|||
while (--i);
|
||||
|
||||
// R->Range /= p->MinContext->Union2.SummFreq;
|
||||
RC_Encode(sum, p->MinContext->Union2.SummFreq - sum);
|
||||
RC_Encode(sum, p->MinContext->Union2.SummFreq - sum)
|
||||
|
||||
p->HiBitsFlag = PPMD7_HiBitsFlag_3(p->FoundState->Symbol);
|
||||
PPMD_SetAllBitsIn256Bytes(charMask);
|
||||
PPMD_SetAllBitsIn256Bytes(charMask)
|
||||
// MASK(s->Symbol) = 0;
|
||||
// i = p->MinContext->NumStats - 1;
|
||||
// do { MASK((--s)->Symbol) = 0; } while (--i);
|
||||
|
@ -153,20 +153,20 @@ void Ppmd7z_EncodeSymbol(CPpmd7 *p, int symbol)
|
|||
UInt16 *prob = Ppmd7_GetBinSumm(p);
|
||||
CPpmd_State *s = Ppmd7Context_OneState(p->MinContext);
|
||||
UInt32 pr = *prob;
|
||||
UInt32 bound = (R->Range >> 14) * pr;
|
||||
const UInt32 bound = (R->Range >> 14) * pr;
|
||||
pr = PPMD_UPDATE_PROB_1(pr);
|
||||
if (s->Symbol == symbol)
|
||||
{
|
||||
*prob = (UInt16)(pr + (1 << PPMD_INT_BITS));
|
||||
// RangeEnc_EncodeBit_0(p, bound);
|
||||
R->Range = bound;
|
||||
RC_NORM_1(p);
|
||||
RC_NORM_1(p)
|
||||
|
||||
// p->FoundState = s;
|
||||
// Ppmd7_UpdateBin(p);
|
||||
{
|
||||
unsigned freq = s->Freq;
|
||||
CTX_PTR c = CTX(SUCCESSOR(s));
|
||||
const unsigned freq = s->Freq;
|
||||
CPpmd7_Context *c = CTX(SUCCESSOR(s));
|
||||
p->FoundState = s;
|
||||
p->PrevSuccess = 1;
|
||||
p->RunLength++;
|
||||
|
@ -187,7 +187,7 @@ void Ppmd7z_EncodeSymbol(CPpmd7 *p, int symbol)
|
|||
R->Range -= bound;
|
||||
RC_NORM_LOCAL(p)
|
||||
|
||||
PPMD_SetAllBitsIn256Bytes(charMask);
|
||||
PPMD_SetAllBitsIn256Bytes(charMask)
|
||||
MASK(s->Symbol) = 0;
|
||||
p->PrevSuccess = 0;
|
||||
}
|
||||
|
@ -248,14 +248,14 @@ void Ppmd7z_EncodeSymbol(CPpmd7 *p, int symbol)
|
|||
|
||||
do
|
||||
{
|
||||
unsigned cur = s->Symbol;
|
||||
const unsigned cur = s->Symbol;
|
||||
if ((int)cur == symbol)
|
||||
{
|
||||
UInt32 low = sum;
|
||||
UInt32 freq = s->Freq;
|
||||
const UInt32 low = sum;
|
||||
const UInt32 freq = s->Freq;
|
||||
unsigned num2;
|
||||
|
||||
Ppmd_See_Update(see);
|
||||
Ppmd_See_UPDATE(see)
|
||||
p->FoundState = s;
|
||||
sum += escFreq;
|
||||
|
||||
|
@ -279,7 +279,7 @@ void Ppmd7z_EncodeSymbol(CPpmd7 *p, int symbol)
|
|||
|
||||
|
||||
R->Range /= sum;
|
||||
RC_EncodeFinal(low, freq);
|
||||
RC_EncodeFinal(low, freq)
|
||||
Ppmd7_Update2(p);
|
||||
return;
|
||||
}
|
||||
|
@ -289,21 +289,21 @@ void Ppmd7z_EncodeSymbol(CPpmd7 *p, int symbol)
|
|||
while (--i);
|
||||
|
||||
{
|
||||
UInt32 total = sum + escFreq;
|
||||
const UInt32 total = sum + escFreq;
|
||||
see->Summ = (UInt16)(see->Summ + total);
|
||||
|
||||
R->Range /= total;
|
||||
RC_Encode(sum, escFreq);
|
||||
RC_Encode(sum, escFreq)
|
||||
}
|
||||
|
||||
{
|
||||
CPpmd_State *s2 = Ppmd7_GetStats(p, p->MinContext);
|
||||
const CPpmd_State *s2 = Ppmd7_GetStats(p, p->MinContext);
|
||||
s--;
|
||||
MASK(s->Symbol) = 0;
|
||||
do
|
||||
{
|
||||
unsigned sym0 = s2[0].Symbol;
|
||||
unsigned sym1 = s2[1].Symbol;
|
||||
const unsigned sym0 = s2[0].Symbol;
|
||||
const unsigned sym1 = s2[1].Symbol;
|
||||
s2 += 2;
|
||||
MASK(sym0) = 0;
|
||||
MASK(sym1) = 0;
|
||||
|
@ -321,3 +321,18 @@ void Ppmd7z_EncodeSymbols(CPpmd7 *p, const Byte *buf, const Byte *lim)
|
|||
Ppmd7z_EncodeSymbol(p, *buf);
|
||||
}
|
||||
}
|
||||
|
||||
#undef kTopValue
|
||||
#undef WRITE_BYTE
|
||||
#undef RC_NORM_BASE
|
||||
#undef RC_NORM_1
|
||||
#undef RC_NORM
|
||||
#undef RC_NORM_LOCAL
|
||||
#undef RC_NORM_REMOTE
|
||||
#undef R
|
||||
#undef RC_Encode
|
||||
#undef RC_EncodeFinal
|
||||
#undef SUFFIX
|
||||
#undef CTX
|
||||
#undef SUCCESSOR
|
||||
#undef MASK
|
||||
@ -1,5 +1,5 @@
/* Sha256.c -- SHA-256 Hash
2021-04-01 : Igor Pavlov : Public domain
2023-04-02 : Igor Pavlov : Public domain
This code is based on public domain code from Wei Dai's Crypto++ library. */

#include "Precomp.h"
|
@ -17,48 +17,48 @@ This code is based on public domain code from Wei Dai's Crypto++ library. */
|
|||
#ifdef MY_CPU_X86_OR_AMD64
|
||||
#ifdef _MSC_VER
|
||||
#if _MSC_VER >= 1200
|
||||
#define _SHA_SUPPORTED
|
||||
#define Z7_COMPILER_SHA256_SUPPORTED
|
||||
#endif
|
||||
#elif defined(__clang__)
|
||||
#if (__clang_major__ >= 8) // fix that check
|
||||
#define _SHA_SUPPORTED
|
||||
#define Z7_COMPILER_SHA256_SUPPORTED
|
||||
#endif
|
||||
#elif defined(__GNUC__)
|
||||
#if (__GNUC__ >= 8) // fix that check
|
||||
#define _SHA_SUPPORTED
|
||||
#define Z7_COMPILER_SHA256_SUPPORTED
|
||||
#endif
|
||||
#elif defined(__INTEL_COMPILER)
|
||||
#if (__INTEL_COMPILER >= 1800) // fix that check
|
||||
#define _SHA_SUPPORTED
|
||||
#define Z7_COMPILER_SHA256_SUPPORTED
|
||||
#endif
|
||||
#endif
|
||||
#elif defined(MY_CPU_ARM_OR_ARM64)
|
||||
#ifdef _MSC_VER
|
||||
#if _MSC_VER >= 1910
|
||||
#define _SHA_SUPPORTED
|
||||
#define Z7_COMPILER_SHA256_SUPPORTED
|
||||
#endif
|
||||
#elif defined(__clang__)
|
||||
#if (__clang_major__ >= 8) // fix that check
|
||||
#define _SHA_SUPPORTED
|
||||
#define Z7_COMPILER_SHA256_SUPPORTED
|
||||
#endif
|
||||
#elif defined(__GNUC__)
|
||||
#if (__GNUC__ >= 6) // fix that check
|
||||
#define _SHA_SUPPORTED
|
||||
#define Z7_COMPILER_SHA256_SUPPORTED
|
||||
#endif
|
||||
#endif
|
||||
#endif
|
||||
|
||||
void MY_FAST_CALL Sha256_UpdateBlocks(UInt32 state[8], const Byte *data, size_t numBlocks);
|
||||
void Z7_FASTCALL Sha256_UpdateBlocks(UInt32 state[8], const Byte *data, size_t numBlocks);
|
||||
|
||||
#ifdef _SHA_SUPPORTED
|
||||
void MY_FAST_CALL Sha256_UpdateBlocks_HW(UInt32 state[8], const Byte *data, size_t numBlocks);
|
||||
#ifdef Z7_COMPILER_SHA256_SUPPORTED
|
||||
void Z7_FASTCALL Sha256_UpdateBlocks_HW(UInt32 state[8], const Byte *data, size_t numBlocks);
|
||||
|
||||
static SHA256_FUNC_UPDATE_BLOCKS g_FUNC_UPDATE_BLOCKS = Sha256_UpdateBlocks;
|
||||
static SHA256_FUNC_UPDATE_BLOCKS g_FUNC_UPDATE_BLOCKS_HW;
|
||||
static SHA256_FUNC_UPDATE_BLOCKS g_SHA256_FUNC_UPDATE_BLOCKS = Sha256_UpdateBlocks;
|
||||
static SHA256_FUNC_UPDATE_BLOCKS g_SHA256_FUNC_UPDATE_BLOCKS_HW;
|
||||
|
||||
#define UPDATE_BLOCKS(p) p->func_UpdateBlocks
|
||||
#define SHA256_UPDATE_BLOCKS(p) p->func_UpdateBlocks
|
||||
#else
|
||||
#define UPDATE_BLOCKS(p) Sha256_UpdateBlocks
|
||||
#define SHA256_UPDATE_BLOCKS(p) Sha256_UpdateBlocks
|
||||
#endif
|
||||
|
||||
|
||||
|
@ -66,16 +66,16 @@ BoolInt Sha256_SetFunction(CSha256 *p, unsigned algo)
|
|||
{
|
||||
SHA256_FUNC_UPDATE_BLOCKS func = Sha256_UpdateBlocks;
|
||||
|
||||
#ifdef _SHA_SUPPORTED
|
||||
#ifdef Z7_COMPILER_SHA256_SUPPORTED
|
||||
if (algo != SHA256_ALGO_SW)
|
||||
{
|
||||
if (algo == SHA256_ALGO_DEFAULT)
|
||||
func = g_FUNC_UPDATE_BLOCKS;
|
||||
func = g_SHA256_FUNC_UPDATE_BLOCKS;
|
||||
else
|
||||
{
|
||||
if (algo != SHA256_ALGO_HW)
|
||||
return False;
|
||||
func = g_FUNC_UPDATE_BLOCKS_HW;
|
||||
func = g_SHA256_FUNC_UPDATE_BLOCKS_HW;
|
||||
if (!func)
|
||||
return False;
|
||||
}
|
||||
|
@ -92,17 +92,18 @@ BoolInt Sha256_SetFunction(CSha256 *p, unsigned algo)
|
|||
|
||||
/* define it for speed optimization */
|
||||
|
||||
#ifdef _SFX
|
||||
#ifdef Z7_SFX
|
||||
#define STEP_PRE 1
|
||||
#define STEP_MAIN 1
|
||||
#else
|
||||
#define STEP_PRE 2
|
||||
#define STEP_MAIN 4
|
||||
// #define _SHA256_UNROLL
|
||||
// #define Z7_SHA256_UNROLL
|
||||
#endif
|
||||
|
||||
#undef Z7_SHA256_BIG_W
|
||||
#if STEP_MAIN != 16
|
||||
#define _SHA256_BIG_W
|
||||
#define Z7_SHA256_BIG_W
|
||||
#endif
|
||||
|
||||
|
||||
|
@ -124,8 +125,8 @@ void Sha256_InitState(CSha256 *p)
|
|||
void Sha256_Init(CSha256 *p)
|
||||
{
|
||||
p->func_UpdateBlocks =
|
||||
#ifdef _SHA_SUPPORTED
|
||||
g_FUNC_UPDATE_BLOCKS;
|
||||
#ifdef Z7_COMPILER_SHA256_SUPPORTED
|
||||
g_SHA256_FUNC_UPDATE_BLOCKS;
|
||||
#else
|
||||
NULL;
|
||||
#endif
|
||||
|
@ -145,7 +146,7 @@ void Sha256_Init(CSha256 *p)
|
|||
|
||||
#define blk2_main(j, i) s1(w(j, (i)-2)) + w(j, (i)-7) + s0(w(j, (i)-15))
|
||||
|
||||
#ifdef _SHA256_BIG_W
|
||||
#ifdef Z7_SHA256_BIG_W
|
||||
// we use +i instead of +(i) to change the order to solve CLANG compiler warning for signed/unsigned.
|
||||
#define w(j, i) W[(size_t)(j) + i]
|
||||
#define blk2(j, i) (w(j, i) = w(j, (i)-16) + blk2_main(j, i))
|
||||
|
@ -176,7 +177,7 @@ void Sha256_Init(CSha256 *p)
|
|||
#define R1_PRE(i) T1( W_PRE, i)
|
||||
#define R1_MAIN(i) T1( W_MAIN, i)
|
||||
|
||||
#if (!defined(_SHA256_UNROLL) || STEP_MAIN < 8) && (STEP_MAIN >= 4)
|
||||
#if (!defined(Z7_SHA256_UNROLL) || STEP_MAIN < 8) && (STEP_MAIN >= 4)
|
||||
#define R2_MAIN(i) \
|
||||
R1_MAIN(i) \
|
||||
R1_MAIN(i + 1) \
|
||||
|
@ -185,7 +186,7 @@ void Sha256_Init(CSha256 *p)
|
|||
|
||||
|
||||
|
||||
#if defined(_SHA256_UNROLL) && STEP_MAIN >= 8
|
||||
#if defined(Z7_SHA256_UNROLL) && STEP_MAIN >= 8
|
||||
|
||||
#define T4( a,b,c,d,e,f,g,h, wx, i) \
|
||||
h += S1(e) + Ch(e,f,g) + K[(i)+(size_t)(j)] + wx(i); \
|
||||
|
@ -223,7 +224,7 @@ void Sha256_Init(CSha256 *p)
|
|||
|
||||
#endif
|
||||
|
||||
void MY_FAST_CALL Sha256_UpdateBlocks_HW(UInt32 state[8], const Byte *data, size_t numBlocks);
|
||||
void Z7_FASTCALL Sha256_UpdateBlocks_HW(UInt32 state[8], const Byte *data, size_t numBlocks);
|
||||
|
||||
// static
|
||||
extern MY_ALIGN(64)
|
||||
|
@ -252,11 +253,11 @@ const UInt32 SHA256_K_ARRAY[64] = {
|
|||
#define K SHA256_K_ARRAY
|
||||
|
||||
|
||||
MY_NO_INLINE
|
||||
void MY_FAST_CALL Sha256_UpdateBlocks(UInt32 state[8], const Byte *data, size_t numBlocks)
|
||||
Z7_NO_INLINE
|
||||
void Z7_FASTCALL Sha256_UpdateBlocks(UInt32 state[8], const Byte *data, size_t numBlocks)
|
||||
{
|
||||
UInt32 W
|
||||
#ifdef _SHA256_BIG_W
|
||||
#ifdef Z7_SHA256_BIG_W
|
||||
[64];
|
||||
#else
|
||||
[16];
|
||||
|
@ -266,7 +267,7 @@ void MY_FAST_CALL Sha256_UpdateBlocks(UInt32 state[8], const Byte *data, size_t
|
|||
|
||||
UInt32 a,b,c,d,e,f,g,h;
|
||||
|
||||
#if !defined(_SHA256_UNROLL) || (STEP_MAIN <= 4) || (STEP_PRE <= 4)
|
||||
#if !defined(Z7_SHA256_UNROLL) || (STEP_MAIN <= 4) || (STEP_PRE <= 4)
|
||||
UInt32 tmp;
|
||||
#endif
|
||||
|
||||
|
@ -297,12 +298,12 @@ void MY_FAST_CALL Sha256_UpdateBlocks(UInt32 state[8], const Byte *data, size_t
|
|||
|
||||
#else
|
||||
|
||||
R1_PRE(0);
|
||||
R1_PRE(0)
|
||||
#if STEP_PRE >= 2
|
||||
R1_PRE(1);
|
||||
R1_PRE(1)
|
||||
#if STEP_PRE >= 4
|
||||
R1_PRE(2);
|
||||
R1_PRE(3);
|
||||
R1_PRE(2)
|
||||
R1_PRE(3)
|
||||
#endif
|
||||
#endif
|
||||
|
||||
|
@ -311,32 +312,32 @@ void MY_FAST_CALL Sha256_UpdateBlocks(UInt32 state[8], const Byte *data, size_t
|
|||
|
||||
for (j = 16; j < 64; j += STEP_MAIN)
|
||||
{
|
||||
#if defined(_SHA256_UNROLL) && STEP_MAIN >= 8
|
||||
#if defined(Z7_SHA256_UNROLL) && STEP_MAIN >= 8
|
||||
|
||||
#if STEP_MAIN < 8
|
||||
R4_MAIN(0);
|
||||
R4_MAIN(0)
|
||||
#else
|
||||
R8_MAIN(0);
|
||||
R8_MAIN(0)
|
||||
#if STEP_MAIN == 16
|
||||
R8_MAIN(8);
|
||||
R8_MAIN(8)
|
||||
#endif
|
||||
#endif
|
||||
|
||||
#else
|
||||
|
||||
R1_MAIN(0);
|
||||
R1_MAIN(0)
|
||||
#if STEP_MAIN >= 2
|
||||
R1_MAIN(1);
|
||||
R1_MAIN(1)
|
||||
#if STEP_MAIN >= 4
|
||||
R2_MAIN(2);
|
||||
R2_MAIN(2)
|
||||
#if STEP_MAIN >= 8
|
||||
R2_MAIN(4);
|
||||
R2_MAIN(6);
|
||||
R2_MAIN(4)
|
||||
R2_MAIN(6)
|
||||
#if STEP_MAIN >= 16
|
||||
R2_MAIN(8);
|
||||
R2_MAIN(10);
|
||||
R2_MAIN(12);
|
||||
R2_MAIN(14);
|
||||
R2_MAIN(8)
|
||||
R2_MAIN(10)
|
||||
R2_MAIN(12)
|
||||
R2_MAIN(14)
|
||||
#endif
|
||||
#endif
|
||||
#endif
|
||||
|
@ -367,7 +368,7 @@ void MY_FAST_CALL Sha256_UpdateBlocks(UInt32 state[8], const Byte *data, size_t
|
|||
#undef s1
|
||||
#undef K
|
||||
|
||||
#define Sha256_UpdateBlock(p) UPDATE_BLOCKS(p)(p->state, p->buffer, 1)
|
||||
#define Sha256_UpdateBlock(p) SHA256_UPDATE_BLOCKS(p)(p->state, p->buffer, 1)
|
||||
|
||||
void Sha256_Update(CSha256 *p, const Byte *data, size_t size)
|
||||
{
|
||||
|
@ -397,7 +398,7 @@ void Sha256_Update(CSha256 *p, const Byte *data, size_t size)
|
|||
}
|
||||
{
|
||||
size_t numBlocks = size >> 6;
|
||||
UPDATE_BLOCKS(p)(p->state, data, numBlocks);
|
||||
SHA256_UPDATE_BLOCKS(p)(p->state, data, numBlocks);
|
||||
size &= 0x3F;
|
||||
if (size == 0)
|
||||
return;
|
||||
|
@ -441,8 +442,8 @@ void Sha256_Final(CSha256 *p, Byte *digest)
|
|||
|
||||
{
|
||||
UInt64 numBits = (p->count << 3);
|
||||
SetBe32(p->buffer + 64 - 8, (UInt32)(numBits >> 32));
|
||||
SetBe32(p->buffer + 64 - 4, (UInt32)(numBits));
|
||||
SetBe32(p->buffer + 64 - 8, (UInt32)(numBits >> 32))
|
||||
SetBe32(p->buffer + 64 - 4, (UInt32)(numBits))
|
||||
}
|
||||
|
||||
Sha256_UpdateBlock(p);
|
||||
|
@ -451,8 +452,8 @@ void Sha256_Final(CSha256 *p, Byte *digest)
|
|||
{
|
||||
UInt32 v0 = p->state[i];
|
||||
UInt32 v1 = p->state[(size_t)i + 1];
|
||||
SetBe32(digest , v0);
|
||||
SetBe32(digest + 4, v1);
|
||||
SetBe32(digest , v0)
|
||||
SetBe32(digest + 4, v1)
|
||||
digest += 8;
|
||||
}
|
||||
|
||||
|
@ -460,9 +461,9 @@ void Sha256_Final(CSha256 *p, Byte *digest)
|
|||
}
|
||||
|
||||
|
||||
void Sha256Prepare()
|
||||
void Sha256Prepare(void)
|
||||
{
|
||||
#ifdef _SHA_SUPPORTED
|
||||
#ifdef Z7_COMPILER_SHA256_SUPPORTED
|
||||
SHA256_FUNC_UPDATE_BLOCKS f, f_hw;
|
||||
f = Sha256_UpdateBlocks;
|
||||
f_hw = NULL;
|
||||
|
@ -480,7 +481,36 @@ void Sha256Prepare()
|
|||
// printf("\n========== HW SHA256 ======== \n");
|
||||
f = f_hw = Sha256_UpdateBlocks_HW;
|
||||
}
|
||||
g_FUNC_UPDATE_BLOCKS = f;
|
||||
g_FUNC_UPDATE_BLOCKS_HW = f_hw;
|
||||
g_SHA256_FUNC_UPDATE_BLOCKS = f;
|
||||
g_SHA256_FUNC_UPDATE_BLOCKS_HW = f_hw;
|
||||
#endif
|
||||
}
|
||||
|
||||
#undef S0
|
||||
#undef S1
|
||||
#undef s0
|
||||
#undef s1
|
||||
#undef Ch
|
||||
#undef Maj
|
||||
#undef W_MAIN
|
||||
#undef W_PRE
|
||||
#undef w
|
||||
#undef blk2_main
|
||||
#undef blk2
|
||||
#undef T1
|
||||
#undef T4
|
||||
#undef T8
|
||||
#undef R1_PRE
|
||||
#undef R1_MAIN
|
||||
#undef R2_MAIN
|
||||
#undef R4
|
||||
#undef R4_PRE
|
||||
#undef R4_MAIN
|
||||
#undef R8
|
||||
#undef R8_PRE
|
||||
#undef R8_MAIN
|
||||
#undef STEP_PRE
|
||||
#undef STEP_MAIN
|
||||
#undef Z7_SHA256_BIG_W
|
||||
#undef Z7_SHA256_UNROLL
|
||||
#undef Z7_COMPILER_SHA256_SUPPORTED
|
||||
|
|
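/* Usage sketch (not from the source): one-shot hashing with the API above.
   The 32-byte digest size is assumed here; real code should use the digest
   size constant from Sha256.h instead of a literal. */
#include "Sha256.h"

static void HashBuffer_Sketch(const Byte *data, size_t size, Byte digest[32])
{
  CSha256 sha;
  Sha256Prepare();               /* one-time: selects the HW update routine when available */
  Sha256_Init(&sha);
  Sha256_Update(&sha, data, size);
  Sha256_Final(&sha, digest);    /* writes the big-endian 32-byte digest */
}
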
|
@ -1,7 +1,9 @@
/* Sha256Opt.c -- SHA-256 optimized code for SHA-256 hardware instructions
2021-04-01 : Igor Pavlov : Public domain */
2023-04-02 : Igor Pavlov : Public domain */

#include "Precomp.h"
#include "Compiler.h"
#include "CpuArch.h"

#if defined(_MSC_VER)
|
||||
#if (_MSC_VER < 1900) && (_MSC_VER >= 1200)
|
||||
|
@ -9,41 +11,26 @@
|
|||
#endif
|
||||
#endif
|
||||
|
||||
#include "CpuArch.h"
|
||||
|
||||
#ifdef MY_CPU_X86_OR_AMD64
|
||||
#if defined(__clang__)
|
||||
#if (__clang_major__ >= 8) // fix that check
|
||||
#if defined(__INTEL_COMPILER) && (__INTEL_COMPILER >= 1600) // fix that check
|
||||
#define USE_HW_SHA
|
||||
#ifndef __SHA__
|
||||
#elif defined(Z7_LLVM_CLANG_VERSION) && (Z7_LLVM_CLANG_VERSION >= 30800) \
|
||||
|| defined(Z7_APPLE_CLANG_VERSION) && (Z7_APPLE_CLANG_VERSION >= 50100) \
|
||||
|| defined(Z7_GCC_VERSION) && (Z7_GCC_VERSION >= 40900)
|
||||
#define USE_HW_SHA
|
||||
#if !defined(_INTEL_COMPILER)
|
||||
// icc defines __GNUC__, but icc doesn't support __attribute__(__target__)
|
||||
#if !defined(__SHA__) || !defined(__SSSE3__)
|
||||
#define ATTRIB_SHA __attribute__((__target__("sha,ssse3")))
|
||||
#if defined(_MSC_VER)
|
||||
// SSSE3: for clang-cl:
|
||||
#include <tmmintrin.h>
|
||||
#define __SHA__
|
||||
#endif
|
||||
#endif
|
||||
|
||||
#endif
|
||||
#elif defined(__GNUC__)
|
||||
#if (__GNUC__ >= 8) // fix that check
|
||||
#define USE_HW_SHA
|
||||
#ifndef __SHA__
|
||||
#define ATTRIB_SHA __attribute__((__target__("sha,ssse3")))
|
||||
// #pragma GCC target("sha,ssse3")
|
||||
#endif
|
||||
#endif
|
||||
#elif defined(__INTEL_COMPILER)
|
||||
#if (__INTEL_COMPILER >= 1800) // fix that check
|
||||
#define USE_HW_SHA
|
||||
#endif
|
||||
#elif defined(_MSC_VER)
|
||||
#ifdef USE_MY_MM
|
||||
#define USE_VER_MIN 1300
|
||||
#else
|
||||
#define USE_VER_MIN 1910
|
||||
#define USE_VER_MIN 1900
|
||||
#endif
|
||||
#if _MSC_VER >= USE_VER_MIN
|
||||
#if (_MSC_VER >= USE_VER_MIN)
|
||||
#define USE_HW_SHA
|
||||
#endif
|
||||
#endif
|
||||
|
@ -52,16 +39,19 @@
|
|||
#ifdef USE_HW_SHA
|
||||
|
||||
// #pragma message("Sha256 HW")
|
||||
// #include <wmmintrin.h>
|
||||
|
||||
#if !defined(_MSC_VER) || (_MSC_VER >= 1900)
|
||||
// sse/sse2/ssse3:
|
||||
#include <tmmintrin.h>
|
||||
// sha*:
|
||||
#include <immintrin.h>
|
||||
#else
|
||||
#include <emmintrin.h>
|
||||
|
||||
#if defined(_MSC_VER) && (_MSC_VER >= 1600)
|
||||
// #include <intrin.h>
|
||||
#endif
|
||||
#if defined (__clang__) && defined(_MSC_VER)
|
||||
// #if !defined(__SSSE3__)
|
||||
// #endif
|
||||
#if !defined(__SHA__)
|
||||
#include <shaintrin.h>
|
||||
#endif
|
||||
#else
|
||||
|
||||
#ifdef USE_MY_MM
|
||||
#include "My_mm.h"
|
||||
|
@ -98,9 +88,9 @@ const UInt32 SHA256_K_ARRAY[64];
|
|||
#define K SHA256_K_ARRAY
|
||||
|
||||
|
||||
#define ADD_EPI32(dest, src) dest = _mm_add_epi32(dest, src);
|
||||
#define SHA256_MSG1(dest, src) dest = _mm_sha256msg1_epu32(dest, src);
|
||||
#define SHA25G_MSG2(dest, src) dest = _mm_sha256msg2_epu32(dest, src);
|
||||
#define ADD_EPI32(dest, src) dest = _mm_add_epi32(dest, src);
|
||||
#define SHA256_MSG1(dest, src) dest = _mm_sha256msg1_epu32(dest, src);
|
||||
#define SHA25G_MSG2(dest, src) dest = _mm_sha256msg2_epu32(dest, src);
|
||||
|
||||
|
||||
#define LOAD_SHUFFLE(m, k) \
|
||||
|
@ -112,7 +102,7 @@ const UInt32 SHA256_K_ARRAY[64];
|
|||
|
||||
#define SM2(g0, g1, g2, g3) \
|
||||
tmp = _mm_alignr_epi8(g1, g0, 4); \
|
||||
ADD_EPI32(g2, tmp); \
|
||||
ADD_EPI32(g2, tmp) \
|
||||
SHA25G_MSG2(g2, g1); \
|
||||
|
||||
// #define LS0(k, g0, g1, g2, g3) LOAD_SHUFFLE(g0, k)
|
||||
|
@ -138,16 +128,16 @@ const UInt32 SHA256_K_ARRAY[64];
|
|||
// We use scheme with 3 rounds ahead for SHA256_MSG1 / 2 rounds ahead for SHA256_MSG2
|
||||
|
||||
#define R4(k, g0, g1, g2, g3, OP0, OP1) \
|
||||
RND2_0(g0, k); \
|
||||
OP0(g0, g1, g2, g3); \
|
||||
RND2_1; \
|
||||
OP1(g0, g1, g2, g3); \
|
||||
RND2_0(g0, k) \
|
||||
OP0(g0, g1, g2, g3) \
|
||||
RND2_1 \
|
||||
OP1(g0, g1, g2, g3) \
|
||||
|
||||
#define R16(k, OP0, OP1, OP2, OP3, OP4, OP5, OP6, OP7) \
|
||||
R4 ( (k)*4+0, m0, m1, m2, m3, OP0, OP1 ) \
|
||||
R4 ( (k)*4+1, m1, m2, m3, m0, OP2, OP3 ) \
|
||||
R4 ( (k)*4+2, m2, m3, m0, m1, OP4, OP5 ) \
|
||||
R4 ( (k)*4+3, m3, m0, m1, m2, OP6, OP7 ) \
|
||||
R4 ( (k)*4+0, m0,m1,m2,m3, OP0, OP1 ) \
|
||||
R4 ( (k)*4+1, m1,m2,m3,m0, OP2, OP3 ) \
|
||||
R4 ( (k)*4+2, m2,m3,m0,m1, OP4, OP5 ) \
|
||||
R4 ( (k)*4+3, m3,m0,m1,m2, OP6, OP7 ) \
|
||||
|
||||
#define PREPARE_STATE \
|
||||
tmp = _mm_shuffle_epi32(state0, 0x1B); /* abcd */ \
|
||||
|
@ -157,11 +147,11 @@ const UInt32 SHA256_K_ARRAY[64];
|
|||
state1 = _mm_unpackhi_epi64(state1, tmp); /* abef */ \
|
||||
|
||||
|
||||
void MY_FAST_CALL Sha256_UpdateBlocks_HW(UInt32 state[8], const Byte *data, size_t numBlocks);
|
||||
void Z7_FASTCALL Sha256_UpdateBlocks_HW(UInt32 state[8], const Byte *data, size_t numBlocks);
|
||||
#ifdef ATTRIB_SHA
|
||||
ATTRIB_SHA
|
||||
#endif
|
||||
void MY_FAST_CALL Sha256_UpdateBlocks_HW(UInt32 state[8], const Byte *data, size_t numBlocks)
|
||||
void Z7_FASTCALL Sha256_UpdateBlocks_HW(UInt32 state[8], const Byte *data, size_t numBlocks)
|
||||
{
|
||||
const __m128i mask = _mm_set_epi32(0x0c0d0e0f, 0x08090a0b, 0x04050607, 0x00010203);
|
||||
__m128i tmp;
|
||||
|
@ -192,13 +182,13 @@ void MY_FAST_CALL Sha256_UpdateBlocks_HW(UInt32 state[8], const Byte *data, size
|
|||
|
||||
|
||||
|
||||
R16 ( 0, NNN, NNN, SM1, NNN, SM1, SM2, SM1, SM2 );
|
||||
R16 ( 1, SM1, SM2, SM1, SM2, SM1, SM2, SM1, SM2 );
|
||||
R16 ( 2, SM1, SM2, SM1, SM2, SM1, SM2, SM1, SM2 );
|
||||
R16 ( 3, SM1, SM2, NNN, SM2, NNN, NNN, NNN, NNN );
|
||||
R16 ( 0, NNN, NNN, SM1, NNN, SM1, SM2, SM1, SM2 )
|
||||
R16 ( 1, SM1, SM2, SM1, SM2, SM1, SM2, SM1, SM2 )
|
||||
R16 ( 2, SM1, SM2, SM1, SM2, SM1, SM2, SM1, SM2 )
|
||||
R16 ( 3, SM1, SM2, NNN, SM2, NNN, NNN, NNN, NNN )
|
||||
|
||||
ADD_EPI32(state0, state0_save);
|
||||
ADD_EPI32(state1, state1_save);
|
||||
ADD_EPI32(state0, state0_save)
|
||||
ADD_EPI32(state1, state1_save)
|
||||
|
||||
data += 64;
|
||||
}
|
||||
|
@ -298,11 +288,11 @@ const UInt32 SHA256_K_ARRAY[64];
|
|||
R4 ( (k)*4+3, m3, m0, m1, m2, OP6, OP7 ) \
|
||||
|
||||
|
||||
void MY_FAST_CALL Sha256_UpdateBlocks_HW(UInt32 state[8], const Byte *data, size_t numBlocks);
|
||||
void Z7_FASTCALL Sha256_UpdateBlocks_HW(UInt32 state[8], const Byte *data, size_t numBlocks);
|
||||
#ifdef ATTRIB_SHA
|
||||
ATTRIB_SHA
|
||||
#endif
|
||||
void MY_FAST_CALL Sha256_UpdateBlocks_HW(UInt32 state[8], const Byte *data, size_t numBlocks)
|
||||
void Z7_FASTCALL Sha256_UpdateBlocks_HW(UInt32 state[8], const Byte *data, size_t numBlocks)
|
||||
{
|
||||
v128 state0, state1;
|
||||
|
||||
|
@ -353,12 +343,12 @@ void MY_FAST_CALL Sha256_UpdateBlocks_HW(UInt32 state[8], const Byte *data, size
|
|||
// #include <stdlib.h>
|
||||
|
||||
// #include "Sha256.h"
|
||||
void MY_FAST_CALL Sha256_UpdateBlocks(UInt32 state[8], const Byte *data, size_t numBlocks);
|
||||
void Z7_FASTCALL Sha256_UpdateBlocks(UInt32 state[8], const Byte *data, size_t numBlocks);
|
||||
|
||||
#pragma message("Sha256 HW-SW stub was used")
|
||||
|
||||
void MY_FAST_CALL Sha256_UpdateBlocks_HW(UInt32 state[8], const Byte *data, size_t numBlocks);
|
||||
void MY_FAST_CALL Sha256_UpdateBlocks_HW(UInt32 state[8], const Byte *data, size_t numBlocks)
|
||||
void Z7_FASTCALL Sha256_UpdateBlocks_HW(UInt32 state[8], const Byte *data, size_t numBlocks);
|
||||
void Z7_FASTCALL Sha256_UpdateBlocks_HW(UInt32 state[8], const Byte *data, size_t numBlocks)
|
||||
{
|
||||
Sha256_UpdateBlocks(state, data, numBlocks);
|
||||
/*
|
||||
|
@ -371,3 +361,26 @@ void MY_FAST_CALL Sha256_UpdateBlocks_HW(UInt32 state[8], const Byte *data, size
|
|||
}
|
||||
|
||||
#endif
|
||||
|
||||
|
||||
|
||||
#undef K
|
||||
#undef RND2
|
||||
#undef RND2_0
|
||||
#undef RND2_1
|
||||
|
||||
#undef MY_rev32_for_LE
|
||||
#undef NNN
|
||||
#undef LOAD_128
|
||||
#undef STORE_128
|
||||
#undef LOAD_SHUFFLE
|
||||
#undef SM1
|
||||
#undef SM2
|
||||
|
||||
#undef NNN
|
||||
#undef R4
|
||||
#undef R16
|
||||
#undef PREPARE_STATE
|
||||
#undef USE_HW_SHA
|
||||
#undef ATTRIB_SHA
|
||||
#undef USE_VER_MIN
|
||||
@ -0,0 +1,141 @@
/* Sort.c -- Sort functions
2014-04-05 : Igor Pavlov : Public domain */

#include "Precomp.h"

#include "Sort.h"

#define HeapSortDown(p, k, size, temp) \
  { for (;;) { \
    size_t s = (k << 1); \
    if (s > size) break; \
    if (s < size && p[s + 1] > p[s]) s++; \
    if (temp >= p[s]) break; \
    p[k] = p[s]; k = s; \
  } p[k] = temp; }

void HeapSort(UInt32 *p, size_t size)
{
  if (size <= 1)
    return;
  p--;
  {
    size_t i = size / 2;
    do
    {
      UInt32 temp = p[i];
      size_t k = i;
      HeapSortDown(p, k, size, temp)
    }
    while (--i != 0);
  }
  /*
  do
  {
    size_t k = 1;
    UInt32 temp = p[size];
    p[size--] = p[1];
    HeapSortDown(p, k, size, temp)
  }
  while (size > 1);
  */
  while (size > 3)
  {
    UInt32 temp = p[size];
    size_t k = (p[3] > p[2]) ? 3 : 2;
    p[size--] = p[1];
    p[1] = p[k];
    HeapSortDown(p, k, size, temp)
  }
  {
    UInt32 temp = p[size];
    p[size] = p[1];
    if (size > 2 && p[2] < temp)
    {
      p[1] = p[2];
      p[2] = temp;
    }
    else
      p[1] = temp;
  }
}

void HeapSort64(UInt64 *p, size_t size)
{
  if (size <= 1)
    return;
  p--;
  {
    size_t i = size / 2;
    do
    {
      UInt64 temp = p[i];
      size_t k = i;
      HeapSortDown(p, k, size, temp)
    }
    while (--i != 0);
  }
  /*
  do
  {
    size_t k = 1;
    UInt64 temp = p[size];
    p[size--] = p[1];
    HeapSortDown(p, k, size, temp)
  }
  while (size > 1);
  */
  while (size > 3)
  {
    UInt64 temp = p[size];
    size_t k = (p[3] > p[2]) ? 3 : 2;
    p[size--] = p[1];
    p[1] = p[k];
    HeapSortDown(p, k, size, temp)
  }
  {
    UInt64 temp = p[size];
    p[size] = p[1];
    if (size > 2 && p[2] < temp)
    {
      p[1] = p[2];
      p[2] = temp;
    }
    else
      p[1] = temp;
  }
}

/*
#define HeapSortRefDown(p, vals, n, size, temp) \
  { size_t k = n; UInt32 val = vals[temp]; for (;;) { \
    size_t s = (k << 1); \
    if (s > size) break; \
    if (s < size && vals[p[s + 1]] > vals[p[s]]) s++; \
    if (val >= vals[p[s]]) break; \
    p[k] = p[s]; k = s; \
  } p[k] = temp; }

void HeapSortRef(UInt32 *p, UInt32 *vals, size_t size)
{
  if (size <= 1)
    return;
  p--;
  {
    size_t i = size / 2;
    do
    {
      UInt32 temp = p[i];
      HeapSortRefDown(p, vals, i, size, temp);
    }
    while (--i != 0);
  }
  do
  {
    UInt32 temp = p[size];
    p[size--] = p[1];
    HeapSortRefDown(p, vals, 1, size, temp);
  }
  while (size > 1);
}
*/

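/* Usage sketch (not part of the file): HeapSort() sorts a UInt32 array in
   place, in ascending order. Assumes Sort.h pulls in the UInt32 typedef. */
#include <stdio.h>
#include "Sort.h"

int main(void)
{
  UInt32 a[] = { 5, 1, 4, 1, 5, 9, 2, 6 };
  const size_t n = sizeof(a) / sizeof(a[0]);
  size_t i;
  HeapSort(a, n);
  for (i = 0; i < n; i++)
    printf("%u ", (unsigned)a[i]);
  printf("\n");   /* prints: 1 1 2 4 5 5 6 9 */
  return 0;
}
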
@ -0,0 +1,800 @@
/* SwapBytes.c -- Byte Swap conversion filter
2023-04-07 : Igor Pavlov : Public domain */

#include "Precomp.h"

#include "Compiler.h"
#include "CpuArch.h"
#include "RotateDefs.h"
#include "SwapBytes.h"

typedef UInt16 CSwapUInt16;
typedef UInt32 CSwapUInt32;
|
||||
// #define k_SwapBytes_Mode_BASE 0
|
||||
|
||||
#ifdef MY_CPU_X86_OR_AMD64
|
||||
|
||||
#define k_SwapBytes_Mode_SSE2 1
|
||||
#define k_SwapBytes_Mode_SSSE3 2
|
||||
#define k_SwapBytes_Mode_AVX2 3
|
||||
|
||||
// #if defined(__INTEL_COMPILER) && (__INTEL_COMPILER >= 1900)
|
||||
#if defined(__clang__) && (__clang_major__ >= 4) \
|
||||
|| defined(Z7_GCC_VERSION) && (Z7_GCC_VERSION >= 40701)
|
||||
#define k_SwapBytes_Mode_MAX k_SwapBytes_Mode_AVX2
|
||||
#define SWAP_ATTRIB_SSE2 __attribute__((__target__("sse2")))
|
||||
#define SWAP_ATTRIB_SSSE3 __attribute__((__target__("ssse3")))
|
||||
#define SWAP_ATTRIB_AVX2 __attribute__((__target__("avx2")))
|
||||
#elif defined(_MSC_VER)
|
||||
#if (_MSC_VER == 1900)
|
||||
#pragma warning(disable : 4752) // found Intel(R) Advanced Vector Extensions; consider using /arch:AVX
|
||||
#endif
|
||||
#if (_MSC_VER >= 1900)
|
||||
#define k_SwapBytes_Mode_MAX k_SwapBytes_Mode_AVX2
|
||||
#elif (_MSC_VER >= 1500) // (VS2008)
|
||||
#define k_SwapBytes_Mode_MAX k_SwapBytes_Mode_SSSE3
|
||||
#elif (_MSC_VER >= 1310) // (VS2003)
|
||||
#define k_SwapBytes_Mode_MAX k_SwapBytes_Mode_SSE2
|
||||
#endif
|
||||
#endif // _MSC_VER
|
||||
|
||||
/*
|
||||
// for debug
|
||||
#ifdef k_SwapBytes_Mode_MAX
|
||||
#undef k_SwapBytes_Mode_MAX
|
||||
#endif
|
||||
*/
|
||||
|
||||
#ifndef k_SwapBytes_Mode_MAX
|
||||
#define k_SwapBytes_Mode_MAX 0
|
||||
#endif
|
||||
|
||||
#if (k_SwapBytes_Mode_MAX != 0) && defined(MY_CPU_AMD64)
|
||||
#define k_SwapBytes_Mode_MIN k_SwapBytes_Mode_SSE2
|
||||
#else
|
||||
#define k_SwapBytes_Mode_MIN 0
|
||||
#endif
|
||||
|
||||
#if (k_SwapBytes_Mode_MAX >= k_SwapBytes_Mode_AVX2)
|
||||
#define USE_SWAP_AVX2
|
||||
#endif
|
||||
#if (k_SwapBytes_Mode_MAX >= k_SwapBytes_Mode_SSSE3)
|
||||
#define USE_SWAP_SSSE3
|
||||
#endif
|
||||
#if (k_SwapBytes_Mode_MAX >= k_SwapBytes_Mode_SSE2)
|
||||
#define USE_SWAP_128
|
||||
#endif
|
||||
|
||||
#if k_SwapBytes_Mode_MAX <= k_SwapBytes_Mode_MIN || !defined(USE_SWAP_128)
|
||||
#define FORCE_SWAP_MODE
|
||||
#endif
|
||||
|
||||
|
||||
#ifdef USE_SWAP_128
|
||||
/*
|
||||
<mmintrin.h> MMX
|
||||
<xmmintrin.h> SSE
|
||||
<emmintrin.h> SSE2
|
||||
<pmmintrin.h> SSE3
|
||||
<tmmintrin.h> SSSE3
|
||||
<smmintrin.h> SSE4.1
|
||||
<nmmintrin.h> SSE4.2
|
||||
<ammintrin.h> SSE4A
|
||||
<wmmintrin.h> AES
|
||||
<immintrin.h> AVX, AVX2, FMA
|
||||
*/
|
||||
|
||||
#include <emmintrin.h> // sse2
|
||||
// typedef __m128i v128;
|
||||
|
||||
#define SWAP2_128(i) { \
|
||||
const __m128i v = *(const __m128i *)(const void *)(items + (i) * 8); \
|
||||
*( __m128i *)( void *)(items + (i) * 8) = \
|
||||
_mm_or_si128( \
|
||||
_mm_slli_epi16(v, 8), \
|
||||
_mm_srli_epi16(v, 8)); }
|
||||
// _mm_or_si128() has more ports to execute than _mm_add_epi16().
|
||||
|
||||
static
|
||||
#ifdef SWAP_ATTRIB_SSE2
|
||||
SWAP_ATTRIB_SSE2
|
||||
#endif
|
||||
void
|
||||
Z7_FASTCALL
|
||||
SwapBytes2_128(CSwapUInt16 *items, const CSwapUInt16 *lim)
|
||||
{
|
||||
Z7_PRAGMA_OPT_DISABLE_LOOP_UNROLL_VECTORIZE
|
||||
do
|
||||
{
|
||||
SWAP2_128(0) SWAP2_128(1) items += 2 * 8;
|
||||
SWAP2_128(0) SWAP2_128(1) items += 2 * 8;
|
||||
}
|
||||
while (items != lim);
|
||||
}
|
||||
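/* Scalar reference sketch (not part of the filter): the SSE2 kernel above
   simply exchanges the two bytes of every 16-bit item, i.e. (v << 8) | (v >> 8).
   The function name is invented for illustration. */
static void SwapBytes2_Scalar_Sketch(CSwapUInt16 *items, const CSwapUInt16 *lim)
{
  for (; items != lim; items++)
  {
    const unsigned v = *items;
    *items = (CSwapUInt16)((v << 8) | (v >> 8));
  }
}
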
|
||||
/*
|
||||
// sse2
|
||||
#define SWAP4_128_pack(i) { \
|
||||
__m128i v = *(const __m128i *)(const void *)(items + (i) * 4); \
|
||||
__m128i v0 = _mm_unpacklo_epi8(v, mask); \
|
||||
__m128i v1 = _mm_unpackhi_epi8(v, mask); \
|
||||
v0 = _mm_shufflelo_epi16(v0, 0x1b); \
|
||||
v1 = _mm_shufflelo_epi16(v1, 0x1b); \
|
||||
v0 = _mm_shufflehi_epi16(v0, 0x1b); \
|
||||
v1 = _mm_shufflehi_epi16(v1, 0x1b); \
|
||||
*(__m128i *)(void *)(items + (i) * 4) = _mm_packus_epi16(v0, v1); }
|
||||
|
||||
static
|
||||
#ifdef SWAP_ATTRIB_SSE2
|
||||
SWAP_ATTRIB_SSE2
|
||||
#endif
|
||||
void
|
||||
Z7_FASTCALL
|
||||
SwapBytes4_128_pack(CSwapUInt32 *items, const CSwapUInt32 *lim)
|
||||
{
|
||||
const __m128i mask = _mm_setzero_si128();
|
||||
// const __m128i mask = _mm_set_epi16(0, 0, 0, 0, 0, 0, 0, 0);
|
||||
Z7_PRAGMA_OPT_DISABLE_LOOP_UNROLL_VECTORIZE
|
||||
do
|
||||
{
|
||||
SWAP4_128_pack(0); items += 1 * 4;
|
||||
// SWAP4_128_pack(0); SWAP4_128_pack(1); items += 2 * 4;
|
||||
}
|
||||
while (items != lim);
|
||||
}
|
||||
|
||||
// sse2
|
||||
#define SWAP4_128_shift(i) { \
|
||||
__m128i v = *(const __m128i *)(const void *)(items + (i) * 4); \
|
||||
__m128i v2; \
|
||||
v2 = _mm_or_si128( \
|
||||
_mm_slli_si128(_mm_and_si128(v, mask), 1), \
|
||||
_mm_and_si128(_mm_srli_si128(v, 1), mask)); \
|
||||
v = _mm_or_si128( \
|
||||
_mm_slli_epi32(v, 24), \
|
||||
_mm_srli_epi32(v, 24)); \
|
||||
*(__m128i *)(void *)(items + (i) * 4) = _mm_or_si128(v2, v); }
|
||||
|
||||
static
|
||||
#ifdef SWAP_ATTRIB_SSE2
|
||||
SWAP_ATTRIB_SSE2
|
||||
#endif
|
||||
void
|
||||
Z7_FASTCALL
|
||||
SwapBytes4_128_shift(CSwapUInt32 *items, const CSwapUInt32 *lim)
|
||||
{
|
||||
#define M1 0xff00
|
||||
const __m128i mask = _mm_set_epi32(M1, M1, M1, M1);
|
||||
Z7_PRAGMA_OPT_DISABLE_LOOP_UNROLL_VECTORIZE
|
||||
do
|
||||
{
|
||||
// SWAP4_128_shift(0) SWAP4_128_shift(1) items += 2 * 4;
|
||||
// SWAP4_128_shift(0) SWAP4_128_shift(1) items += 2 * 4;
|
||||
SWAP4_128_shift(0); items += 1 * 4;
|
||||
}
|
||||
while (items != lim);
|
||||
}
|
||||
*/
|
||||
|
||||
|
||||
#if defined(USE_SWAP_SSSE3) || defined(USE_SWAP_AVX2)
|
||||
|
||||
#define SWAP_SHUF_REV_SEQ_2_VALS(v) (v)+1, (v)
|
||||
#define SWAP_SHUF_REV_SEQ_4_VALS(v) (v)+3, (v)+2, (v)+1, (v)
|
||||
|
||||
#define SWAP2_SHUF_MASK_16_BYTES \
|
||||
SWAP_SHUF_REV_SEQ_2_VALS (0 * 2), \
|
||||
SWAP_SHUF_REV_SEQ_2_VALS (1 * 2), \
|
||||
SWAP_SHUF_REV_SEQ_2_VALS (2 * 2), \
|
||||
SWAP_SHUF_REV_SEQ_2_VALS (3 * 2), \
|
||||
SWAP_SHUF_REV_SEQ_2_VALS (4 * 2), \
|
||||
SWAP_SHUF_REV_SEQ_2_VALS (5 * 2), \
|
||||
SWAP_SHUF_REV_SEQ_2_VALS (6 * 2), \
|
||||
SWAP_SHUF_REV_SEQ_2_VALS (7 * 2)
|
||||
|
||||
#define SWAP4_SHUF_MASK_16_BYTES \
|
||||
SWAP_SHUF_REV_SEQ_4_VALS (0 * 4), \
|
||||
SWAP_SHUF_REV_SEQ_4_VALS (1 * 4), \
|
||||
SWAP_SHUF_REV_SEQ_4_VALS (2 * 4), \
|
||||
SWAP_SHUF_REV_SEQ_4_VALS (3 * 4)
|
||||
|
||||
#if defined(USE_SWAP_AVX2)
|
||||
/* if we use 256_BIT_INIT_MASK, each static array mask will be larger for 16 bytes */
|
||||
// #define SWAP_USE_256_BIT_INIT_MASK
|
||||
#endif
|
||||
|
||||
#if defined(SWAP_USE_256_BIT_INIT_MASK) && defined(USE_SWAP_AVX2)
|
||||
#define SWAP_MASK_INIT_SIZE 32
|
||||
#else
|
||||
#define SWAP_MASK_INIT_SIZE 16
|
||||
#endif
|
||||
|
||||
MY_ALIGN(SWAP_MASK_INIT_SIZE)
|
||||
static const Byte k_ShufMask_Swap2[] =
|
||||
{
|
||||
SWAP2_SHUF_MASK_16_BYTES
|
||||
#if SWAP_MASK_INIT_SIZE > 16
|
||||
, SWAP2_SHUF_MASK_16_BYTES
|
||||
#endif
|
||||
};
|
||||
|
||||
MY_ALIGN(SWAP_MASK_INIT_SIZE)
|
||||
static const Byte k_ShufMask_Swap4[] =
|
||||
{
|
||||
SWAP4_SHUF_MASK_16_BYTES
|
||||
#if SWAP_MASK_INIT_SIZE > 16
|
||||
, SWAP4_SHUF_MASK_16_BYTES
|
||||
#endif
|
||||
};
|
||||
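/* For orientation only (not used by the code): the two masks above, expanded.
   Each one reverses the bytes inside a 16-bit or 32-bit element:
   k_ShufMask_Swap2: 1,0, 3,2, 5,4, 7,6, 9,8, 11,10, 13,12, 15,14 (pattern repeats if the array is 32 bytes)
   k_ShufMask_Swap4: 3,2,1,0, 7,6,5,4, 11,10,9,8, 15,14,13,12     (pattern repeats if the array is 32 bytes) */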
|
||||
|
||||
#ifdef USE_SWAP_SSSE3
|
||||
|
||||
#include <tmmintrin.h> // ssse3
|
||||
|
||||
#define SHUF_128(i) *(items + (i)) = \
|
||||
_mm_shuffle_epi8(*(items + (i)), mask); // SSSE3
|
||||
|
||||
// Z7_NO_INLINE
|
||||
static
|
||||
#ifdef SWAP_ATTRIB_SSSE3
|
||||
SWAP_ATTRIB_SSSE3
|
||||
#endif
|
||||
Z7_ATTRIB_NO_VECTORIZE
|
||||
void
|
||||
Z7_FASTCALL
|
||||
ShufBytes_128(void *items8, const void *lim8, const void *mask128_ptr)
|
||||
{
|
||||
__m128i *items = (__m128i *)items8;
|
||||
const __m128i *lim = (const __m128i *)lim8;
|
||||
// const __m128i mask = _mm_set_epi8(SHUF_SWAP2_MASK_16_VALS);
|
||||
// const __m128i mask = _mm_set_epi8(SHUF_SWAP4_MASK_16_VALS);
|
||||
// const __m128i mask = _mm_load_si128((const __m128i *)(const void *)&(k_ShufMask_Swap4[0]));
|
||||
// const __m128i mask = _mm_load_si128((const __m128i *)(const void *)&(k_ShufMask_Swap4[0]));
|
||||
// const __m128i mask = *(const __m128i *)(const void *)&(k_ShufMask_Swap4[0]);
|
||||
const __m128i mask = *(const __m128i *)mask128_ptr;
|
||||
Z7_PRAGMA_OPT_DISABLE_LOOP_UNROLL_VECTORIZE
|
||||
do
|
||||
{
|
||||
SHUF_128(0) SHUF_128(1) items += 2;
|
||||
SHUF_128(0) SHUF_128(1) items += 2;
|
||||
}
|
||||
while (items != lim);
|
||||
}
|
||||
|
||||
#endif // USE_SWAP_SSSE3
|
||||
|
||||
|
||||
|
||||
#ifdef USE_SWAP_AVX2
|
||||
|
||||
#include <immintrin.h> // avx, avx2
|
||||
#if defined(__clang__)
|
||||
#include <avxintrin.h>
|
||||
#include <avx2intrin.h>
|
||||
#endif
|
||||
|
||||
#define SHUF_256(i) *(items + (i)) = \
|
||||
_mm256_shuffle_epi8(*(items + (i)), mask); // AVX2
|
||||
|
||||
// Z7_NO_INLINE
|
||||
static
|
||||
#ifdef SWAP_ATTRIB_AVX2
|
||||
SWAP_ATTRIB_AVX2
|
||||
#endif
|
||||
Z7_ATTRIB_NO_VECTORIZE
|
||||
void
|
||||
Z7_FASTCALL
|
||||
ShufBytes_256(void *items8, const void *lim8, const void *mask128_ptr)
|
||||
{
|
||||
__m256i *items = (__m256i *)items8;
|
||||
const __m256i *lim = (const __m256i *)lim8;
|
||||
/*
|
||||
UNUSED_VAR(mask128_ptr)
|
||||
__m256i mask =
|
||||
for Swap4: _mm256_setr_epi8(SWAP4_SHUF_MASK_16_BYTES, SWAP4_SHUF_MASK_16_BYTES);
|
||||
for Swap2: _mm256_setr_epi8(SWAP2_SHUF_MASK_16_BYTES, SWAP2_SHUF_MASK_16_BYTES);
|
||||
*/
|
||||
const __m256i mask =
|
||||
#if SWAP_MASK_INIT_SIZE > 16
|
||||
*(const __m256i *)(const void *)mask128_ptr;
|
||||
#else
|
||||
/* msvc: broadcastsi128() version reserves the stack for no reason
|
||||
msvc 19.29-: _mm256_insertf128_si256() / _mm256_set_m128i)) versions use non-avx movdqu xmm0,XMMWORD PTR [r8]
|
||||
msvc 19.30+ (VS2022): replaces _mm256_set_m128i(m,m) to vbroadcastf128(m) as we want
|
||||
*/
|
||||
// _mm256_broadcastsi128_si256(*mask128_ptr);
|
||||
/*
|
||||
#define MY_mm256_set_m128i(hi, lo) _mm256_insertf128_si256(_mm256_castsi128_si256(lo), (hi), 1)
|
||||
MY_mm256_set_m128i
|
||||
*/
|
||||
_mm256_set_m128i(
|
||||
*(const __m128i *)mask128_ptr,
|
||||
*(const __m128i *)mask128_ptr);
|
||||
#endif
|
||||
|
||||
Z7_PRAGMA_OPT_DISABLE_LOOP_UNROLL_VECTORIZE
|
||||
do
|
||||
{
|
||||
SHUF_256(0) SHUF_256(1) items += 2;
|
||||
SHUF_256(0) SHUF_256(1) items += 2;
|
||||
}
|
||||
while (items != lim);
|
||||
}
|
||||
|
||||
#endif // USE_SWAP_AVX2
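/* Editor's note (not part of the 7-Zip sources): _mm256_shuffle_epi8() shuffles each
   128-bit lane independently, which is why the 16-byte mask is either stored twice
   (SWAP_USE_256_BIT_INIT_MASK) or broadcast to both lanes before the loop above.
   A hedged sketch of that broadcast step, with an illustrative function name: */
#include <immintrin.h>
static __m256i demo_broadcast_mask_avx2(const void *mask16 /* 16-byte shuffle mask */)
{
  const __m128i m = _mm_loadu_si128((const __m128i *)mask16);
  return _mm256_set_m128i(m, m);   // same 16-byte pattern in both 128-bit lanes
}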
|
||||
#endif // USE_SWAP_SSSE3 || USE_SWAP_AVX2
|
||||
#endif // USE_SWAP_128
|
||||
|
||||
|
||||
|
||||
// compile message "NEON intrinsics not available with the soft-float ABI"
|
||||
#elif defined(MY_CPU_ARM_OR_ARM64) || \
|
||||
(defined(__ARM_ARCH) && (__ARM_ARCH >= 7))
|
||||
// #elif defined(MY_CPU_ARM64)
|
||||
|
||||
#if defined(__clang__) && (__clang_major__ >= 8) \
|
||||
|| defined(__GNUC__) && (__GNUC__ >= 8)
|
||||
#if (defined(__ARM_ARCH) && (__ARM_ARCH >= 7)) \
|
||||
|| defined(MY_CPU_ARM64)
|
||||
#define USE_SWAP_128
|
||||
#endif
|
||||
#ifdef MY_CPU_ARM64
|
||||
// #define SWAP_ATTRIB_NEON __attribute__((__target__("")))
|
||||
#else
|
||||
// #define SWAP_ATTRIB_NEON __attribute__((__target__("fpu=crypto-neon-fp-armv8")))
|
||||
#endif
|
||||
#elif defined(_MSC_VER)
|
||||
#if (_MSC_VER >= 1910)
|
||||
#define USE_SWAP_128
|
||||
#endif
|
||||
#endif
|
||||
|
||||
#if defined(_MSC_VER) && defined(MY_CPU_ARM64)
|
||||
#include <arm64_neon.h>
|
||||
#else
|
||||
#include <arm_neon.h>
|
||||
#endif
|
||||
|
||||
#ifndef USE_SWAP_128
|
||||
#define FORCE_SWAP_MODE
|
||||
#else
|
||||
|
||||
#ifdef MY_CPU_ARM64
|
||||
// for debug: comment out this define
|
||||
#define FORCE_SWAP_MODE
|
||||
#else
|
||||
#define k_SwapBytes_Mode_NEON 1
|
||||
#endif
|
||||
// typedef uint8x16_t v128;
|
||||
#define SWAP2_128(i) *(uint8x16_t *) (void *)(items + (i) * 8) = \
|
||||
vrev16q_u8(*(const uint8x16_t *)(const void *)(items + (i) * 8));
|
||||
#define SWAP4_128(i) *(uint8x16_t *) (void *)(items + (i) * 4) = \
|
||||
vrev32q_u8(*(const uint8x16_t *)(const void *)(items + (i) * 4));
|
||||
|
||||
// Z7_NO_INLINE
|
||||
static
|
||||
#ifdef SWAP_ATTRIB_NEON
|
||||
SWAP_ATTRIB_NEON
|
||||
#endif
|
||||
Z7_ATTRIB_NO_VECTORIZE
|
||||
void
|
||||
Z7_FASTCALL
|
||||
SwapBytes2_128(CSwapUInt16 *items, const CSwapUInt16 *lim)
|
||||
{
|
||||
Z7_PRAGMA_OPT_DISABLE_LOOP_UNROLL_VECTORIZE
|
||||
do
|
||||
{
|
||||
SWAP2_128(0) SWAP2_128(1) items += 2 * 8;
|
||||
SWAP2_128(0) SWAP2_128(1) items += 2 * 8;
|
||||
}
|
||||
while (items != lim);
|
||||
}
|
||||
|
||||
// Z7_NO_INLINE
|
||||
static
|
||||
#ifdef SWAP_ATTRIB_NEON
|
||||
SWAP_ATTRIB_NEON
|
||||
#endif
|
||||
Z7_ATTRIB_NO_VECTORIZE
|
||||
void
|
||||
Z7_FASTCALL
|
||||
SwapBytes4_128(CSwapUInt32 *items, const CSwapUInt32 *lim)
|
||||
{
|
||||
Z7_PRAGMA_OPT_DISABLE_LOOP_UNROLL_VECTORIZE
|
||||
do
|
||||
{
|
||||
SWAP4_128(0) SWAP4_128(1) items += 2 * 4;
|
||||
SWAP4_128(0) SWAP4_128(1) items += 2 * 4;
|
||||
}
|
||||
while (items != lim);
|
||||
}
|
||||
|
||||
#endif // USE_SWAP_128
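/* Editor's note (not part of the 7-Zip sources): vrev16q_u8() reverses the bytes inside
   every 16-bit halfword of a 128-bit NEON vector and vrev32q_u8() inside every 32-bit
   word, which is exactly the per-element swap the SWAP2_128 / SWAP4_128 macros apply.
   A minimal sketch (assumes <arm_neon.h> is included, as above; the name is illustrative): */
static void demo_swap4_neon(unsigned int *p /* 4 values */)
{
  uint8x16_t v = vld1q_u8((const unsigned char *)(const void *)p);
  v = vrev32q_u8(v);                        // byte-swap each 32-bit element
  vst1q_u8((unsigned char *)(void *)p, v);
}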
|
||||
|
||||
#else // MY_CPU_ARM_OR_ARM64
|
||||
#define FORCE_SWAP_MODE
|
||||
#endif // MY_CPU_ARM_OR_ARM64
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
#if defined(Z7_MSC_VER_ORIGINAL) && defined(MY_CPU_X86)
|
||||
/* _byteswap_ushort() in MSVC x86 32-bit works via slow { mov dh, al; mov dl, ah }
|
||||
So we use our own byteswap versions */
|
||||
#if (_MSC_VER < 1400 ) // old MSVC-X86 without _rotr16() support
|
||||
#define SWAP2_16(i) { UInt32 v = items[i]; v += (v << 16); v >>= 8; items[i] = (CSwapUInt16)v; }
|
||||
#else // is new MSVC-X86 with fast _rotr16()
|
||||
#include <intrin.h>
|
||||
#define SWAP2_16(i) { items[i] = _rotr16(items[i], 8); }
|
||||
#endif
|
||||
#else // is not MSVC-X86
|
||||
#define SWAP2_16(i) { CSwapUInt16 v = items[i]; items[i] = Z7_BSWAP16(v); }
|
||||
#endif // MSVC-X86
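/* Editor's worked example (not part of the 7-Zip sources) for the pre-_rotr16
   fallback above, with items[i] = 0xAABB:
     v              = 0x0000AABB
     v + (v << 16)  = 0xAABBAABB
     v >> 8         = 0x00AABBAA
     (CSwapUInt16)v = 0xBBAA      // the two bytes are swapped
*/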
|
||||
|
||||
#if defined(Z7_CPU_FAST_BSWAP_SUPPORTED)
|
||||
#define SWAP4_32(i) { CSwapUInt32 v = items[i]; items[i] = Z7_BSWAP32(v); }
|
||||
#else
|
||||
#define SWAP4_32(i) \
|
||||
{ UInt32 v = items[i]; \
|
||||
v = ((v & 0xff00ff) << 8) + ((v >> 8) & 0xff00ff); \
|
||||
v = rotlFixed(v, 16); \
|
||||
items[i] = v; }
|
||||
#endif
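/* Editor's worked example (not part of the 7-Zip sources) for the portable SWAP4_32
   path above, with v = 0x11223344:
     (v & 0x00ff00ff) << 8   = 0x22004400
     (v >> 8) & 0x00ff00ff   = 0x00110033
     sum                     = 0x22114433   // adjacent bytes swapped
     rotlFixed(sum, 16)      = 0x44332211   // full 32-bit byte reversal
*/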
|
||||
|
||||
|
||||
|
||||
|
||||
#if defined(FORCE_SWAP_MODE) && defined(USE_SWAP_128)
|
||||
#define DEFAULT_Swap2 SwapBytes2_128
|
||||
#if !defined(MY_CPU_X86_OR_AMD64)
|
||||
#define DEFAULT_Swap4 SwapBytes4_128
|
||||
#endif
|
||||
#endif
|
||||
|
||||
#if !defined(DEFAULT_Swap2) || !defined(DEFAULT_Swap4)
|
||||
|
||||
#define SWAP_BASE_FUNCS_PREFIXES \
|
||||
Z7_FORCE_INLINE \
|
||||
static \
|
||||
Z7_ATTRIB_NO_VECTOR \
|
||||
void Z7_FASTCALL
|
||||
|
||||
|
||||
#ifdef MY_CPU_64BIT
|
||||
|
||||
#if defined(MY_CPU_ARM64) \
|
||||
&& defined(__ARM_ARCH) && (__ARM_ARCH >= 8) \
|
||||
&& ( (defined(__GNUC__) && (__GNUC__ >= 4)) \
|
||||
|| (defined(__clang__) && (__clang_major__ >= 4)))
|
||||
|
||||
#define SWAP2_64_VAR(v) asm ("rev16 %x0,%x0" : "+r" (v));
|
||||
#define SWAP4_64_VAR(v) asm ("rev32 %x0,%x0" : "+r" (v));
|
||||
|
||||
#else // is not ARM64-GNU
|
||||
|
||||
#if !defined(MY_CPU_X86_OR_AMD64) || (k_SwapBytes_Mode_MIN == 0) || !defined(USE_SWAP_128)
|
||||
#define SWAP2_64_VAR(v) \
|
||||
v = ( 0x00ff00ff00ff00ff & (v >> 8)) \
|
||||
+ ((0x00ff00ff00ff00ff & v) << 8);
|
||||
/* using '+' instead of '|' here gives faster code in MSVC */
|
||||
#endif
|
||||
|
||||
#ifdef Z7_CPU_FAST_BSWAP_SUPPORTED
|
||||
#define SWAP4_64_VAR(v) \
|
||||
v = Z7_BSWAP64(v); \
|
||||
v = Z7_ROTL64(v, 32);
|
||||
#else
|
||||
#define SWAP4_64_VAR(v) \
|
||||
v = ( 0x000000ff000000ff & (v >> 24)) \
|
||||
+ ((0x000000ff000000ff & v) << 24 ) \
|
||||
+ ( 0x0000ff000000ff00 & (v >> 8)) \
|
||||
+ ((0x0000ff000000ff00 & v) << 8 ) \
|
||||
;
|
||||
#endif
|
||||
|
||||
#endif // ARM64-GNU
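/* Editor's worked example (not part of the 7-Zip sources) for the Z7_BSWAP64 +
   Z7_ROTL64(v, 32) form of SWAP4_64_VAR above: it byte-swaps the two packed UInt32
   values without exchanging them. With v = 0x1122334455667788 (items: 0x55667788,
   0x11223344 on a little-endian machine):
     Z7_BSWAP64(v)       = 0x8877665544332211
     Z7_ROTL64(..., 32)  = 0x4433221188776655   // each UInt32 byte-swapped, order kept
*/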
|
||||
|
||||
|
||||
#ifdef SWAP2_64_VAR
|
||||
|
||||
#define SWAP2_64(i) { \
|
||||
UInt64 v = *(const UInt64 *)(const void *)(items + (i) * 4); \
|
||||
SWAP2_64_VAR(v) \
|
||||
*(UInt64 *)(void *)(items + (i) * 4) = v; }
|
||||
|
||||
SWAP_BASE_FUNCS_PREFIXES
|
||||
SwapBytes2_64(CSwapUInt16 *items, const CSwapUInt16 *lim)
|
||||
{
|
||||
Z7_PRAGMA_OPT_DISABLE_LOOP_UNROLL_VECTORIZE
|
||||
do
|
||||
{
|
||||
SWAP2_64(0) SWAP2_64(1) items += 2 * 4;
|
||||
SWAP2_64(0) SWAP2_64(1) items += 2 * 4;
|
||||
}
|
||||
while (items != lim);
|
||||
}
|
||||
|
||||
#define DEFAULT_Swap2 SwapBytes2_64
|
||||
#if !defined(FORCE_SWAP_MODE)
|
||||
#define SWAP2_DEFAULT_MODE 0
|
||||
#endif
|
||||
#else // !defined(SWAP2_64_VAR)
|
||||
#define DEFAULT_Swap2 SwapBytes2_128
|
||||
#if !defined(FORCE_SWAP_MODE)
|
||||
#define SWAP2_DEFAULT_MODE 1
|
||||
#endif
|
||||
#endif // SWAP2_64_VAR
|
||||
|
||||
|
||||
#define SWAP4_64(i) { \
|
||||
UInt64 v = *(const UInt64 *)(const void *)(items + (i) * 2); \
|
||||
SWAP4_64_VAR(v) \
|
||||
*(UInt64 *)(void *)(items + (i) * 2) = v; }
|
||||
|
||||
SWAP_BASE_FUNCS_PREFIXES
|
||||
SwapBytes4_64(CSwapUInt32 *items, const CSwapUInt32 *lim)
|
||||
{
|
||||
Z7_PRAGMA_OPT_DISABLE_LOOP_UNROLL_VECTORIZE
|
||||
do
|
||||
{
|
||||
SWAP4_64(0) SWAP4_64(1) items += 2 * 2;
|
||||
SWAP4_64(0) SWAP4_64(1) items += 2 * 2;
|
||||
}
|
||||
while (items != lim);
|
||||
}
|
||||
|
||||
#define DEFAULT_Swap4 SwapBytes4_64
|
||||
|
||||
#else // is not 64BIT
|
||||
|
||||
|
||||
#if defined(MY_CPU_ARM_OR_ARM64) \
|
||||
&& defined(__ARM_ARCH) && (__ARM_ARCH >= 6) \
|
||||
&& ( (defined(__GNUC__) && (__GNUC__ >= 4)) \
|
||||
|| (defined(__clang__) && (__clang_major__ >= 4)))
|
||||
|
||||
#ifdef MY_CPU_64BIT
|
||||
#define SWAP2_32_VAR(v) asm ("rev16 %w0,%w0" : "+r" (v));
|
||||
#else
|
||||
#define SWAP2_32_VAR(v) asm ("rev16 %0,%0" : "+r" (v)); // for clang/gcc
|
||||
// asm ("rev16 %r0,%r0" : "+r" (a)); // for gcc
|
||||
#endif
|
||||
|
||||
#elif defined(_MSC_VER) && (_MSC_VER < 1300) && defined(MY_CPU_X86) \
|
||||
|| !defined(Z7_CPU_FAST_BSWAP_SUPPORTED) \
|
||||
|| !defined(Z7_CPU_FAST_ROTATE_SUPPORTED)
|
||||
// old msvc doesn't support _byteswap_ulong()
|
||||
#define SWAP2_32_VAR(v) \
|
||||
v = ((v & 0xff00ff) << 8) + ((v >> 8) & 0xff00ff);
|
||||
|
||||
#else // is not ARM and is not old-MSVC-X86 and fast BSWAP/ROTATE are supported
|
||||
#define SWAP2_32_VAR(v) \
|
||||
v = Z7_BSWAP32(v); \
|
||||
v = rotlFixed(v, 16);
|
||||
|
||||
#endif // GNU-ARM*
|
||||
|
||||
#define SWAP2_32(i) { \
|
||||
UInt32 v = *(const UInt32 *)(const void *)(items + (i) * 2); \
|
||||
SWAP2_32_VAR(v); \
|
||||
*(UInt32 *)(void *)(items + (i) * 2) = v; }
|
||||
|
||||
|
||||
SWAP_BASE_FUNCS_PREFIXES
|
||||
SwapBytes2_32(CSwapUInt16 *items, const CSwapUInt16 *lim)
|
||||
{
|
||||
Z7_PRAGMA_OPT_DISABLE_LOOP_UNROLL_VECTORIZE
|
||||
do
|
||||
{
|
||||
SWAP2_32(0) SWAP2_32(1) items += 2 * 2;
|
||||
SWAP2_32(0) SWAP2_32(1) items += 2 * 2;
|
||||
}
|
||||
while (items != lim);
|
||||
}
|
||||
|
||||
|
||||
SWAP_BASE_FUNCS_PREFIXES
|
||||
SwapBytes4_32(CSwapUInt32 *items, const CSwapUInt32 *lim)
|
||||
{
|
||||
Z7_PRAGMA_OPT_DISABLE_LOOP_UNROLL_VECTORIZE
|
||||
do
|
||||
{
|
||||
SWAP4_32(0) SWAP4_32(1) items += 2;
|
||||
SWAP4_32(0) SWAP4_32(1) items += 2;
|
||||
}
|
||||
while (items != lim);
|
||||
}
|
||||
|
||||
#define DEFAULT_Swap2 SwapBytes2_32
|
||||
#define DEFAULT_Swap4 SwapBytes4_32
|
||||
#if !defined(FORCE_SWAP_MODE)
|
||||
#define SWAP2_DEFAULT_MODE 0
|
||||
#endif
|
||||
|
||||
#endif // MY_CPU_64BIT
|
||||
#endif // if !defined(DEFAULT_Swap2) || !defined(DEFAULT_Swap4)
|
||||
|
||||
|
||||
|
||||
#if !defined(FORCE_SWAP_MODE)
|
||||
static unsigned g_SwapBytes_Mode;
|
||||
#endif
|
||||
|
||||
/* size of largest unrolled loop iteration: 128 bytes = 4 * 32 bytes (AVX). */
|
||||
#define SWAP_ITERATION_BLOCK_SIZE_MAX (1 << 7)
|
||||
|
||||
// 32 bytes for AVX, or 2 * 16 bytes for NEON.
|
||||
#define SWAP_VECTOR_ALIGN_SIZE (1 << 5)
|
||||
|
||||
Z7_NO_INLINE
|
||||
void z7_SwapBytes2(CSwapUInt16 *items, size_t numItems)
|
||||
{
|
||||
Z7_PRAGMA_OPT_DISABLE_LOOP_UNROLL_VECTORIZE
|
||||
for (; numItems != 0 && ((unsigned)(ptrdiff_t)items & (SWAP_VECTOR_ALIGN_SIZE - 1)) != 0; numItems--)
|
||||
{
|
||||
SWAP2_16(0)
|
||||
items++;
|
||||
}
|
||||
{
|
||||
const size_t k_Align_Mask = SWAP_ITERATION_BLOCK_SIZE_MAX / sizeof(CSwapUInt16) - 1;
|
||||
size_t numItems2 = numItems;
|
||||
CSwapUInt16 *lim;
|
||||
numItems &= k_Align_Mask;
|
||||
numItems2 &= ~(size_t)k_Align_Mask;
|
||||
lim = items + numItems2;
|
||||
if (numItems2 != 0)
|
||||
{
|
||||
#if !defined(FORCE_SWAP_MODE)
|
||||
#ifdef MY_CPU_X86_OR_AMD64
|
||||
#ifdef USE_SWAP_AVX2
|
||||
if (g_SwapBytes_Mode > k_SwapBytes_Mode_SSSE3)
|
||||
ShufBytes_256((__m256i *)(void *)items,
|
||||
(const __m256i *)(const void *)lim,
|
||||
(const __m128i *)(const void *)&(k_ShufMask_Swap2[0]));
|
||||
else
|
||||
#endif
|
||||
#ifdef USE_SWAP_SSSE3
|
||||
if (g_SwapBytes_Mode >= k_SwapBytes_Mode_SSSE3)
|
||||
ShufBytes_128((__m128i *)(void *)items,
|
||||
(const __m128i *)(const void *)lim,
|
||||
(const __m128i *)(const void *)&(k_ShufMask_Swap2[0]));
|
||||
else
|
||||
#endif
|
||||
#endif // MY_CPU_X86_OR_AMD64
|
||||
#if SWAP2_DEFAULT_MODE == 0
|
||||
if (g_SwapBytes_Mode != 0)
|
||||
SwapBytes2_128(items, lim);
|
||||
else
|
||||
#endif
|
||||
#endif // FORCE_SWAP_MODE
|
||||
DEFAULT_Swap2(items, lim);
|
||||
}
|
||||
items = lim;
|
||||
}
|
||||
Z7_PRAGMA_OPT_DISABLE_LOOP_UNROLL_VECTORIZE
|
||||
for (; numItems != 0; numItems--)
|
||||
{
|
||||
SWAP2_16(0)
|
||||
items++;
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
Z7_NO_INLINE
|
||||
void z7_SwapBytes4(CSwapUInt32 *items, size_t numItems)
|
||||
{
|
||||
Z7_PRAGMA_OPT_DISABLE_LOOP_UNROLL_VECTORIZE
|
||||
for (; numItems != 0 && ((unsigned)(ptrdiff_t)items & (SWAP_VECTOR_ALIGN_SIZE - 1)) != 0; numItems--)
|
||||
{
|
||||
SWAP4_32(0)
|
||||
items++;
|
||||
}
|
||||
{
|
||||
const size_t k_Align_Mask = SWAP_ITERATION_BLOCK_SIZE_MAX / sizeof(CSwapUInt32) - 1;
|
||||
size_t numItems2 = numItems;
|
||||
CSwapUInt32 *lim;
|
||||
numItems &= k_Align_Mask;
|
||||
numItems2 &= ~(size_t)k_Align_Mask;
|
||||
lim = items + numItems2;
|
||||
if (numItems2 != 0)
|
||||
{
|
||||
#if !defined(FORCE_SWAP_MODE)
|
||||
#ifdef MY_CPU_X86_OR_AMD64
|
||||
#ifdef USE_SWAP_AVX2
|
||||
if (g_SwapBytes_Mode > k_SwapBytes_Mode_SSSE3)
|
||||
ShufBytes_256((__m256i *)(void *)items,
|
||||
(const __m256i *)(const void *)lim,
|
||||
(const __m128i *)(const void *)&(k_ShufMask_Swap4[0]));
|
||||
else
|
||||
#endif
|
||||
#ifdef USE_SWAP_SSSE3
|
||||
if (g_SwapBytes_Mode >= k_SwapBytes_Mode_SSSE3)
|
||||
ShufBytes_128((__m128i *)(void *)items,
|
||||
(const __m128i *)(const void *)lim,
|
||||
(const __m128i *)(const void *)&(k_ShufMask_Swap4[0]));
|
||||
else
|
||||
#endif
|
||||
#else // MY_CPU_X86_OR_AMD64
|
||||
|
||||
if (g_SwapBytes_Mode != 0)
|
||||
SwapBytes4_128(items, lim);
|
||||
else
|
||||
#endif // MY_CPU_X86_OR_AMD64
|
||||
#endif // FORCE_SWAP_MODE
|
||||
DEFAULT_Swap4(items, lim);
|
||||
}
|
||||
items = lim;
|
||||
}
|
||||
Z7_PRAGMA_OPT_DISABLE_LOOP_UNROLL_VECTORIZE
|
||||
for (; numItems != 0; numItems--)
|
||||
{
|
||||
SWAP4_32(0)
|
||||
items++;
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
// #define SHOW_HW_STATUS
|
||||
|
||||
#ifdef SHOW_HW_STATUS
|
||||
#include <stdio.h>
|
||||
#define PRF(x) x
|
||||
#else
|
||||
#define PRF(x)
|
||||
#endif
|
||||
|
||||
void z7_SwapBytesPrepare(void)
|
||||
{
|
||||
#ifndef FORCE_SWAP_MODE
|
||||
unsigned mode = 0; // k_SwapBytes_Mode_BASE;
|
||||
|
||||
#ifdef MY_CPU_ARM_OR_ARM64
|
||||
{
|
||||
if (CPU_IsSupported_NEON())
|
||||
{
|
||||
// #pragma message ("=== SwapBytes NEON")
|
||||
PRF(printf("\n=== SwapBytes NEON\n");)
|
||||
mode = k_SwapBytes_Mode_NEON;
|
||||
}
|
||||
}
|
||||
#else // MY_CPU_ARM_OR_ARM64
|
||||
{
|
||||
#ifdef USE_SWAP_AVX2
|
||||
if (CPU_IsSupported_AVX2())
|
||||
{
|
||||
// #pragma message ("=== SwapBytes AVX2")
|
||||
PRF(printf("\n=== SwapBytes AVX2\n");)
|
||||
mode = k_SwapBytes_Mode_AVX2;
|
||||
}
|
||||
else
|
||||
#endif
|
||||
#ifdef USE_SWAP_SSSE3
|
||||
if (CPU_IsSupported_SSSE3())
|
||||
{
|
||||
// #pragma message ("=== SwapBytes SSSE3")
|
||||
PRF(printf("\n=== SwapBytes SSSE3\n");)
|
||||
mode = k_SwapBytes_Mode_SSSE3;
|
||||
}
|
||||
else
|
||||
#endif
|
||||
#if !defined(MY_CPU_AMD64)
|
||||
if (CPU_IsSupported_SSE2())
|
||||
#endif
|
||||
{
|
||||
// #pragma message ("=== SwapBytes SSE2")
|
||||
PRF(printf("\n=== SwapBytes SSE2\n");)
|
||||
mode = k_SwapBytes_Mode_SSE2;
|
||||
}
|
||||
}
|
||||
#endif // MY_CPU_ARM_OR_ARM64
|
||||
g_SwapBytes_Mode = mode;
|
||||
// g_SwapBytes_Mode = 0; // for debug
|
||||
#endif // FORCE_SWAP_MODE
|
||||
PRF(printf("\n=== SwapBytesPrepare\n");)
|
||||
}
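/* Editor's usage sketch (not part of the 7-Zip sources): how the functions above are
   typically driven. CSwapUInt16 / CSwapUInt32 and the prototypes are assumed to come
   from SwapBytes.h; the buffers and their length are illustrative. */
#include "SwapBytes.h"
static void demo_swap_buffers(CSwapUInt16 *buf16, CSwapUInt32 *buf32, size_t numItems)
{
  z7_SwapBytesPrepare();           // pick the SSSE3 / AVX2 / NEON or scalar path once
  z7_SwapBytes2(buf16, numItems);  // byte-swap numItems 16-bit values in place
  z7_SwapBytes4(buf32, numItems);  // byte-swap numItems 32-bit values in place
}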
|
||||
|
||||
#undef PRF
|
|
@ -0,0 +1,562 @@
|
|||
/* Threads.c -- multithreading library
|
||||
2023-03-04 : Igor Pavlov : Public domain */
|
||||
|
||||
#include "Precomp.h"
|
||||
|
||||
#ifdef _WIN32
|
||||
|
||||
#ifndef USE_THREADS_CreateThread
|
||||
#include <process.h>
|
||||
#endif
|
||||
|
||||
#include "Threads.h"
|
||||
|
||||
static WRes GetError(void)
|
||||
{
|
||||
const DWORD res = GetLastError();
|
||||
return res ? (WRes)res : 1;
|
||||
}
|
||||
|
||||
static WRes HandleToWRes(HANDLE h) { return (h != NULL) ? 0 : GetError(); }
|
||||
static WRes BOOLToWRes(BOOL v) { return v ? 0 : GetError(); }
|
||||
|
||||
WRes HandlePtr_Close(HANDLE *p)
|
||||
{
|
||||
if (*p != NULL)
|
||||
{
|
||||
if (!CloseHandle(*p))
|
||||
return GetError();
|
||||
*p = NULL;
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
WRes Handle_WaitObject(HANDLE h)
|
||||
{
|
||||
DWORD dw = WaitForSingleObject(h, INFINITE);
|
||||
/*
|
||||
(dw) result:
|
||||
WAIT_OBJECT_0 // 0
|
||||
WAIT_ABANDONED // 0x00000080 : is not compatible with Win32 Error space
|
||||
WAIT_TIMEOUT // 0x00000102 : is compatible with Win32 Error space
|
||||
WAIT_FAILED // 0xFFFFFFFF
|
||||
*/
|
||||
if (dw == WAIT_FAILED)
|
||||
{
|
||||
dw = GetLastError();
|
||||
if (dw == 0)
|
||||
return WAIT_FAILED;
|
||||
}
|
||||
return (WRes)dw;
|
||||
}
|
||||
|
||||
#define Thread_Wait(p) Handle_WaitObject(*(p))
|
||||
|
||||
WRes Thread_Wait_Close(CThread *p)
|
||||
{
|
||||
WRes res = Thread_Wait(p);
|
||||
WRes res2 = Thread_Close(p);
|
||||
return (res != 0 ? res : res2);
|
||||
}
|
||||
|
||||
WRes Thread_Create(CThread *p, THREAD_FUNC_TYPE func, LPVOID param)
|
||||
{
|
||||
/* Windows Me/98/95: threadId parameter may not be NULL in _beginthreadex/CreateThread functions */
|
||||
|
||||
#ifdef USE_THREADS_CreateThread
|
||||
|
||||
DWORD threadId;
|
||||
*p = CreateThread(NULL, 0, func, param, 0, &threadId);
|
||||
|
||||
#else
|
||||
|
||||
unsigned threadId;
|
||||
*p = (HANDLE)(_beginthreadex(NULL, 0, func, param, 0, &threadId));
|
||||
|
||||
#endif
|
||||
|
||||
/* perhaps we should use errno here, but GetLastError() is probably also OK. */
|
||||
return HandleToWRes(*p);
|
||||
}
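/* Editor's usage sketch (not part of the 7-Zip sources): creating and joining a worker
   with the wrappers above. THREAD_FUNC_DECL is assumed to be the declaration macro from
   Threads.h; the worker body is illustrative. */
static THREAD_FUNC_DECL DemoWorker(void *param)
{
  (void)param;                     // real work would go here
  return 0;
}
static WRes Demo_RunWorker(void)
{
  CThread t;
  const WRes wres = Thread_Create(&t, DemoWorker, NULL);
  if (wres != 0)
    return wres;
  return Thread_Wait_Close(&t);    // join the thread, then close its handle
}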
|
||||
|
||||
|
||||
WRes Thread_Create_With_Affinity(CThread *p, THREAD_FUNC_TYPE func, LPVOID param, CAffinityMask affinity)
|
||||
{
|
||||
#ifdef USE_THREADS_CreateThread
|
||||
|
||||
UNUSED_VAR(affinity)
|
||||
return Thread_Create(p, func, param);
|
||||
|
||||
#else
|
||||
|
||||
/* Windows Me/98/95: threadId parameter may not be NULL in _beginthreadex/CreateThread functions */
|
||||
HANDLE h;
|
||||
WRes wres;
|
||||
unsigned threadId;
|
||||
h = (HANDLE)(_beginthreadex(NULL, 0, func, param, CREATE_SUSPENDED, &threadId));
|
||||
*p = h;
|
||||
wres = HandleToWRes(h);
|
||||
if (h)
|
||||
{
|
||||
{
|
||||
// DWORD_PTR prevMask =
|
||||
SetThreadAffinityMask(h, (DWORD_PTR)affinity);
|
||||
/*
|
||||
if (prevMask == 0)
|
||||
{
|
||||
// an affinity-change failure is a non-critical error, so we can ignore it
|
||||
// wres = GetError();
|
||||
}
|
||||
*/
|
||||
}
|
||||
{
|
||||
DWORD prevSuspendCount = ResumeThread(h);
|
||||
/* ResumeThread() returns:
|
||||
0 : was_not_suspended
|
||||
1 : was_resumed
|
||||
-1 : error
|
||||
*/
|
||||
if (prevSuspendCount == (DWORD)-1)
|
||||
wres = GetError();
|
||||
}
|
||||
}
|
||||
|
||||
/* perhaps we should use errno here, but GetLastError() is probably also OK. */
|
||||
return wres;
|
||||
|
||||
#endif
|
||||
}
|
||||
|
||||
|
||||
static WRes Event_Create(CEvent *p, BOOL manualReset, int signaled)
|
||||
{
|
||||
*p = CreateEvent(NULL, manualReset, (signaled ? TRUE : FALSE), NULL);
|
||||
return HandleToWRes(*p);
|
||||
}
|
||||
|
||||
WRes Event_Set(CEvent *p) { return BOOLToWRes(SetEvent(*p)); }
|
||||
WRes Event_Reset(CEvent *p) { return BOOLToWRes(ResetEvent(*p)); }
|
||||
|
||||
WRes ManualResetEvent_Create(CManualResetEvent *p, int signaled) { return Event_Create(p, TRUE, signaled); }
|
||||
WRes AutoResetEvent_Create(CAutoResetEvent *p, int signaled) { return Event_Create(p, FALSE, signaled); }
|
||||
WRes ManualResetEvent_CreateNotSignaled(CManualResetEvent *p) { return ManualResetEvent_Create(p, 0); }
|
||||
WRes AutoResetEvent_CreateNotSignaled(CAutoResetEvent *p) { return AutoResetEvent_Create(p, 0); }
|
||||
|
||||
|
||||
WRes Semaphore_Create(CSemaphore *p, UInt32 initCount, UInt32 maxCount)
|
||||
{
|
||||
// negative ((LONG)maxCount) is not supported in WIN32::CreateSemaphore()
|
||||
*p = CreateSemaphore(NULL, (LONG)initCount, (LONG)maxCount, NULL);
|
||||
return HandleToWRes(*p);
|
||||
}
|
||||
|
||||
WRes Semaphore_OptCreateInit(CSemaphore *p, UInt32 initCount, UInt32 maxCount)
|
||||
{
|
||||
// if (Semaphore_IsCreated(p))
|
||||
{
|
||||
WRes wres = Semaphore_Close(p);
|
||||
if (wres != 0)
|
||||
return wres;
|
||||
}
|
||||
return Semaphore_Create(p, initCount, maxCount);
|
||||
}
|
||||
|
||||
static WRes Semaphore_Release(CSemaphore *p, LONG releaseCount, LONG *previousCount)
|
||||
{ return BOOLToWRes(ReleaseSemaphore(*p, releaseCount, previousCount)); }
|
||||
WRes Semaphore_ReleaseN(CSemaphore *p, UInt32 num)
|
||||
{ return Semaphore_Release(p, (LONG)num, NULL); }
|
||||
WRes Semaphore_Release1(CSemaphore *p) { return Semaphore_ReleaseN(p, 1); }
|
||||
|
||||
WRes CriticalSection_Init(CCriticalSection *p)
|
||||
{
|
||||
/* InitializeCriticalSection() can raise exception:
|
||||
Windows XP, 2003 : can raise a STATUS_NO_MEMORY exception
|
||||
Windows Vista+ : no exceptions */
|
||||
#ifdef _MSC_VER
|
||||
#ifdef __clang__
|
||||
#pragma GCC diagnostic ignored "-Wlanguage-extension-token"
|
||||
#endif
|
||||
__try
|
||||
#endif
|
||||
{
|
||||
InitializeCriticalSection(p);
|
||||
/* InitializeCriticalSectionAndSpinCount(p, 0); */
|
||||
}
|
||||
#ifdef _MSC_VER
|
||||
__except (EXCEPTION_EXECUTE_HANDLER) { return ERROR_NOT_ENOUGH_MEMORY; }
|
||||
#endif
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
||||
|
||||
|
||||
#else // _WIN32
|
||||
|
||||
// ---------- POSIX ----------
|
||||
|
||||
#ifndef __APPLE__
|
||||
#ifndef Z7_AFFINITY_DISABLE
|
||||
// _GNU_SOURCE may be required for pthread_setaffinity_np() / CPU_ZERO / CPU_SET
|
||||
// clang < 3.6 : unknown warning group '-Wreserved-id-macro'
|
||||
// clang 3.6 - 12.01 : gives warning "macro name is a reserved identifier"
|
||||
// clang >= 13 : do not give warning
|
||||
#if !defined(_GNU_SOURCE)
|
||||
#if defined(__clang__) && (__clang_major__ >= 4) && (__clang_major__ <= 12)
|
||||
#pragma GCC diagnostic ignored "-Wreserved-id-macro"
|
||||
#endif
|
||||
#define _GNU_SOURCE
|
||||
#endif // !defined(_GNU_SOURCE)
|
||||
#endif // Z7_AFFINITY_DISABLE
|
||||
#endif // __APPLE__
|
||||
|
||||
#include "Threads.h"
|
||||
|
||||
#include <errno.h>
|
||||
#include <stdlib.h>
|
||||
#include <string.h>
|
||||
#ifdef Z7_AFFINITY_SUPPORTED
|
||||
// #include <sched.h>
|
||||
#endif
|
||||
|
||||
|
||||
// #include <stdio.h>
|
||||
// #define PRF(p) p
|
||||
#define PRF(p)
|
||||
#define Print(s) PRF(printf("\n%s\n", s);)
|
||||
|
||||
WRes Thread_Create_With_CpuSet(CThread *p, THREAD_FUNC_TYPE func, LPVOID param, const CCpuSet *cpuSet)
|
||||
{
|
||||
// a new POSIX thread probably inherits its affinity from the parent thread
|
||||
Print("Thread_Create_With_CpuSet")
|
||||
|
||||
pthread_attr_t attr;
|
||||
int ret;
|
||||
// int ret2;
|
||||
|
||||
p->_created = 0;
|
||||
|
||||
RINOK(pthread_attr_init(&attr))
|
||||
|
||||
ret = pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_JOINABLE);
|
||||
|
||||
if (!ret)
|
||||
{
|
||||
if (cpuSet)
|
||||
{
|
||||
#ifdef Z7_AFFINITY_SUPPORTED
|
||||
|
||||
/*
|
||||
printf("\n affinity :");
|
||||
unsigned i;
|
||||
for (i = 0; i < sizeof(*cpuSet) && i < 8; i++)
|
||||
{
|
||||
Byte b = *((const Byte *)cpuSet + i);
|
||||
char temp[32];
|
||||
#define GET_HEX_CHAR(t) ((char)(((t < 10) ? ('0' + t) : ('A' + (t - 10)))))
|
||||
temp[0] = GET_HEX_CHAR((b & 0xF));
|
||||
temp[1] = GET_HEX_CHAR((b >> 4));
|
||||
// temp[0] = GET_HEX_CHAR((b >> 4)); // big-endian
|
||||
// temp[1] = GET_HEX_CHAR((b & 0xF)); // big-endian
|
||||
temp[2] = 0;
|
||||
printf("%s", temp);
|
||||
}
|
||||
printf("\n");
|
||||
*/
|
||||
|
||||
// ret2 =
|
||||
pthread_attr_setaffinity_np(&attr, sizeof(*cpuSet), cpuSet);
|
||||
// if (ret2) ret = ret2;
|
||||
#endif
|
||||
}
|
||||
|
||||
ret = pthread_create(&p->_tid, &attr, func, param);
|
||||
|
||||
if (!ret)
|
||||
{
|
||||
p->_created = 1;
|
||||
/*
|
||||
if (cpuSet)
|
||||
{
|
||||
// ret2 =
|
||||
pthread_setaffinity_np(p->_tid, sizeof(*cpuSet), cpuSet);
|
||||
// if (ret2) ret = ret2;
|
||||
}
|
||||
*/
|
||||
}
|
||||
}
|
||||
// ret2 =
|
||||
pthread_attr_destroy(&attr);
|
||||
// if (ret2 != 0) ret = ret2;
|
||||
return ret;
|
||||
}
|
||||
|
||||
|
||||
WRes Thread_Create(CThread *p, THREAD_FUNC_TYPE func, LPVOID param)
|
||||
{
|
||||
return Thread_Create_With_CpuSet(p, func, param, NULL);
|
||||
}
|
||||
|
||||
|
||||
WRes Thread_Create_With_Affinity(CThread *p, THREAD_FUNC_TYPE func, LPVOID param, CAffinityMask affinity)
|
||||
{
|
||||
Print("Thread_Create_WithAffinity")
|
||||
CCpuSet cs;
|
||||
unsigned i;
|
||||
CpuSet_Zero(&cs);
|
||||
for (i = 0; i < sizeof(affinity) * 8; i++)
|
||||
{
|
||||
if (affinity == 0)
|
||||
break;
|
||||
if (affinity & 1)
|
||||
{
|
||||
CpuSet_Set(&cs, i);
|
||||
}
|
||||
affinity >>= 1;
|
||||
}
|
||||
return Thread_Create_With_CpuSet(p, func, param, &cs);
|
||||
}
|
||||
|
||||
|
||||
WRes Thread_Close(CThread *p)
|
||||
{
|
||||
// Print("Thread_Close")
|
||||
int ret;
|
||||
if (!p->_created)
|
||||
return 0;
|
||||
|
||||
ret = pthread_detach(p->_tid);
|
||||
p->_tid = 0;
|
||||
p->_created = 0;
|
||||
return ret;
|
||||
}
|
||||
|
||||
|
||||
WRes Thread_Wait_Close(CThread *p)
|
||||
{
|
||||
// Print("Thread_Wait_Close")
|
||||
void *thread_return;
|
||||
int ret;
|
||||
if (!p->_created)
|
||||
return EINVAL;
|
||||
|
||||
ret = pthread_join(p->_tid, &thread_return);
|
||||
// the _tid probably can't be used after pthread_join(), so we mark the thread closed here
|
||||
p->_created = 0;
|
||||
p->_tid = 0;
|
||||
return ret;
|
||||
}
|
||||
|
||||
|
||||
|
||||
static WRes Event_Create(CEvent *p, int manualReset, int signaled)
|
||||
{
|
||||
RINOK(pthread_mutex_init(&p->_mutex, NULL))
|
||||
RINOK(pthread_cond_init(&p->_cond, NULL))
|
||||
p->_manual_reset = manualReset;
|
||||
p->_state = (signaled ? True : False);
|
||||
p->_created = 1;
|
||||
return 0;
|
||||
}
|
||||
|
||||
WRes ManualResetEvent_Create(CManualResetEvent *p, int signaled)
|
||||
{ return Event_Create(p, True, signaled); }
|
||||
WRes ManualResetEvent_CreateNotSignaled(CManualResetEvent *p)
|
||||
{ return ManualResetEvent_Create(p, 0); }
|
||||
WRes AutoResetEvent_Create(CAutoResetEvent *p, int signaled)
|
||||
{ return Event_Create(p, False, signaled); }
|
||||
WRes AutoResetEvent_CreateNotSignaled(CAutoResetEvent *p)
|
||||
{ return AutoResetEvent_Create(p, 0); }
|
||||
|
||||
|
||||
WRes Event_Set(CEvent *p)
|
||||
{
|
||||
RINOK(pthread_mutex_lock(&p->_mutex))
|
||||
p->_state = True;
|
||||
int res1 = pthread_cond_broadcast(&p->_cond);
|
||||
int res2 = pthread_mutex_unlock(&p->_mutex);
|
||||
return (res2 ? res2 : res1);
|
||||
}
|
||||
|
||||
WRes Event_Reset(CEvent *p)
|
||||
{
|
||||
RINOK(pthread_mutex_lock(&p->_mutex))
|
||||
p->_state = False;
|
||||
return pthread_mutex_unlock(&p->_mutex);
|
||||
}
|
||||
|
||||
WRes Event_Wait(CEvent *p)
|
||||
{
|
||||
RINOK(pthread_mutex_lock(&p->_mutex))
|
||||
while (p->_state == False)
|
||||
{
|
||||
// ETIMEDOUT
|
||||
// ret =
|
||||
pthread_cond_wait(&p->_cond, &p->_mutex);
|
||||
// if (ret != 0) break;
|
||||
}
|
||||
if (p->_manual_reset == False)
|
||||
{
|
||||
p->_state = False;
|
||||
}
|
||||
return pthread_mutex_unlock(&p->_mutex);
|
||||
}
|
||||
|
||||
WRes Event_Close(CEvent *p)
|
||||
{
|
||||
if (!p->_created)
|
||||
return 0;
|
||||
p->_created = 0;
|
||||
{
|
||||
int res1 = pthread_mutex_destroy(&p->_mutex);
|
||||
int res2 = pthread_cond_destroy(&p->_cond);
|
||||
return (res1 ? res1 : res2);
|
||||
}
|
||||
}
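/* Editor's usage sketch (not part of the 7-Zip sources): with the auto-reset flavour,
   each Event_Set() lets one Event_Wait() caller through, and Event_Wait() clears the
   state again, mirroring the Win32 semantics implemented above. */
static WRes Demo_AutoResetEvent(CAutoResetEvent *ev)
{
  WRes wres = AutoResetEvent_CreateNotSignaled(ev);
  if (wres != 0)
    return wres;
  wres = Event_Set(ev);            // normally called from another thread
  if (wres != 0)
    return wres;
  return Event_Wait(ev);           // returns with the event reset to non-signaled
}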
|
||||
|
||||
|
||||
WRes Semaphore_Create(CSemaphore *p, UInt32 initCount, UInt32 maxCount)
|
||||
{
|
||||
if (initCount > maxCount || maxCount < 1)
|
||||
return EINVAL;
|
||||
RINOK(pthread_mutex_init(&p->_mutex, NULL))
|
||||
RINOK(pthread_cond_init(&p->_cond, NULL))
|
||||
p->_count = initCount;
|
||||
p->_maxCount = maxCount;
|
||||
p->_created = 1;
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
||||
WRes Semaphore_OptCreateInit(CSemaphore *p, UInt32 initCount, UInt32 maxCount)
|
||||
{
|
||||
if (Semaphore_IsCreated(p))
|
||||
{
|
||||
/*
|
||||
WRes wres = Semaphore_Close(p);
|
||||
if (wres != 0)
|
||||
return wres;
|
||||
*/
|
||||
if (initCount > maxCount || maxCount < 1)
|
||||
return EINVAL;
|
||||
// return EINVAL; // for debug
|
||||
p->_count = initCount;
|
||||
p->_maxCount = maxCount;
|
||||
return 0;
|
||||
}
|
||||
return Semaphore_Create(p, initCount, maxCount);
|
||||
}
|
||||
|
||||
|
||||
WRes Semaphore_ReleaseN(CSemaphore *p, UInt32 releaseCount)
|
||||
{
|
||||
UInt32 newCount;
|
||||
int ret;
|
||||
|
||||
if (releaseCount < 1)
|
||||
return EINVAL;
|
||||
|
||||
RINOK(pthread_mutex_lock(&p->_mutex))
|
||||
|
||||
newCount = p->_count + releaseCount;
|
||||
if (newCount > p->_maxCount)
|
||||
ret = ERROR_TOO_MANY_POSTS; // EINVAL;
|
||||
else
|
||||
{
|
||||
p->_count = newCount;
|
||||
ret = pthread_cond_broadcast(&p->_cond);
|
||||
}
|
||||
RINOK(pthread_mutex_unlock(&p->_mutex))
|
||||
return ret;
|
||||
}
|
||||
|
||||
WRes Semaphore_Wait(CSemaphore *p)
|
||||
{
|
||||
RINOK(pthread_mutex_lock(&p->_mutex))
|
||||
while (p->_count < 1)
|
||||
{
|
||||
pthread_cond_wait(&p->_cond, &p->_mutex);
|
||||
}
|
||||
p->_count--;
|
||||
return pthread_mutex_unlock(&p->_mutex);
|
||||
}
|
||||
|
||||
WRes Semaphore_Close(CSemaphore *p)
|
||||
{
|
||||
if (!p->_created)
|
||||
return 0;
|
||||
p->_created = 0;
|
||||
{
|
||||
int res1 = pthread_mutex_destroy(&p->_mutex);
|
||||
int res2 = pthread_cond_destroy(&p->_cond);
|
||||
return (res1 ? res1 : res2);
|
||||
}
|
||||
}
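/* Editor's usage sketch (not part of the 7-Zip sources): a small bounded counter built
   from the wrappers above. maxCount caps the value, and Semaphore_ReleaseN() reports
   ERROR_TOO_MANY_POSTS when a release would exceed it. */
static WRes Demo_Semaphore(void)
{
  CSemaphore s;
  WRes wres = Semaphore_Create(&s, 0 /* initCount */, 4 /* maxCount */);
  if (wres != 0)
    return wres;
  wres = Semaphore_ReleaseN(&s, 1);   // count: 0 -> 1
  if (wres == 0)
    wres = Semaphore_Wait(&s);        // count: 1 -> 0
  {
    const WRes wres2 = Semaphore_Close(&s);
    return (wres != 0) ? wres : wres2;
  }
}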
|
||||
|
||||
|
||||
|
||||
WRes CriticalSection_Init(CCriticalSection *p)
|
||||
{
|
||||
// Print("CriticalSection_Init")
|
||||
if (!p)
|
||||
return EINTR;
|
||||
return pthread_mutex_init(&p->_mutex, NULL);
|
||||
}
|
||||
|
||||
void CriticalSection_Enter(CCriticalSection *p)
|
||||
{
|
||||
// Print("CriticalSection_Enter")
|
||||
if (p)
|
||||
{
|
||||
// int ret =
|
||||
pthread_mutex_lock(&p->_mutex);
|
||||
}
|
||||
}
|
||||
|
||||
void CriticalSection_Leave(CCriticalSection *p)
|
||||
{
|
||||
// Print("CriticalSection_Leave")
|
||||
if (p)
|
||||
{
|
||||
// int ret =
|
||||
pthread_mutex_unlock(&p->_mutex);
|
||||
}
|
||||
}
|
||||
|
||||
void CriticalSection_Delete(CCriticalSection *p)
|
||||
{
|
||||
// Print("CriticalSection_Delete")
|
||||
if (p)
|
||||
{
|
||||
// int ret =
|
||||
pthread_mutex_destroy(&p->_mutex);
|
||||
}
|
||||
}
|
||||
|
||||
LONG InterlockedIncrement(LONG volatile *addend)
|
||||
{
|
||||
// Print("InterlockedIncrement")
|
||||
#ifdef USE_HACK_UNSAFE_ATOMIC
|
||||
LONG val = *addend + 1;
|
||||
*addend = val;
|
||||
return val;
|
||||
#else
|
||||
|
||||
#if defined(__clang__) && (__clang_major__ >= 8)
|
||||
#pragma GCC diagnostic ignored "-Watomic-implicit-seq-cst"
|
||||
#endif
|
||||
return __sync_add_and_fetch(addend, 1);
|
||||
#endif
|
||||
}
|
||||
|
||||
#endif // _WIN32
|
||||
|
||||
WRes AutoResetEvent_OptCreate_And_Reset(CAutoResetEvent *p)
|
||||
{
|
||||
if (Event_IsCreated(p))
|
||||
return Event_Reset(p);
|
||||
return AutoResetEvent_CreateNotSignaled(p);
|
||||
}
|
||||
|
||||
#undef PRF
|
||||
#undef Print
|
|
@ -1,5 +1,5 @@
|
|||
/* Xz.c - Xz
|
||||
2021-02-09 : Igor Pavlov : Public domain */
|
||||
2023-04-02 : Igor Pavlov : Public domain */
|
||||
|
||||
#include "Precomp.h"
|
||||
|
||||
|
@ -70,7 +70,7 @@ int XzCheck_Final(CXzCheck *p, Byte *digest)
|
|||
switch (p->mode)
|
||||
{
|
||||
case XZ_CHECK_CRC32:
|
||||
SetUi32(digest, CRC_GET_DIGEST(p->crc));
|
||||
SetUi32(digest, CRC_GET_DIGEST(p->crc))
|
||||
break;
|
||||
case XZ_CHECK_CRC64:
|
||||
{
|
||||
|
|
|
@ -1,5 +1,5 @@
|
|||
/* XzCrc64.c -- CRC64 calculation
|
||||
2017-05-23 : Igor Pavlov : Public domain */
|
||||
2023-04-02 : Igor Pavlov : Public domain */
|
||||
|
||||
#include "Precomp.h"
|
||||
|
||||
|
@ -12,39 +12,30 @@
|
|||
#define CRC64_NUM_TABLES 4
|
||||
#else
|
||||
#define CRC64_NUM_TABLES 5
|
||||
#define CRC_UINT64_SWAP(v) \
|
||||
((v >> 56) \
|
||||
| ((v >> 40) & ((UInt64)0xFF << 8)) \
|
||||
| ((v >> 24) & ((UInt64)0xFF << 16)) \
|
||||
| ((v >> 8) & ((UInt64)0xFF << 24)) \
|
||||
| ((v << 8) & ((UInt64)0xFF << 32)) \
|
||||
| ((v << 24) & ((UInt64)0xFF << 40)) \
|
||||
| ((v << 40) & ((UInt64)0xFF << 48)) \
|
||||
| ((v << 56)))
|
||||
|
||||
UInt64 MY_FAST_CALL XzCrc64UpdateT1_BeT4(UInt64 v, const void *data, size_t size, const UInt64 *table);
|
||||
UInt64 Z7_FASTCALL XzCrc64UpdateT1_BeT4(UInt64 v, const void *data, size_t size, const UInt64 *table);
|
||||
#endif
|
||||
|
||||
#ifndef MY_CPU_BE
|
||||
UInt64 MY_FAST_CALL XzCrc64UpdateT4(UInt64 v, const void *data, size_t size, const UInt64 *table);
|
||||
UInt64 Z7_FASTCALL XzCrc64UpdateT4(UInt64 v, const void *data, size_t size, const UInt64 *table);
|
||||
#endif
|
||||
|
||||
typedef UInt64 (MY_FAST_CALL *CRC64_FUNC)(UInt64 v, const void *data, size_t size, const UInt64 *table);
|
||||
typedef UInt64 (Z7_FASTCALL *CRC64_FUNC)(UInt64 v, const void *data, size_t size, const UInt64 *table);
|
||||
|
||||
static CRC64_FUNC g_Crc64Update;
|
||||
UInt64 g_Crc64Table[256 * CRC64_NUM_TABLES];
|
||||
|
||||
UInt64 MY_FAST_CALL Crc64Update(UInt64 v, const void *data, size_t size)
|
||||
UInt64 Z7_FASTCALL Crc64Update(UInt64 v, const void *data, size_t size)
|
||||
{
|
||||
return g_Crc64Update(v, data, size, g_Crc64Table);
|
||||
}
|
||||
|
||||
UInt64 MY_FAST_CALL Crc64Calc(const void *data, size_t size)
|
||||
UInt64 Z7_FASTCALL Crc64Calc(const void *data, size_t size)
|
||||
{
|
||||
return g_Crc64Update(CRC64_INIT_VAL, data, size, g_Crc64Table) ^ CRC64_INIT_VAL;
|
||||
}
|
||||
|
||||
void MY_FAST_CALL Crc64GenerateTable()
|
||||
void Z7_FASTCALL Crc64GenerateTable(void)
|
||||
{
|
||||
UInt32 i;
|
||||
for (i = 0; i < 256; i++)
|
||||
|
@ -57,7 +48,7 @@ void MY_FAST_CALL Crc64GenerateTable()
|
|||
}
|
||||
for (i = 256; i < 256 * CRC64_NUM_TABLES; i++)
|
||||
{
|
||||
UInt64 r = g_Crc64Table[(size_t)i - 256];
|
||||
const UInt64 r = g_Crc64Table[(size_t)i - 256];
|
||||
g_Crc64Table[i] = g_Crc64Table[r & 0xFF] ^ (r >> 8);
|
||||
}
|
||||
|
||||
|
@ -76,11 +67,14 @@ void MY_FAST_CALL Crc64GenerateTable()
|
|||
{
|
||||
for (i = 256 * CRC64_NUM_TABLES - 1; i >= 256; i--)
|
||||
{
|
||||
UInt64 x = g_Crc64Table[(size_t)i - 256];
|
||||
g_Crc64Table[i] = CRC_UINT64_SWAP(x);
|
||||
const UInt64 x = g_Crc64Table[(size_t)i - 256];
|
||||
g_Crc64Table[i] = Z7_BSWAP64(x);
|
||||
}
|
||||
g_Crc64Update = XzCrc64UpdateT1_BeT4;
|
||||
}
|
||||
}
|
||||
#endif
|
||||
}
|
||||
|
||||
#undef kCrc64Poly
|
||||
#undef CRC64_NUM_TABLES
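/* Editor's usage sketch (not part of the 7-Zip sources): g_Crc64Table must be filled
   with Crc64GenerateTable() (normally once at startup) before Crc64Calc() or
   Crc64Update() is called; the declarations are assumed to come from XzCrc64.h. */
static UInt64 Demo_Crc64OfBuffer(const void *data, size_t size)
{
  Crc64GenerateTable();             // safe to call more than once; it just refills the table
  return Crc64Calc(data, size);     // CRC-64 of the whole buffer
}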
|
||||
|
|
|
@ -1,5 +1,5 @@
|
|||
/* XzCrc64Opt.c -- CRC64 calculation
|
||||
2021-02-09 : Igor Pavlov : Public domain */
|
||||
2023-04-02 : Igor Pavlov : Public domain */
|
||||
|
||||
#include "Precomp.h"
|
||||
|
||||
|
@ -9,15 +9,15 @@
|
|||
|
||||
#define CRC64_UPDATE_BYTE_2(crc, b) (table[((crc) ^ (b)) & 0xFF] ^ ((crc) >> 8))
|
||||
|
||||
UInt64 MY_FAST_CALL XzCrc64UpdateT4(UInt64 v, const void *data, size_t size, const UInt64 *table);
|
||||
UInt64 MY_FAST_CALL XzCrc64UpdateT4(UInt64 v, const void *data, size_t size, const UInt64 *table)
|
||||
UInt64 Z7_FASTCALL XzCrc64UpdateT4(UInt64 v, const void *data, size_t size, const UInt64 *table);
|
||||
UInt64 Z7_FASTCALL XzCrc64UpdateT4(UInt64 v, const void *data, size_t size, const UInt64 *table)
|
||||
{
|
||||
const Byte *p = (const Byte *)data;
|
||||
for (; size > 0 && ((unsigned)(ptrdiff_t)p & 3) != 0; size--, p++)
|
||||
v = CRC64_UPDATE_BYTE_2(v, *p);
|
||||
for (; size >= 4; size -= 4, p += 4)
|
||||
{
|
||||
UInt32 d = (UInt32)v ^ *(const UInt32 *)(const void *)p;
|
||||
const UInt32 d = (UInt32)v ^ *(const UInt32 *)(const void *)p;
|
||||
v = (v >> 32)
|
||||
^ (table + 0x300)[((d ) & 0xFF)]
|
||||
^ (table + 0x200)[((d >> 8) & 0xFF)]
|
||||
|
@ -34,29 +34,19 @@ UInt64 MY_FAST_CALL XzCrc64UpdateT4(UInt64 v, const void *data, size_t size, con
|
|||
|
||||
#ifndef MY_CPU_LE
|
||||
|
||||
#define CRC_UINT64_SWAP(v) \
|
||||
((v >> 56) \
|
||||
| ((v >> 40) & ((UInt64)0xFF << 8)) \
|
||||
| ((v >> 24) & ((UInt64)0xFF << 16)) \
|
||||
| ((v >> 8) & ((UInt64)0xFF << 24)) \
|
||||
| ((v << 8) & ((UInt64)0xFF << 32)) \
|
||||
| ((v << 24) & ((UInt64)0xFF << 40)) \
|
||||
| ((v << 40) & ((UInt64)0xFF << 48)) \
|
||||
| ((v << 56)))
|
||||
|
||||
#define CRC64_UPDATE_BYTE_2_BE(crc, b) (table[(Byte)((crc) >> 56) ^ (b)] ^ ((crc) << 8))
|
||||
|
||||
UInt64 MY_FAST_CALL XzCrc64UpdateT1_BeT4(UInt64 v, const void *data, size_t size, const UInt64 *table);
|
||||
UInt64 MY_FAST_CALL XzCrc64UpdateT1_BeT4(UInt64 v, const void *data, size_t size, const UInt64 *table)
|
||||
UInt64 Z7_FASTCALL XzCrc64UpdateT1_BeT4(UInt64 v, const void *data, size_t size, const UInt64 *table);
|
||||
UInt64 Z7_FASTCALL XzCrc64UpdateT1_BeT4(UInt64 v, const void *data, size_t size, const UInt64 *table)
|
||||
{
|
||||
const Byte *p = (const Byte *)data;
|
||||
table += 0x100;
|
||||
v = CRC_UINT64_SWAP(v);
|
||||
v = Z7_BSWAP64(v);
|
||||
for (; size > 0 && ((unsigned)(ptrdiff_t)p & 3) != 0; size--, p++)
|
||||
v = CRC64_UPDATE_BYTE_2_BE(v, *p);
|
||||
for (; size >= 4; size -= 4, p += 4)
|
||||
{
|
||||
UInt32 d = (UInt32)(v >> 32) ^ *(const UInt32 *)(const void *)p;
|
||||
const UInt32 d = (UInt32)(v >> 32) ^ *(const UInt32 *)(const void *)p;
|
||||
v = (v << 32)
|
||||
^ (table + 0x000)[((d ) & 0xFF)]
|
||||
^ (table + 0x100)[((d >> 8) & 0xFF)]
|
||||
|
@ -65,7 +55,7 @@ UInt64 MY_FAST_CALL XzCrc64UpdateT1_BeT4(UInt64 v, const void *data, size_t size
|
|||
}
|
||||
for (; size > 0; size--, p++)
|
||||
v = CRC64_UPDATE_BYTE_2_BE(v, *p);
|
||||
return CRC_UINT64_SWAP(v);
|
||||
return Z7_BSWAP64(v);
|
||||
}
|
||||
|
||||
#endif
|
||||
|
|
|
@ -1,5 +1,5 @@
|
|||
/* XzDec.c -- Xz Decode
|
||||
2021-09-04 : Igor Pavlov : Public domain */
|
||||
2023-04-13 : Igor Pavlov : Public domain */
|
||||
|
||||
#include "Precomp.h"
|
||||
|
||||
|
@ -67,7 +67,8 @@ unsigned Xz_ReadVarInt(const Byte *p, size_t maxSize, UInt64 *value)
|
|||
return 0;
|
||||
}
|
||||
|
||||
/* ---------- BraState ---------- */
|
||||
|
||||
/* ---------- XzBcFilterState ---------- */
|
||||
|
||||
#define BRA_BUF_SIZE (1 << 14)
|
||||
|
||||
|
@ -76,27 +77,29 @@ typedef struct
|
|||
size_t bufPos;
|
||||
size_t bufConv;
|
||||
size_t bufTotal;
|
||||
Byte *buf; // must be aligned for 4 bytes
|
||||
Xz_Func_BcFilterStateBase_Filter filter_func;
|
||||
// int encodeMode;
|
||||
CXzBcFilterStateBase base;
|
||||
// Byte buf[BRA_BUF_SIZE];
|
||||
} CXzBcFilterState;
|
||||
|
||||
int encodeMode;
|
||||
|
||||
UInt32 methodId;
|
||||
UInt32 delta;
|
||||
UInt32 ip;
|
||||
UInt32 x86State;
|
||||
Byte deltaState[DELTA_STATE_SIZE];
|
||||
|
||||
Byte buf[BRA_BUF_SIZE];
|
||||
} CBraState;
|
||||
|
||||
static void BraState_Free(void *pp, ISzAllocPtr alloc)
|
||||
static void XzBcFilterState_Free(void *pp, ISzAllocPtr alloc)
|
||||
{
|
||||
ISzAlloc_Free(alloc, pp);
|
||||
if (pp)
|
||||
{
|
||||
CXzBcFilterState *p = ((CXzBcFilterState *)pp);
|
||||
ISzAlloc_Free(alloc, p->buf);
|
||||
ISzAlloc_Free(alloc, pp);
|
||||
}
|
||||
}
|
||||
|
||||
static SRes BraState_SetProps(void *pp, const Byte *props, size_t propSize, ISzAllocPtr alloc)
|
||||
|
||||
static SRes XzBcFilterState_SetProps(void *pp, const Byte *props, size_t propSize, ISzAllocPtr alloc)
|
||||
{
|
||||
CBraState *p = ((CBraState *)pp);
|
||||
UNUSED_VAR(alloc);
|
||||
CXzBcFilterStateBase *p = &((CXzBcFilterState *)pp)->base;
|
||||
UNUSED_VAR(alloc)
|
||||
p->ip = 0;
|
||||
if (p->methodId == XZ_ID_Delta)
|
||||
{
|
||||
|
@ -114,6 +117,7 @@ static SRes BraState_SetProps(void *pp, const Byte *props, size_t propSize, ISzA
|
|||
case XZ_ID_PPC:
|
||||
case XZ_ID_ARM:
|
||||
case XZ_ID_SPARC:
|
||||
case XZ_ID_ARM64:
|
||||
if ((v & 3) != 0)
|
||||
return SZ_ERROR_UNSUPPORTED;
|
||||
break;
|
||||
|
@ -134,73 +138,90 @@ static SRes BraState_SetProps(void *pp, const Byte *props, size_t propSize, ISzA
|
|||
return SZ_OK;
|
||||
}
|
||||
|
||||
static void BraState_Init(void *pp)
|
||||
|
||||
static void XzBcFilterState_Init(void *pp)
|
||||
{
|
||||
CBraState *p = ((CBraState *)pp);
|
||||
CXzBcFilterState *p = ((CXzBcFilterState *)pp);
|
||||
p->bufPos = p->bufConv = p->bufTotal = 0;
|
||||
x86_Convert_Init(p->x86State);
|
||||
if (p->methodId == XZ_ID_Delta)
|
||||
Delta_Init(p->deltaState);
|
||||
p->base.X86_State = Z7_BRANCH_CONV_ST_X86_STATE_INIT_VAL;
|
||||
if (p->base.methodId == XZ_ID_Delta)
|
||||
Delta_Init(p->base.delta_State);
|
||||
}
|
||||
|
||||
|
||||
#define CASE_BRA_CONV(isa) case XZ_ID_ ## isa: size = isa ## _Convert(data, size, p->ip, p->encodeMode); break;
|
||||
|
||||
static SizeT BraState_Filter(void *pp, Byte *data, SizeT size)
|
||||
static const z7_Func_BranchConv g_Funcs_BranchConv_RISC_Dec[] =
|
||||
{
|
||||
Z7_BRANCH_CONV_DEC(PPC),
|
||||
Z7_BRANCH_CONV_DEC(IA64),
|
||||
Z7_BRANCH_CONV_DEC(ARM),
|
||||
Z7_BRANCH_CONV_DEC(ARMT),
|
||||
Z7_BRANCH_CONV_DEC(SPARC),
|
||||
Z7_BRANCH_CONV_DEC(ARM64)
|
||||
};
|
||||
|
||||
static SizeT XzBcFilterStateBase_Filter_Dec(CXzBcFilterStateBase *p, Byte *data, SizeT size)
|
||||
{
|
||||
CBraState *p = ((CBraState *)pp);
|
||||
switch (p->methodId)
|
||||
{
|
||||
case XZ_ID_Delta:
|
||||
if (p->encodeMode)
|
||||
Delta_Encode(p->deltaState, p->delta, data, size);
|
||||
else
|
||||
Delta_Decode(p->deltaState, p->delta, data, size);
|
||||
Delta_Decode(p->delta_State, p->delta, data, size);
|
||||
break;
|
||||
case XZ_ID_X86:
|
||||
size = x86_Convert(data, size, p->ip, &p->x86State, p->encodeMode);
|
||||
size = (SizeT)(z7_BranchConvSt_X86_Dec(data, size, p->ip, &p->X86_State) - data);
|
||||
break;
|
||||
default:
|
||||
if (p->methodId >= XZ_ID_PPC)
|
||||
{
|
||||
const UInt32 i = p->methodId - XZ_ID_PPC;
|
||||
if (i < Z7_ARRAY_SIZE(g_Funcs_BranchConv_RISC_Dec))
|
||||
size = (SizeT)(g_Funcs_BranchConv_RISC_Dec[i](data, size, p->ip) - data);
|
||||
}
|
||||
break;
|
||||
CASE_BRA_CONV(PPC)
|
||||
CASE_BRA_CONV(IA64)
|
||||
CASE_BRA_CONV(ARM)
|
||||
CASE_BRA_CONV(ARMT)
|
||||
CASE_BRA_CONV(SPARC)
|
||||
}
|
||||
p->ip += (UInt32)size;
|
||||
return size;
|
||||
}
|
||||
|
||||
|
||||
static SRes BraState_Code2(void *pp,
|
||||
static SizeT XzBcFilterState_Filter(void *pp, Byte *data, SizeT size)
|
||||
{
|
||||
CXzBcFilterState *p = ((CXzBcFilterState *)pp);
|
||||
return p->filter_func(&p->base, data, size);
|
||||
}
|
||||
|
||||
|
||||
static SRes XzBcFilterState_Code2(void *pp,
|
||||
Byte *dest, SizeT *destLen,
|
||||
const Byte *src, SizeT *srcLen, int srcWasFinished,
|
||||
ECoderFinishMode finishMode,
|
||||
// int *wasFinished
|
||||
ECoderStatus *status)
|
||||
{
|
||||
CBraState *p = ((CBraState *)pp);
|
||||
CXzBcFilterState *p = ((CXzBcFilterState *)pp);
|
||||
SizeT destRem = *destLen;
|
||||
SizeT srcRem = *srcLen;
|
||||
UNUSED_VAR(finishMode);
|
||||
UNUSED_VAR(finishMode)
|
||||
|
||||
*destLen = 0;
|
||||
*srcLen = 0;
|
||||
// *wasFinished = False;
|
||||
*status = CODER_STATUS_NOT_FINISHED;
|
||||
|
||||
while (destRem > 0)
|
||||
while (destRem != 0)
|
||||
{
|
||||
if (p->bufPos != p->bufConv)
|
||||
{
|
||||
size_t size = p->bufConv - p->bufPos;
|
||||
if (size > destRem)
|
||||
size = destRem;
|
||||
memcpy(dest, p->buf + p->bufPos, size);
|
||||
p->bufPos += size;
|
||||
*destLen += size;
|
||||
dest += size;
|
||||
destRem -= size;
|
||||
continue;
|
||||
if (size)
|
||||
{
|
||||
if (size > destRem)
|
||||
size = destRem;
|
||||
memcpy(dest, p->buf + p->bufPos, size);
|
||||
p->bufPos += size;
|
||||
*destLen += size;
|
||||
dest += size;
|
||||
destRem -= size;
|
||||
continue;
|
||||
}
|
||||
}
|
||||
|
||||
p->bufTotal -= p->bufPos;
|
||||
|
@ -220,7 +241,7 @@ static SRes BraState_Code2(void *pp,
|
|||
if (p->bufTotal == 0)
|
||||
break;
|
||||
|
||||
p->bufConv = BraState_Filter(pp, p->buf, p->bufTotal);
|
||||
p->bufConv = p->filter_func(&p->base, p->buf, p->bufTotal);
|
||||
|
||||
if (p->bufConv == 0)
|
||||
{
|
||||
|
@ -240,27 +261,37 @@ static SRes BraState_Code2(void *pp,
|
|||
}
|
||||
|
||||
|
||||
SRes BraState_SetFromMethod(IStateCoder *p, UInt64 id, int encodeMode, ISzAllocPtr alloc);
|
||||
SRes BraState_SetFromMethod(IStateCoder *p, UInt64 id, int encodeMode, ISzAllocPtr alloc)
|
||||
#define XZ_IS_SUPPORTED_FILTER_ID(id) \
|
||||
((id) >= XZ_ID_Delta && (id) <= XZ_ID_ARM64)
|
||||
|
||||
SRes Xz_StateCoder_Bc_SetFromMethod_Func(IStateCoder *p, UInt64 id,
|
||||
Xz_Func_BcFilterStateBase_Filter func, ISzAllocPtr alloc)
|
||||
{
|
||||
CBraState *decoder;
|
||||
if (id < XZ_ID_Delta || id > XZ_ID_SPARC)
|
||||
CXzBcFilterState *decoder;
|
||||
if (!XZ_IS_SUPPORTED_FILTER_ID(id))
|
||||
return SZ_ERROR_UNSUPPORTED;
|
||||
decoder = (CBraState *)p->p;
|
||||
decoder = (CXzBcFilterState *)p->p;
|
||||
if (!decoder)
|
||||
{
|
||||
decoder = (CBraState *)ISzAlloc_Alloc(alloc, sizeof(CBraState));
|
||||
decoder = (CXzBcFilterState *)ISzAlloc_Alloc(alloc, sizeof(CXzBcFilterState));
|
||||
if (!decoder)
|
||||
return SZ_ERROR_MEM;
|
||||
decoder->buf = ISzAlloc_Alloc(alloc, BRA_BUF_SIZE);
|
||||
if (!decoder->buf)
|
||||
{
|
||||
ISzAlloc_Free(alloc, decoder);
|
||||
return SZ_ERROR_MEM;
|
||||
}
|
||||
p->p = decoder;
|
||||
p->Free = BraState_Free;
|
||||
p->SetProps = BraState_SetProps;
|
||||
p->Init = BraState_Init;
|
||||
p->Code2 = BraState_Code2;
|
||||
p->Filter = BraState_Filter;
|
||||
p->Free = XzBcFilterState_Free;
|
||||
p->SetProps = XzBcFilterState_SetProps;
|
||||
p->Init = XzBcFilterState_Init;
|
||||
p->Code2 = XzBcFilterState_Code2;
|
||||
p->Filter = XzBcFilterState_Filter;
|
||||
decoder->filter_func = func;
|
||||
}
|
||||
decoder->methodId = (UInt32)id;
|
||||
decoder->encodeMode = encodeMode;
|
||||
decoder->base.methodId = (UInt32)id;
|
||||
// decoder->encodeMode = encodeMode;
|
||||
return SZ_OK;
|
||||
}
|
||||
|
||||
|
@ -279,9 +310,9 @@ static void SbState_Free(void *pp, ISzAllocPtr alloc)
|
|||
|
||||
static SRes SbState_SetProps(void *pp, const Byte *props, size_t propSize, ISzAllocPtr alloc)
|
||||
{
|
||||
UNUSED_VAR(pp);
|
||||
UNUSED_VAR(props);
|
||||
UNUSED_VAR(alloc);
|
||||
UNUSED_VAR(pp)
|
||||
UNUSED_VAR(props)
|
||||
UNUSED_VAR(alloc)
|
||||
return (propSize == 0) ? SZ_OK : SZ_ERROR_UNSUPPORTED;
|
||||
}
|
||||
|
||||
|
@ -297,7 +328,7 @@ static SRes SbState_Code2(void *pp, Byte *dest, SizeT *destLen, const Byte *src,
|
|||
{
|
||||
CSbDec *p = (CSbDec *)pp;
|
||||
SRes res;
|
||||
UNUSED_VAR(srcWasFinished);
|
||||
UNUSED_VAR(srcWasFinished)
|
||||
p->dest = dest;
|
||||
p->destLen = *destLen;
|
||||
p->src = src;
|
||||
|
@ -389,7 +420,7 @@ static SRes Lzma2State_Code2(void *pp, Byte *dest, SizeT *destLen, const Byte *s
|
|||
ELzmaStatus status2;
|
||||
/* ELzmaFinishMode fm = (finishMode == LZMA_FINISH_ANY) ? LZMA_FINISH_ANY : LZMA_FINISH_END; */
|
||||
SRes res;
|
||||
UNUSED_VAR(srcWasFinished);
|
||||
UNUSED_VAR(srcWasFinished)
|
||||
if (spec->outBufMode)
|
||||
{
|
||||
SizeT dicPos = spec->decoder.decoder.dicPos;
|
||||
|
@ -420,7 +451,7 @@ static SRes Lzma2State_SetFromMethod(IStateCoder *p, Byte *outBuf, size_t outBuf
|
|||
p->Init = Lzma2State_Init;
|
||||
p->Code2 = Lzma2State_Code2;
|
||||
p->Filter = NULL;
|
||||
Lzma2Dec_Construct(&spec->decoder);
|
||||
Lzma2Dec_CONSTRUCT(&spec->decoder)
|
||||
}
|
||||
spec->outBufMode = False;
|
||||
if (outBuf)
|
||||
|
@ -519,7 +550,8 @@ static SRes MixCoder_SetFromMethod(CMixCoder *p, unsigned coderIndex, UInt64 met
|
|||
}
|
||||
if (coderIndex == 0)
|
||||
return SZ_ERROR_UNSUPPORTED;
|
||||
return BraState_SetFromMethod(sc, methodId, 0, p->alloc);
|
||||
return Xz_StateCoder_Bc_SetFromMethod_Func(sc, methodId,
|
||||
XzBcFilterStateBase_Filter_Dec, p->alloc);
|
||||
}
|
||||
|
||||
|
||||
|
@ -568,7 +600,7 @@ static SRes MixCoder_Code(CMixCoder *p,
|
|||
SizeT destLen2, srcLen2;
|
||||
int wasFinished;
|
||||
|
||||
PRF_STR("------- MixCoder Single ----------");
|
||||
PRF_STR("------- MixCoder Single ----------")
|
||||
|
||||
srcLen2 = srcLenOrig;
|
||||
destLen2 = destLenOrig;
|
||||
|
@ -615,14 +647,14 @@ static SRes MixCoder_Code(CMixCoder *p,
|
|||
processed = coder->Filter(coder->p, p->outBuf, processed);
|
||||
if (wasFinished || (destFinish && p->outWritten == destLenOrig))
|
||||
processed = p->outWritten;
|
||||
PRF_STR_INT("filter", i);
|
||||
PRF_STR_INT("filter", i)
|
||||
}
|
||||
*destLen = processed;
|
||||
}
|
||||
return res;
|
||||
}
|
||||
|
||||
PRF_STR("standard mix");
|
||||
PRF_STR("standard mix")
|
||||
|
||||
if (p->numCoders != 1)
|
||||
{
|
||||
|
@ -779,7 +811,7 @@ static BoolInt Xz_CheckFooter(CXzStreamFlags flags, UInt64 indexSize, const Byte
|
|||
|
||||
static BoolInt XzBlock_AreSupportedFilters(const CXzBlock *p)
|
||||
{
|
||||
unsigned numFilters = XzBlock_GetNumFilters(p) - 1;
|
||||
const unsigned numFilters = XzBlock_GetNumFilters(p) - 1;
|
||||
unsigned i;
|
||||
{
|
||||
const CXzFilter *f = &p->filters[numFilters];
|
||||
|
@ -795,8 +827,7 @@ static BoolInt XzBlock_AreSupportedFilters(const CXzBlock *p)
|
|||
if (f->propsSize != 1)
|
||||
return False;
|
||||
}
|
||||
else if (f->id < XZ_ID_Delta
|
||||
|| f->id > XZ_ID_SPARC
|
||||
else if (!XZ_IS_SUPPORTED_FILTER_ID(f->id)
|
||||
|| (f->propsSize != 0 && f->propsSize != 4))
|
||||
return False;
|
||||
}
|
||||
|
@ -821,22 +852,24 @@ SRes XzBlock_Parse(CXzBlock *p, const Byte *header)
|
|||
p->packSize = (UInt64)(Int64)-1;
|
||||
if (XzBlock_HasPackSize(p))
|
||||
{
|
||||
READ_VARINT_AND_CHECK(header, pos, headerSize, &p->packSize);
|
||||
READ_VARINT_AND_CHECK(header, pos, headerSize, &p->packSize)
|
||||
if (p->packSize == 0 || p->packSize + headerSize >= (UInt64)1 << 63)
|
||||
return SZ_ERROR_ARCHIVE;
|
||||
}
|
||||
|
||||
p->unpackSize = (UInt64)(Int64)-1;
|
||||
if (XzBlock_HasUnpackSize(p))
|
||||
READ_VARINT_AND_CHECK(header, pos, headerSize, &p->unpackSize);
|
||||
{
|
||||
READ_VARINT_AND_CHECK(header, pos, headerSize, &p->unpackSize)
|
||||
}
|
||||
|
||||
numFilters = XzBlock_GetNumFilters(p);
|
||||
for (i = 0; i < numFilters; i++)
|
||||
{
|
||||
CXzFilter *filter = p->filters + i;
|
||||
UInt64 size;
|
||||
READ_VARINT_AND_CHECK(header, pos, headerSize, &filter->id);
|
||||
READ_VARINT_AND_CHECK(header, pos, headerSize, &size);
|
||||
READ_VARINT_AND_CHECK(header, pos, headerSize, &filter->id)
|
||||
READ_VARINT_AND_CHECK(header, pos, headerSize, &size)
|
||||
if (size > headerSize - pos || size > XZ_FILTER_PROPS_SIZE_MAX)
|
||||
return SZ_ERROR_ARCHIVE;
|
||||
filter->propsSize = (UInt32)size;
|
||||
|
@ -894,20 +927,20 @@ static SRes XzDecMix_Init(CMixCoder *p, const CXzBlock *block, Byte *outBuf, siz
|
|||
MixCoder_Free(p);
|
||||
for (i = 0; i < numFilters; i++)
|
||||
{
|
||||
RINOK(MixCoder_SetFromMethod(p, i, block->filters[numFilters - 1 - i].id, outBuf, outBufSize));
|
||||
RINOK(MixCoder_SetFromMethod(p, i, block->filters[numFilters - 1 - i].id, outBuf, outBufSize))
|
||||
}
|
||||
p->numCoders = numFilters;
|
||||
}
|
||||
else
|
||||
{
|
||||
RINOK(MixCoder_ResetFromMethod(p, 0, block->filters[numFilters - 1].id, outBuf, outBufSize));
|
||||
RINOK(MixCoder_ResetFromMethod(p, 0, block->filters[numFilters - 1].id, outBuf, outBufSize))
|
||||
}
|
||||
|
||||
for (i = 0; i < numFilters; i++)
|
||||
{
|
||||
const CXzFilter *f = &block->filters[numFilters - 1 - i];
|
||||
IStateCoder *sc = &p->coders[i];
|
||||
RINOK(sc->SetProps(sc->p, f->props, f->propsSize, p->alloc));
|
||||
RINOK(sc->SetProps(sc->p, f->props, f->propsSize, p->alloc))
|
||||
}
|
||||
|
||||
MixCoder_Init(p);
|
||||
|
@ -1054,14 +1087,14 @@ SRes XzUnpacker_Code(CXzUnpacker *p, Byte *dest, SizeT *destLen,
|
|||
(*destLen) += destLen2;
|
||||
p->unpackSize += destLen2;
|
||||
|
||||
RINOK(res);
|
||||
RINOK(res)
|
||||
|
||||
if (*status != CODER_STATUS_FINISHED_WITH_MARK)
|
||||
{
|
||||
if (p->block.packSize == p->packSize
|
||||
&& *status == CODER_STATUS_NEEDS_MORE_INPUT)
|
||||
{
|
||||
PRF_STR("CODER_STATUS_NEEDS_MORE_INPUT");
|
||||
PRF_STR("CODER_STATUS_NEEDS_MORE_INPUT")
|
||||
*status = CODER_STATUS_NOT_SPECIFIED;
|
||||
return SZ_ERROR_DATA;
|
||||
}
|
||||
|
@ -1078,7 +1111,7 @@ SRes XzUnpacker_Code(CXzUnpacker *p, Byte *dest, SizeT *destLen,
|
|||
if ((p->block.packSize != (UInt64)(Int64)-1 && p->block.packSize != p->packSize)
|
||||
|| (p->block.unpackSize != (UInt64)(Int64)-1 && p->block.unpackSize != p->unpackSize))
|
||||
{
|
||||
PRF_STR("ERROR: block.size mismatch");
|
||||
PRF_STR("ERROR: block.size mismatch")
|
||||
return SZ_ERROR_DATA;
|
||||
}
|
||||
}
|
||||
|
@ -1109,7 +1142,7 @@ SRes XzUnpacker_Code(CXzUnpacker *p, Byte *dest, SizeT *destLen,
|
|||
}
|
||||
else
|
||||
{
|
||||
RINOK(Xz_ParseHeader(&p->streamFlags, p->buf));
|
||||
RINOK(Xz_ParseHeader(&p->streamFlags, p->buf))
|
||||
p->numStartedStreams++;
|
||||
p->indexSize = 0;
|
||||
p->numBlocks = 0;
|
||||
|
@ -1155,7 +1188,7 @@ SRes XzUnpacker_Code(CXzUnpacker *p, Byte *dest, SizeT *destLen,
|
|||
}
|
||||
else
|
||||
{
|
||||
RINOK(XzBlock_Parse(&p->block, p->buf));
|
||||
RINOK(XzBlock_Parse(&p->block, p->buf))
|
||||
if (!XzBlock_AreSupportedFilters(&p->block))
|
||||
return SZ_ERROR_UNSUPPORTED;
|
||||
p->numTotalBlocks++;
|
||||
|
@ -1168,7 +1201,7 @@ SRes XzUnpacker_Code(CXzUnpacker *p, Byte *dest, SizeT *destLen,
|
|||
p->headerParsedOk = True;
|
||||
return SZ_OK;
|
||||
}
|
||||
RINOK(XzDecMix_Init(&p->decoder, &p->block, p->outBuf, p->outBufSize));
|
||||
RINOK(XzDecMix_Init(&p->decoder, &p->block, p->outBuf, p->outBufSize))
|
||||
}
|
||||
break;
|
||||
}
|
||||
|
@ -1389,7 +1422,7 @@ UInt64 XzUnpacker_GetExtraSize(const CXzUnpacker *p)
|
|||
|
||||
|
||||
|
||||
#ifndef _7ZIP_ST
|
||||
#ifndef Z7_ST
|
||||
#include "MtDec.h"
|
||||
#endif
|
||||
|
||||
|
@ -1400,7 +1433,7 @@ void XzDecMtProps_Init(CXzDecMtProps *p)
|
|||
p->outStep_ST = 1 << 20;
|
||||
p->ignoreErrors = False;
|
||||
|
||||
#ifndef _7ZIP_ST
|
||||
#ifndef Z7_ST
|
||||
p->numThreads = 1;
|
||||
p->inBufSize_MT = 1 << 18;
|
||||
p->memUseMax = sizeof(size_t) << 28;
|
||||
|
@ -1409,7 +1442,7 @@ void XzDecMtProps_Init(CXzDecMtProps *p)
|
|||
|
||||
|
||||
|
||||
#ifndef _7ZIP_ST
|
||||
#ifndef Z7_ST
|
||||
|
||||
/* ---------- CXzDecMtThread ---------- */
|
@@ -1448,7 +1481,7 @@ typedef struct

/* ---------- CXzDecMt ---------- */

typedef struct
struct CXzDecMt
{
CAlignOffsetAlloc alignOffsetAlloc;
ISzAllocPtr allocMid;

@@ -1456,9 +1489,9 @@ typedef struct
CXzDecMtProps props;
size_t unpackBlockMaxSize;

ISeqInStream *inStream;
ISeqOutStream *outStream;
ICompressProgress *progress;
ISeqInStreamPtr inStream;
ISeqOutStreamPtr outStream;
ICompressProgressPtr progress;

BoolInt finishMode;
BoolInt outSize_Defined;

@@ -1481,7 +1514,7 @@ typedef struct
ECoderStatus status;
SRes codeRes;

#ifndef _7ZIP_ST
#ifndef Z7_ST
BoolInt mainDecoderWasCalled;
// int statErrorDefined;
int finishedDecoderIndex;

@@ -1504,10 +1537,9 @@ typedef struct
BoolInt mtc_WasConstructed;
CMtDec mtc;
CXzDecMtThread coders[MTDEC__THREADS_MAX];
CXzDecMtThread coders[MTDEC_THREADS_MAX];
#endif

} CXzDecMt;
};

@@ -1535,11 +1567,11 @@ CXzDecMtHandle XzDecMt_Create(ISzAllocPtr alloc, ISzAllocPtr allocMid)
XzDecMtProps_Init(&p->props);

#ifndef _7ZIP_ST
#ifndef Z7_ST
p->mtc_WasConstructed = False;
{
unsigned i;
for (i = 0; i < MTDEC__THREADS_MAX; i++)
for (i = 0; i < MTDEC_THREADS_MAX; i++)
{
CXzDecMtThread *coder = &p->coders[i];
coder->dec_created = False;

@@ -1549,16 +1581,16 @@ CXzDecMtHandle XzDecMt_Create(ISzAllocPtr alloc, ISzAllocPtr allocMid)
}
#endif

return p;
return (CXzDecMtHandle)p;
}


#ifndef _7ZIP_ST
#ifndef Z7_ST

static void XzDecMt_FreeOutBufs(CXzDecMt *p)
{
unsigned i;
for (i = 0; i < MTDEC__THREADS_MAX; i++)
for (i = 0; i < MTDEC_THREADS_MAX; i++)
{
CXzDecMtThread *coder = &p->coders[i];
if (coder->outBuf)

@@ -1595,13 +1627,15 @@ static void XzDecMt_FreeSt(CXzDecMt *p)
}


void XzDecMt_Destroy(CXzDecMtHandle pp)
// #define GET_CXzDecMt_p CXzDecMt *p = pp;

void XzDecMt_Destroy(CXzDecMtHandle p)
{
CXzDecMt *p = (CXzDecMt *)pp;
// GET_CXzDecMt_p

XzDecMt_FreeSt(p);

#ifndef _7ZIP_ST
#ifndef Z7_ST

if (p->mtc_WasConstructed)
{

@@ -1610,7 +1644,7 @@ void XzDecMt_Destroy(CXzDecMtHandle pp)
}
{
unsigned i;
for (i = 0; i < MTDEC__THREADS_MAX; i++)
for (i = 0; i < MTDEC_THREADS_MAX; i++)
{
CXzDecMtThread *t = &p->coders[i];
if (t->dec_created)

@@ -1625,12 +1659,12 @@ void XzDecMt_Destroy(CXzDecMtHandle pp)

#endif

ISzAlloc_Free(p->alignOffsetAlloc.baseAlloc, pp);
ISzAlloc_Free(p->alignOffsetAlloc.baseAlloc, p);
}



#ifndef _7ZIP_ST
#ifndef Z7_ST

static void XzDecMt_Callback_Parse(void *obj, unsigned coderIndex, CMtDecCallbackInfo *cc)
{

@@ -1696,7 +1730,7 @@ static void XzDecMt_Callback_Parse(void *obj, unsigned coderIndex, CMtDecCallbac
coder->dec.parseMode = True;
coder->dec.headerParsedOk = False;

PRF_STR_INT("Parse", srcSize2);
PRF_STR_INT("Parse", srcSize2)

res = XzUnpacker_Code(&coder->dec,
NULL, &destSize,

@@ -2071,7 +2105,7 @@ static SRes XzDecMt_Callback_Write(void *pp, unsigned coderIndex,
}
data += cur;
size -= cur;
// PRF_STR_INT("Written size =", size);
// PRF_STR_INT("Written size =", size)
if (size == 0)
break;
res = MtProgress_ProgressAdd(&me->mtc.mtProgress, 0, 0);

@@ -2087,7 +2121,7 @@ static SRes XzDecMt_Callback_Write(void *pp, unsigned coderIndex,
return res;
}

RINOK(res);
RINOK(res)

if (coder->inPreSize != coder->inCodeSize
|| coder->blockPackTotal != coder->inCodeSize)

@@ -2106,13 +2140,13 @@ static SRes XzDecMt_Callback_Write(void *pp, unsigned coderIndex,
// (coder->state == MTDEC_PARSE_END) means that there are no other working threads
// so we can use mtc variables without lock

PRF_STR_INT("Write MTDEC_PARSE_END", me->mtc.inProcessed);
PRF_STR_INT("Write MTDEC_PARSE_END", me->mtc.inProcessed)

me->mtc.mtProgress.totalInSize = me->mtc.inProcessed;
{
CXzUnpacker *dec = &me->dec;

PRF_STR_INT("PostSingle", srcSize);
PRF_STR_INT("PostSingle", srcSize)

{
size_t srcProcessed = srcSize;

@@ -2186,7 +2220,7 @@ static SRes XzDecMt_Callback_Write(void *pp, unsigned coderIndex,
me->mtc.crossEnd = srcSize;
}

PRF_STR_INT("XZ_STATE_STREAM_HEADER crossEnd = ", (unsigned)me->mtc.crossEnd);
PRF_STR_INT("XZ_STATE_STREAM_HEADER crossEnd = ", (unsigned)me->mtc.crossEnd)

return SZ_OK;
}

@@ -2277,7 +2311,7 @@ static SRes XzDecMt_Callback_Write(void *pp, unsigned coderIndex,
UInt64 inDelta = me->mtc.inProcessed - inProgressPrev;
if (inDelta >= (1 << 22))
{
RINOK(MtProgress_Progress_ST(&me->mtc.mtProgress));
RINOK(MtProgress_Progress_ST(&me->mtc.mtProgress))
inProgressPrev = me->mtc.inProcessed;
}
}

@@ -2331,7 +2365,7 @@ void XzStatInfo_Clear(CXzStatInfo *p)
*/

static SRes XzDecMt_Decode_ST(CXzDecMt *p
#ifndef _7ZIP_ST
#ifndef Z7_ST
, BoolInt tMode
#endif
, CXzStatInfo *stat)

@@ -2343,7 +2377,7 @@ static SRes XzDecMt_Decode_ST(CXzDecMt *p

CXzUnpacker *dec;

#ifndef _7ZIP_ST
#ifndef Z7_ST
if (tMode)
{
XzDecMt_FreeOutBufs(p);

@@ -2400,7 +2434,7 @@ static SRes XzDecMt_Decode_ST(CXzDecMt *p

if (inPos == inLim)
{
#ifndef _7ZIP_ST
#ifndef Z7_ST
if (tMode)
{
inData = MtDec_Read(&p->mtc, &inLim);

@@ -2577,19 +2611,19 @@ static void XzStatInfo_SetStat(const CXzUnpacker *dec,


SRes XzDecMt_Decode(CXzDecMtHandle pp,
SRes XzDecMt_Decode(CXzDecMtHandle p,
const CXzDecMtProps *props,
const UInt64 *outDataSize, int finishMode,
ISeqOutStream *outStream,
ISeqOutStreamPtr outStream,
// Byte *outBuf, size_t *outBufSize,
ISeqInStream *inStream,
ISeqInStreamPtr inStream,
// const Byte *inData, size_t inDataSize,
CXzStatInfo *stat,
int *isMT,
ICompressProgress *progress)
ICompressProgressPtr progress)
{
CXzDecMt *p = (CXzDecMt *)pp;
#ifndef _7ZIP_ST
// GET_CXzDecMt_p
#ifndef Z7_ST
BoolInt tMode;
#endif

@@ -2640,7 +2674,7 @@ SRes XzDecMt_Decode(CXzDecMtHandle pp,
*/


#ifndef _7ZIP_ST
#ifndef Z7_ST

p->isBlockHeaderState_Parse = False;
p->isBlockHeaderState_Write = False;

@@ -2782,7 +2816,7 @@ SRes XzDecMt_Decode(CXzDecMtHandle pp,
return res;
}

PRF_STR("----- decoding ST -----");
PRF_STR("----- decoding ST -----")
}

#endif

@@ -2792,13 +2826,13 @@ SRes XzDecMt_Decode(CXzDecMtHandle pp,

{
SRes res = XzDecMt_Decode_ST(p
#ifndef _7ZIP_ST
#ifndef Z7_ST
, tMode
#endif
, stat
);

#ifndef _7ZIP_ST
#ifndef Z7_ST
// we must set error code from MT decoding at first
if (p->mainErrorCode != SZ_OK)
stat->DecodeRes = p->mainErrorCode;

@@ -2835,3 +2869,7 @@ SRes XzDecMt_Decode(CXzDecMtHandle pp,
return res;
}
}

#undef PRF
#undef PRF_STR
#undef PRF_STR_INT_2
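Note on the decoder hunks above: apart from the _7ZIP_ST to Z7_ST and MTDEC__THREADS_MAX to MTDEC_THREADS_MAX renames, the public entry points now take the ISeqInStreamPtr / ISeqOutStreamPtr / ICompressProgressPtr typedefs and an opaque CXzDecMtHandle. A minimal caller-side sketch against the signatures shown in these hunks; the wrapper name is illustrative, the streams are assumed to be supplied by the caller, and the Xz.h include reflects an assumption about where this SDK drop declares XzDecMt_*:

#include "Alloc.h"
#include "Xz.h"   /* assumed location of the XzDecMt_* declarations in this SDK layout */

static SRes DecodeXz_MT_Sketch(ISeqInStreamPtr inStream, ISeqOutStreamPtr outStream)
{
  /* hedged sketch only: error handling is reduced to the SRes result */
  CXzDecMtProps props;
  CXzStatInfo stat;
  int isMT = 0;
  SRes res;
  CXzDecMtHandle dec = XzDecMt_Create(&g_Alloc, &g_BigAlloc);
  if (!dec)
    return SZ_ERROR_MEM;
  XzDecMtProps_Init(&props);
  res = XzDecMt_Decode(dec, &props,
      NULL,       /* outDataSize: not known in advance in this sketch */
      1,          /* finishMode */
      outStream,  /* ISeqOutStreamPtr */
      inStream,   /* ISeqInStreamPtr */
      &stat, &isMT,
      NULL);      /* progress callback omitted in this sketch */
  XzDecMt_Destroy(dec);
  return res;
}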
@@ -1,5 +1,5 @@
/* XzEnc.c -- Xz Encode
2021-04-01 : Igor Pavlov : Public domain */
2023-04-13 : Igor Pavlov : Public domain */

#include "Precomp.h"

@@ -18,13 +18,13 @@

#include "XzEnc.h"

// #define _7ZIP_ST
// #define Z7_ST

#ifndef _7ZIP_ST
#ifndef Z7_ST
#include "MtCoder.h"
#else
#define MTCODER__THREADS_MAX 1
#define MTCODER__BLOCKS_MAX 1
#define MTCODER_THREADS_MAX 1
#define MTCODER_BLOCKS_MAX 1
#endif

#define XZ_GET_PAD_SIZE(dataSize) ((4 - ((unsigned)(dataSize) & 3)) & 3)

@@ -35,25 +35,25 @@
#define XZ_GET_ESTIMATED_BLOCK_TOTAL_PACK_SIZE(unpackSize) (XZ_BLOCK_HEADER_SIZE_MAX + XZ_GET_MAX_BLOCK_PACK_SIZE(unpackSize))


#define XzBlock_ClearFlags(p) (p)->flags = 0;
#define XzBlock_SetNumFilters(p, n) (p)->flags = (Byte)((p)->flags | ((n) - 1));
// #define XzBlock_ClearFlags(p) (p)->flags = 0;
#define XzBlock_ClearFlags_SetNumFilters(p, n) (p)->flags = (Byte)((n) - 1);
#define XzBlock_SetHasPackSize(p) (p)->flags |= XZ_BF_PACK_SIZE;
#define XzBlock_SetHasUnpackSize(p) (p)->flags |= XZ_BF_UNPACK_SIZE;


static SRes WriteBytes(ISeqOutStream *s, const void *buf, size_t size)
static SRes WriteBytes(ISeqOutStreamPtr s, const void *buf, size_t size)
{
return (ISeqOutStream_Write(s, buf, size) == size) ? SZ_OK : SZ_ERROR_WRITE;
}

static SRes WriteBytesUpdateCrc(ISeqOutStream *s, const void *buf, size_t size, UInt32 *crc)
static SRes WriteBytes_UpdateCrc(ISeqOutStreamPtr s, const void *buf, size_t size, UInt32 *crc)
{
*crc = CrcUpdate(*crc, buf, size);
return WriteBytes(s, buf, size);
}


static SRes Xz_WriteHeader(CXzStreamFlags f, ISeqOutStream *s)
static SRes Xz_WriteHeader(CXzStreamFlags f, ISeqOutStreamPtr s)
{
UInt32 crc;
Byte header[XZ_STREAM_HEADER_SIZE];

@@ -61,12 +61,12 @@ static SRes Xz_WriteHeader(CXzStreamFlags f, ISeqOutStream *s)
header[XZ_SIG_SIZE] = (Byte)(f >> 8);
header[XZ_SIG_SIZE + 1] = (Byte)(f & 0xFF);
crc = CrcCalc(header + XZ_SIG_SIZE, XZ_STREAM_FLAGS_SIZE);
SetUi32(header + XZ_SIG_SIZE + XZ_STREAM_FLAGS_SIZE, crc);
SetUi32(header + XZ_SIG_SIZE + XZ_STREAM_FLAGS_SIZE, crc)
return WriteBytes(s, header, XZ_STREAM_HEADER_SIZE);
}


static SRes XzBlock_WriteHeader(const CXzBlock *p, ISeqOutStream *s)
static SRes XzBlock_WriteHeader(const CXzBlock *p, ISeqOutStreamPtr s)
{
Byte header[XZ_BLOCK_HEADER_SIZE_MAX];

@@ -91,7 +91,7 @@ static SRes XzBlock_WriteHeader(const CXzBlock *p, ISeqOutStream *s)
header[pos++] = 0;

header[0] = (Byte)(pos >> 2);
SetUi32(header + pos, CrcCalc(header, pos));
SetUi32(header + pos, CrcCalc(header, pos))
return WriteBytes(s, header, pos + 4);
}

@@ -182,7 +182,7 @@ static SRes XzEncIndex_AddIndexRecord(CXzEncIndex *p, UInt64 unpackSize, UInt64
size_t newSize = p->allocated * 2 + 16 * 2;
if (newSize < p->size + pos)
return SZ_ERROR_MEM;
RINOK(XzEncIndex_ReAlloc(p, newSize, alloc));
RINOK(XzEncIndex_ReAlloc(p, newSize, alloc))
}
memcpy(p->blocks + p->size, buf, pos);
p->size += pos;

@@ -191,7 +191,7 @@ static SRes XzEncIndex_AddIndexRecord(CXzEncIndex *p, UInt64 unpackSize, UInt64
}


static SRes XzEncIndex_WriteFooter(const CXzEncIndex *p, CXzStreamFlags flags, ISeqOutStream *s)
static SRes XzEncIndex_WriteFooter(const CXzEncIndex *p, CXzStreamFlags flags, ISeqOutStreamPtr s)
{
Byte buf[32];
UInt64 globalPos;

@@ -200,8 +200,8 @@ static SRes XzEncIndex_WriteFooter(const CXzEncIndex *p, CXzStreamFlags flags, I

globalPos = pos;
buf[0] = 0;
RINOK(WriteBytesUpdateCrc(s, buf, pos, &crc));
RINOK(WriteBytesUpdateCrc(s, p->blocks, p->size, &crc));
RINOK(WriteBytes_UpdateCrc(s, buf, pos, &crc))
RINOK(WriteBytes_UpdateCrc(s, p->blocks, p->size, &crc))
globalPos += p->size;

pos = XZ_GET_PAD_SIZE(globalPos);

@@ -211,12 +211,12 @@ static SRes XzEncIndex_WriteFooter(const CXzEncIndex *p, CXzStreamFlags flags, I
globalPos += pos;

crc = CrcUpdate(crc, buf + 4 - pos, pos);
SetUi32(buf + 4, CRC_GET_DIGEST(crc));
SetUi32(buf + 4, CRC_GET_DIGEST(crc))

SetUi32(buf + 8 + 4, (UInt32)(globalPos >> 2));
SetUi32(buf + 8 + 4, (UInt32)(globalPos >> 2))
buf[8 + 8] = (Byte)(flags >> 8);
buf[8 + 9] = (Byte)(flags & 0xFF);
SetUi32(buf + 8, CrcCalc(buf + 8 + 4, 6));
SetUi32(buf + 8, CrcCalc(buf + 8 + 4, 6))
buf[8 + 10] = XZ_FOOTER_SIG_0;
buf[8 + 11] = XZ_FOOTER_SIG_1;

@@ -230,7 +230,7 @@ static SRes XzEncIndex_WriteFooter(const CXzEncIndex *p, CXzStreamFlags flags, I
typedef struct
{
ISeqInStream vt;
ISeqInStream *realStream;
ISeqInStreamPtr realStream;
const Byte *data;
UInt64 limit;
UInt64 processed;

@@ -251,9 +251,9 @@ static void SeqCheckInStream_GetDigest(CSeqCheckInStream *p, Byte *digest)
XzCheck_Final(&p->check, digest);
}

static SRes SeqCheckInStream_Read(const ISeqInStream *pp, void *data, size_t *size)
static SRes SeqCheckInStream_Read(ISeqInStreamPtr pp, void *data, size_t *size)
{
CSeqCheckInStream *p = CONTAINER_FROM_VTBL(pp, CSeqCheckInStream, vt);
Z7_CONTAINER_FROM_VTBL_TO_DECL_VAR_pp_vt_p(CSeqCheckInStream)
size_t size2 = *size;
SRes res = SZ_OK;

@@ -285,15 +285,15 @@ static SRes SeqCheckInStream_Read(const ISeqInStream *pp, void *data, size_t *si
typedef struct
{
ISeqOutStream vt;
ISeqOutStream *realStream;
ISeqOutStreamPtr realStream;
Byte *outBuf;
size_t outBufLimit;
UInt64 processed;
} CSeqSizeOutStream;

static size_t SeqSizeOutStream_Write(const ISeqOutStream *pp, const void *data, size_t size)
static size_t SeqSizeOutStream_Write(ISeqOutStreamPtr pp, const void *data, size_t size)
{
CSeqSizeOutStream *p = CONTAINER_FROM_VTBL(pp, CSeqSizeOutStream, vt);
Z7_CONTAINER_FROM_VTBL_TO_DECL_VAR_pp_vt_p(CSeqSizeOutStream)
if (p->realStream)
size = ISeqOutStream_Write(p->realStream, data, size);
else

@@ -313,8 +313,8 @@ static size_t SeqSizeOutStream_Write(const ISeqOutStream *pp, const void *data,

typedef struct
{
ISeqInStream p;
ISeqInStream *realStream;
ISeqInStream vt;
ISeqInStreamPtr realStream;
IStateCoder StateCoder;
Byte *buf;
size_t curPos;

@@ -323,7 +323,39 @@ typedef struct
} CSeqInFilter;


SRes BraState_SetFromMethod(IStateCoder *p, UInt64 id, int encodeMode, ISzAllocPtr alloc);
static const z7_Func_BranchConv g_Funcs_BranchConv_RISC_Enc[] =
{
Z7_BRANCH_CONV_ENC(PPC),
Z7_BRANCH_CONV_ENC(IA64),
Z7_BRANCH_CONV_ENC(ARM),
Z7_BRANCH_CONV_ENC(ARMT),
Z7_BRANCH_CONV_ENC(SPARC),
Z7_BRANCH_CONV_ENC(ARM64)
};

static SizeT XzBcFilterStateBase_Filter_Enc(CXzBcFilterStateBase *p, Byte *data, SizeT size)
{
switch (p->methodId)
{
case XZ_ID_Delta:
Delta_Encode(p->delta_State, p->delta, data, size);
break;
case XZ_ID_X86:
size = (SizeT)(z7_BranchConvSt_X86_Enc(data, size, p->ip, &p->X86_State) - data);
break;
default:
if (p->methodId >= XZ_ID_PPC)
{
const UInt32 i = p->methodId - XZ_ID_PPC;
if (i < Z7_ARRAY_SIZE(g_Funcs_BranchConv_RISC_Enc))
size = (SizeT)(g_Funcs_BranchConv_RISC_Enc[i](data, size, p->ip) - data);
}
break;
}
p->ip += (UInt32)size;
return size;
}


static SRes SeqInFilter_Init(CSeqInFilter *p, const CXzFilter *props, ISzAllocPtr alloc)
{

@@ -335,17 +367,17 @@ static SRes SeqInFilter_Init(CSeqInFilter *p, const CXzFilter *props, ISzAllocPt
}
p->curPos = p->endPos = 0;
p->srcWasFinished = 0;
RINOK(BraState_SetFromMethod(&p->StateCoder, props->id, 1, alloc));
RINOK(p->StateCoder.SetProps(p->StateCoder.p, props->props, props->propsSize, alloc));
RINOK(Xz_StateCoder_Bc_SetFromMethod_Func(&p->StateCoder, props->id, XzBcFilterStateBase_Filter_Enc, alloc))
RINOK(p->StateCoder.SetProps(p->StateCoder.p, props->props, props->propsSize, alloc))
p->StateCoder.Init(p->StateCoder.p);
return SZ_OK;
}


static SRes SeqInFilter_Read(const ISeqInStream *pp, void *data, size_t *size)
static SRes SeqInFilter_Read(ISeqInStreamPtr pp, void *data, size_t *size)
{
CSeqInFilter *p = CONTAINER_FROM_VTBL(pp, CSeqInFilter, p);
size_t sizeOriginal = *size;
Z7_CONTAINER_FROM_VTBL_TO_DECL_VAR_pp_vt_p(CSeqInFilter)
const size_t sizeOriginal = *size;
if (sizeOriginal == 0)
return SZ_OK;
*size = 0;

@@ -356,7 +388,7 @@ static SRes SeqInFilter_Read(const ISeqInStream *pp, void *data, size_t *size)
{
p->curPos = 0;
p->endPos = FILTER_BUF_SIZE;
RINOK(ISeqInStream_Read(p->realStream, p->buf, &p->endPos));
RINOK(ISeqInStream_Read(p->realStream, p->buf, &p->endPos))
if (p->endPos == 0)
p->srcWasFinished = 1;
}

@@ -381,7 +413,7 @@ static void SeqInFilter_Construct(CSeqInFilter *p)
{
p->buf = NULL;
p->StateCoder.p = NULL;
p->p.Read = SeqInFilter_Read;
p->vt.Read = SeqInFilter_Read;
}

static void SeqInFilter_Free(CSeqInFilter *p, ISzAllocPtr alloc)

@@ -406,13 +438,13 @@ static void SeqInFilter_Free(CSeqInFilter *p, ISzAllocPtr alloc)
typedef struct
{
ISeqInStream vt;
ISeqInStream *inStream;
ISeqInStreamPtr inStream;
CSbEnc enc;
} CSbEncInStream;

static SRes SbEncInStream_Read(const ISeqInStream *pp, void *data, size_t *size)
static SRes SbEncInStream_Read(ISeqInStreamPtr pp, void *data, size_t *size)
{
CSbEncInStream *p = CONTAINER_FROM_VTBL(pp, CSbEncInStream, vt);
CSbEncInStream *p = Z7_CONTAINER_FROM_VTBL(pp, CSbEncInStream, vt);
size_t sizeOriginal = *size;
if (sizeOriginal == 0)
return SZ_OK;

@@ -422,7 +454,7 @@ static SRes SbEncInStream_Read(const ISeqInStream *pp, void *data, size_t *size)
if (p->enc.needRead && !p->enc.readWasFinished)
{
size_t processed = p->enc.needReadSizeMax;
RINOK(p->inStream->Read(p->inStream, p->enc.buf + p->enc.readPos, &processed));
RINOK(p->inStream->Read(p->inStream, p->enc.buf + p->enc.readPos, &processed))
p->enc.readPos += processed;
if (processed == 0)
{

@@ -433,7 +465,7 @@ static SRes SbEncInStream_Read(const ISeqInStream *pp, void *data, size_t *size)
}

*size = sizeOriginal;
RINOK(SbEnc_Read(&p->enc, data, size));
RINOK(SbEnc_Read(&p->enc, data, size))
if (*size != 0 || !p->enc.needRead)
return SZ_OK;
}

@@ -473,7 +505,7 @@ void XzFilterProps_Init(CXzFilterProps *p)
void XzProps_Init(CXzProps *p)
{
p->checkId = XZ_CHECK_CRC32;
p->blockSize = XZ_PROPS__BLOCK_SIZE__AUTO;
p->blockSize = XZ_PROPS_BLOCK_SIZE_AUTO;
p->numBlockThreads_Reduced = -1;
p->numBlockThreads_Max = -1;
p->numTotalThreads = -1;

@@ -502,8 +534,8 @@ static void XzEncProps_Normalize_Fixed(CXzProps *p)
t2 = p->numBlockThreads_Max;
t3 = p->numTotalThreads;

if (t2 > MTCODER__THREADS_MAX)
t2 = MTCODER__THREADS_MAX;
if (t2 > MTCODER_THREADS_MAX)
t2 = MTCODER_THREADS_MAX;

if (t3 <= 0)
{

@@ -519,8 +551,8 @@ static void XzEncProps_Normalize_Fixed(CXzProps *p)
t1 = 1;
t2 = t3;
}
if (t2 > MTCODER__THREADS_MAX)
t2 = MTCODER__THREADS_MAX;
if (t2 > MTCODER_THREADS_MAX)
t2 = MTCODER_THREADS_MAX;
}
else if (t1 <= 0)
{

@@ -571,7 +603,7 @@ static void XzProps_Normalize(CXzProps *p)
/* we normalize xzProps properties, but we normalize only some of CXzProps::lzma2Props properties.
Lzma2Enc_SetProps() will normalize lzma2Props later. */

if (p->blockSize == XZ_PROPS__BLOCK_SIZE__SOLID)
if (p->blockSize == XZ_PROPS_BLOCK_SIZE_SOLID)
{
p->lzma2Props.lzmaProps.reduceSize = p->reduceSize;
p->numBlockThreads_Reduced = 1;

@@ -583,15 +615,15 @@ static void XzProps_Normalize(CXzProps *p)
else
{
CLzma2EncProps *lzma2 = &p->lzma2Props;
if (p->blockSize == LZMA2_ENC_PROPS__BLOCK_SIZE__AUTO)
if (p->blockSize == LZMA2_ENC_PROPS_BLOCK_SIZE_AUTO)
{
// xz-auto
p->lzma2Props.lzmaProps.reduceSize = p->reduceSize;

if (lzma2->blockSize == LZMA2_ENC_PROPS__BLOCK_SIZE__SOLID)
if (lzma2->blockSize == LZMA2_ENC_PROPS_BLOCK_SIZE_SOLID)
{
// if (xz-auto && lzma2-solid) - we use solid for both
p->blockSize = XZ_PROPS__BLOCK_SIZE__SOLID;
p->blockSize = XZ_PROPS_BLOCK_SIZE_SOLID;
p->numBlockThreads_Reduced = 1;
p->numBlockThreads_Max = 1;
if (p->lzma2Props.numTotalThreads <= 0)

@@ -610,9 +642,9 @@ static void XzProps_Normalize(CXzProps *p)
p->blockSize = tp.blockSize; // fixed or solid
p->numBlockThreads_Reduced = tp.numBlockThreads_Reduced;
p->numBlockThreads_Max = tp.numBlockThreads_Max;
if (lzma2->blockSize == LZMA2_ENC_PROPS__BLOCK_SIZE__AUTO)
lzma2->blockSize = tp.blockSize; // fixed or solid, LZMA2_ENC_PROPS__BLOCK_SIZE__SOLID
if (lzma2->lzmaProps.reduceSize > tp.blockSize && tp.blockSize != LZMA2_ENC_PROPS__BLOCK_SIZE__SOLID)
if (lzma2->blockSize == LZMA2_ENC_PROPS_BLOCK_SIZE_AUTO)
lzma2->blockSize = tp.blockSize; // fixed or solid, LZMA2_ENC_PROPS_BLOCK_SIZE_SOLID
if (lzma2->lzmaProps.reduceSize > tp.blockSize && tp.blockSize != LZMA2_ENC_PROPS_BLOCK_SIZE_SOLID)
lzma2->lzmaProps.reduceSize = tp.blockSize;
lzma2->numBlockThreads_Reduced = 1;
lzma2->numBlockThreads_Max = 1;

@@ -631,9 +663,9 @@ static void XzProps_Normalize(CXzProps *p)
r = p->blockSize;
lzma2->lzmaProps.reduceSize = r;
}
if (lzma2->blockSize == LZMA2_ENC_PROPS__BLOCK_SIZE__AUTO)
lzma2->blockSize = LZMA2_ENC_PROPS__BLOCK_SIZE__SOLID;
else if (lzma2->blockSize > p->blockSize && lzma2->blockSize != LZMA2_ENC_PROPS__BLOCK_SIZE__SOLID)
if (lzma2->blockSize == LZMA2_ENC_PROPS_BLOCK_SIZE_AUTO)
lzma2->blockSize = LZMA2_ENC_PROPS_BLOCK_SIZE_SOLID;
else if (lzma2->blockSize > p->blockSize && lzma2->blockSize != LZMA2_ENC_PROPS_BLOCK_SIZE_SOLID)
lzma2->blockSize = p->blockSize;

XzEncProps_Normalize_Fixed(p);

@@ -704,17 +736,17 @@ typedef struct
static SRes Xz_CompressBlock(
CLzma2WithFilters *lzmaf,

ISeqOutStream *outStream,
ISeqOutStreamPtr outStream,
Byte *outBufHeader,
Byte *outBufData, size_t outBufDataLimit,

ISeqInStream *inStream,
ISeqInStreamPtr inStream,
// UInt64 expectedSize,
const Byte *inBuf, // used if (!inStream)
size_t inBufSize, // used if (!inStream), it's block size, props->blockSize is ignored

const CXzProps *props,
ICompressProgress *progress,
ICompressProgressPtr progress,
int *inStreamFinished, /* only for inStream version */
CXzEncBlockInfo *blockSizes,
ISzAllocPtr alloc,

@@ -731,12 +763,12 @@ static SRes Xz_CompressBlock(

*inStreamFinished = False;

RINOK(Lzma2WithFilters_Create(lzmaf, alloc, allocBig));
RINOK(Lzma2WithFilters_Create(lzmaf, alloc, allocBig))

RINOK(Lzma2Enc_SetProps(lzmaf->lzma2, &props->lzma2Props));
RINOK(Lzma2Enc_SetProps(lzmaf->lzma2, &props->lzma2Props))

XzBlock_ClearFlags(&block);
XzBlock_SetNumFilters(&block, 1 + (fp ? 1 : 0));
// XzBlock_ClearFlags(&block)
XzBlock_ClearFlags_SetNumFilters(&block, 1 + (fp ? 1 : 0))

if (fp)
{

@@ -752,7 +784,7 @@ static SRes Xz_CompressBlock(
else if (fp->ipDefined)
{
Byte *ptr = filter->props;
SetUi32(ptr, fp->ip);
SetUi32(ptr, fp->ip)
filter->propsSize = 4;
}
}

@@ -777,13 +809,13 @@ static SRes Xz_CompressBlock(
if (props->blockSize != (UInt64)(Int64)-1)
if (expectedSize > props->blockSize)
block.unpackSize = props->blockSize;
XzBlock_SetHasUnpackSize(&block);
XzBlock_SetHasUnpackSize(&block)
}
*/

if (outStream)
{
RINOK(XzBlock_WriteHeader(&block, &seqSizeOutStream.vt));
RINOK(XzBlock_WriteHeader(&block, &seqSizeOutStream.vt))
}

checkInStream.vt.Read = SeqCheckInStream_Read;

@@ -801,13 +833,13 @@ static SRes Xz_CompressBlock(
if (fp->id == XZ_ID_Subblock)
{
lzmaf->sb.inStream = &checkInStream.vt;
RINOK(SbEncInStream_Init(&lzmaf->sb));
RINOK(SbEncInStream_Init(&lzmaf->sb))
}
else
#endif
{
lzmaf->filter.realStream = &checkInStream.vt;
RINOK(SeqInFilter_Init(&lzmaf->filter, filter, alloc));
RINOK(SeqInFilter_Init(&lzmaf->filter, filter, alloc))
}
}

@@ -841,7 +873,7 @@ static SRes Xz_CompressBlock(
#ifdef USE_SUBBLOCK
(fp->id == XZ_ID_Subblock) ? &lzmaf->sb.vt:
#endif
&lzmaf->filter.p) :
&lzmaf->filter.vt) :
&checkInStream.vt) : NULL,

useStream ? NULL : inBuf,

@@ -852,7 +884,7 @@ static SRes Xz_CompressBlock(
if (outBuf)
seqSizeOutStream.processed += outSize;

RINOK(res);
RINOK(res)
blockSizes->unpackSize = checkInStream.processed;
}
{

@@ -866,7 +898,7 @@ static SRes Xz_CompressBlock(
buf[3] = 0;

SeqCheckInStream_GetDigest(&checkInStream, buf + 4);
RINOK(WriteBytes(&seqSizeOutStream.vt, buf + (4 - padSize), padSize + XzFlags_GetCheckSize((CXzStreamFlags)props->checkId)));
RINOK(WriteBytes(&seqSizeOutStream.vt, buf + (4 - padSize), padSize + XzFlags_GetCheckSize((CXzStreamFlags)props->checkId)))

blockSizes->totalSize = seqSizeOutStream.processed - padSize;

@@ -877,12 +909,12 @@ static SRes Xz_CompressBlock(
seqSizeOutStream.processed = 0;

block.unpackSize = blockSizes->unpackSize;
XzBlock_SetHasUnpackSize(&block);
XzBlock_SetHasUnpackSize(&block)

block.packSize = packSize;
XzBlock_SetHasPackSize(&block);
XzBlock_SetHasPackSize(&block)

RINOK(XzBlock_WriteHeader(&block, &seqSizeOutStream.vt));
RINOK(XzBlock_WriteHeader(&block, &seqSizeOutStream.vt))

blockSizes->headerSize = (size_t)seqSizeOutStream.processed;
blockSizes->totalSize += seqSizeOutStream.processed;

@@ -906,15 +938,15 @@ static SRes Xz_CompressBlock(
typedef struct
{
ICompressProgress vt;
ICompressProgress *progress;
ICompressProgressPtr progress;
UInt64 inOffset;
UInt64 outOffset;
} CCompressProgress_XzEncOffset;


static SRes CompressProgress_XzEncOffset_Progress(const ICompressProgress *pp, UInt64 inSize, UInt64 outSize)
static SRes CompressProgress_XzEncOffset_Progress(ICompressProgressPtr pp, UInt64 inSize, UInt64 outSize)
{
const CCompressProgress_XzEncOffset *p = CONTAINER_FROM_VTBL(pp, CCompressProgress_XzEncOffset, vt);
const CCompressProgress_XzEncOffset *p = Z7_CONTAINER_FROM_VTBL_CONST(pp, CCompressProgress_XzEncOffset, vt);
inSize += p->inOffset;
outSize += p->outOffset;
return ICompressProgress_Progress(p->progress, inSize, outSize);

@@ -923,7 +955,7 @@ static SRes CompressProgress_XzEncOffset_Progress(const ICompressProgress *pp, U


typedef struct
struct CXzEnc
{
ISzAllocPtr alloc;
ISzAllocPtr allocBig;

@@ -933,20 +965,19 @@ typedef struct

CXzEncIndex xzIndex;

CLzma2WithFilters lzmaf_Items[MTCODER__THREADS_MAX];
CLzma2WithFilters lzmaf_Items[MTCODER_THREADS_MAX];

size_t outBufSize; /* size of allocated outBufs[i] */
Byte *outBufs[MTCODER__BLOCKS_MAX];
Byte *outBufs[MTCODER_BLOCKS_MAX];

#ifndef _7ZIP_ST
#ifndef Z7_ST
unsigned checkType;
ISeqOutStream *outStream;
ISeqOutStreamPtr outStream;
BoolInt mtCoder_WasConstructed;
CMtCoder mtCoder;
CXzEncBlockInfo EncBlocks[MTCODER__BLOCKS_MAX];
CXzEncBlockInfo EncBlocks[MTCODER_BLOCKS_MAX];
#endif

} CXzEnc;
};


static void XzEnc_Construct(CXzEnc *p)

@@ -955,13 +986,13 @@ static void XzEnc_Construct(CXzEnc *p)

XzEncIndex_Construct(&p->xzIndex);

for (i = 0; i < MTCODER__THREADS_MAX; i++)
for (i = 0; i < MTCODER_THREADS_MAX; i++)
Lzma2WithFilters_Construct(&p->lzmaf_Items[i]);

#ifndef _7ZIP_ST
#ifndef Z7_ST
p->mtCoder_WasConstructed = False;
{
for (i = 0; i < MTCODER__BLOCKS_MAX; i++)
for (i = 0; i < MTCODER_BLOCKS_MAX; i++)
p->outBufs[i] = NULL;
p->outBufSize = 0;
}

@@ -972,7 +1003,7 @@ static void XzEnc_Construct(CXzEnc *p)
static void XzEnc_FreeOutBufs(CXzEnc *p)
{
unsigned i;
for (i = 0; i < MTCODER__BLOCKS_MAX; i++)
for (i = 0; i < MTCODER_BLOCKS_MAX; i++)
if (p->outBufs[i])
{
ISzAlloc_Free(p->alloc, p->outBufs[i]);

@@ -988,10 +1019,10 @@ static void XzEnc_Free(CXzEnc *p, ISzAllocPtr alloc)

XzEncIndex_Free(&p->xzIndex, alloc);

for (i = 0; i < MTCODER__THREADS_MAX; i++)
for (i = 0; i < MTCODER_THREADS_MAX; i++)
Lzma2WithFilters_Free(&p->lzmaf_Items[i], alloc);

#ifndef _7ZIP_ST
#ifndef Z7_ST
if (p->mtCoder_WasConstructed)
{
MtCoder_Destruct(&p->mtCoder);

@@ -1013,37 +1044,38 @@ CXzEncHandle XzEnc_Create(ISzAllocPtr alloc, ISzAllocPtr allocBig)
p->expectedDataSize = (UInt64)(Int64)-1;
p->alloc = alloc;
p->allocBig = allocBig;
return p;
return (CXzEncHandle)p;
}

// #define GET_CXzEnc_p CXzEnc *p = (CXzEnc *)(void *)pp;

void XzEnc_Destroy(CXzEncHandle pp)
void XzEnc_Destroy(CXzEncHandle p)
{
CXzEnc *p = (CXzEnc *)pp;
// GET_CXzEnc_p
XzEnc_Free(p, p->alloc);
ISzAlloc_Free(p->alloc, p);
}


SRes XzEnc_SetProps(CXzEncHandle pp, const CXzProps *props)
SRes XzEnc_SetProps(CXzEncHandle p, const CXzProps *props)
{
CXzEnc *p = (CXzEnc *)pp;
// GET_CXzEnc_p
p->xzProps = *props;
XzProps_Normalize(&p->xzProps);
return SZ_OK;
}


void XzEnc_SetDataSize(CXzEncHandle pp, UInt64 expectedDataSiize)
void XzEnc_SetDataSize(CXzEncHandle p, UInt64 expectedDataSiize)
{
CXzEnc *p = (CXzEnc *)pp;
// GET_CXzEnc_p
p->expectedDataSize = expectedDataSiize;
}



#ifndef _7ZIP_ST
#ifndef Z7_ST

static SRes XzEnc_MtCallback_Code(void *pp, unsigned coderIndex, unsigned outBufIndex,
const Byte *src, size_t srcSize, int finished)

@@ -1073,7 +1105,7 @@ static SRes XzEnc_MtCallback_Code(void *pp, unsigned coderIndex, unsigned outBuf

MtProgressThunk_CreateVTable(&progressThunk);
progressThunk.mtProgress = &me->mtCoder.mtProgress;
MtProgressThunk_Init(&progressThunk);
MtProgressThunk_INIT(&progressThunk)

{
CXzEncBlockInfo blockSizes;

@@ -1112,11 +1144,11 @@ static SRes XzEnc_MtCallback_Write(void *pp, unsigned outBufIndex)
const CXzEncBlockInfo *bInfo = &me->EncBlocks[outBufIndex];
const Byte *data = me->outBufs[outBufIndex];

RINOK(WriteBytes(me->outStream, data, bInfo->headerSize));
RINOK(WriteBytes(me->outStream, data, bInfo->headerSize))

{
UInt64 totalPackFull = bInfo->totalSize + XZ_GET_PAD_SIZE(bInfo->totalSize);
RINOK(WriteBytes(me->outStream, data + XZ_BLOCK_HEADER_SIZE_MAX, (size_t)totalPackFull - bInfo->headerSize));
RINOK(WriteBytes(me->outStream, data + XZ_BLOCK_HEADER_SIZE_MAX, (size_t)totalPackFull - bInfo->headerSize))
}

return XzEncIndex_AddIndexRecord(&me->xzIndex, bInfo->unpackSize, bInfo->totalSize, me->alloc);

@@ -1126,9 +1158,9 @@ static SRes XzEnc_MtCallback_Write(void *pp, unsigned outBufIndex)


SRes XzEnc_Encode(CXzEncHandle pp, ISeqOutStream *outStream, ISeqInStream *inStream, ICompressProgress *progress)
SRes XzEnc_Encode(CXzEncHandle p, ISeqOutStreamPtr outStream, ISeqInStreamPtr inStream, ICompressProgressPtr progress)
{
CXzEnc *p = (CXzEnc *)pp;
// GET_CXzEnc_p

const CXzProps *props = &p->xzProps;

@@ -1137,7 +1169,7 @@ SRes XzEnc_Encode(CXzEncHandle pp, ISeqOutStream *outStream, ISeqInStream *inStr
UInt64 numBlocks = 1;
UInt64 blockSize = props->blockSize;

if (blockSize != XZ_PROPS__BLOCK_SIZE__SOLID
if (blockSize != XZ_PROPS_BLOCK_SIZE_SOLID
&& props->reduceSize != (UInt64)(Int64)-1)
{
numBlocks = props->reduceSize / blockSize;

@@ -1147,13 +1179,13 @@ SRes XzEnc_Encode(CXzEncHandle pp, ISeqOutStream *outStream, ISeqInStream *inStr
else
blockSize = (UInt64)1 << 62;

RINOK(XzEncIndex_PreAlloc(&p->xzIndex, numBlocks, blockSize, XZ_GET_ESTIMATED_BLOCK_TOTAL_PACK_SIZE(blockSize), p->alloc));
RINOK(XzEncIndex_PreAlloc(&p->xzIndex, numBlocks, blockSize, XZ_GET_ESTIMATED_BLOCK_TOTAL_PACK_SIZE(blockSize), p->alloc))
}

RINOK(Xz_WriteHeader((CXzStreamFlags)props->checkId, outStream));
RINOK(Xz_WriteHeader((CXzStreamFlags)props->checkId, outStream))


#ifndef _7ZIP_ST
#ifndef Z7_ST
if (props->numBlockThreads_Reduced > 1)
{
IMtCoderCallback2 vt;

@@ -1180,8 +1212,8 @@ SRes XzEnc_Encode(CXzEncHandle pp, ISeqOutStream *outStream, ISeqInStream *inStr
p->mtCoder.mtCallback = &vt;
p->mtCoder.mtCallbackObject = p;

if ( props->blockSize == XZ_PROPS__BLOCK_SIZE__SOLID
|| props->blockSize == XZ_PROPS__BLOCK_SIZE__AUTO)
if ( props->blockSize == XZ_PROPS_BLOCK_SIZE_SOLID
|| props->blockSize == XZ_PROPS_BLOCK_SIZE_AUTO)
return SZ_ERROR_FAIL;

p->mtCoder.blockSize = (size_t)props->blockSize;

@@ -1200,7 +1232,7 @@ SRes XzEnc_Encode(CXzEncHandle pp, ISeqOutStream *outStream, ISeqInStream *inStr
p->mtCoder.numThreadsMax = (unsigned)props->numBlockThreads_Max;
p->mtCoder.expectedDataSize = p->expectedDataSize;

RINOK(MtCoder_Code(&p->mtCoder));
RINOK(MtCoder_Code(&p->mtCoder))
}
else
#endif

@@ -1217,7 +1249,7 @@ SRes XzEnc_Encode(CXzEncHandle pp, ISeqOutStream *outStream, ISeqInStream *inStr

writeStartSizes = 0;

if (props->blockSize != XZ_PROPS__BLOCK_SIZE__SOLID)
if (props->blockSize != XZ_PROPS_BLOCK_SIZE_SOLID)
{
writeStartSizes = (props->forceWriteSizesInHeader > 0);

@@ -1274,18 +1306,18 @@ SRes XzEnc_Encode(CXzEncHandle pp, ISeqOutStream *outStream, ISeqInStream *inStr
&inStreamFinished,
&blockSizes,
p->alloc,
p->allocBig));
p->allocBig))

{
UInt64 totalPackFull = blockSizes.totalSize + XZ_GET_PAD_SIZE(blockSizes.totalSize);

if (writeStartSizes)
{
RINOK(WriteBytes(outStream, p->outBufs[0], blockSizes.headerSize));
RINOK(WriteBytes(outStream, bufData, (size_t)totalPackFull - blockSizes.headerSize));
RINOK(WriteBytes(outStream, p->outBufs[0], blockSizes.headerSize))
RINOK(WriteBytes(outStream, bufData, (size_t)totalPackFull - blockSizes.headerSize))
}

RINOK(XzEncIndex_AddIndexRecord(&p->xzIndex, blockSizes.unpackSize, blockSizes.totalSize, p->alloc));
RINOK(XzEncIndex_AddIndexRecord(&p->xzIndex, blockSizes.unpackSize, blockSizes.totalSize, p->alloc))

progress2.inOffset += blockSizes.unpackSize;
progress2.outOffset += totalPackFull;

@@ -1302,8 +1334,8 @@ SRes XzEnc_Encode(CXzEncHandle pp, ISeqOutStream *outStream, ISeqInStream *inStr

#include "Alloc.h"

SRes Xz_Encode(ISeqOutStream *outStream, ISeqInStream *inStream,
const CXzProps *props, ICompressProgress *progress)
SRes Xz_Encode(ISeqOutStreamPtr outStream, ISeqInStreamPtr inStream,
const CXzProps *props, ICompressProgressPtr progress)
{
SRes res;
CXzEncHandle xz = XzEnc_Create(&g_Alloc, &g_BigAlloc);

@@ -1317,7 +1349,7 @@ SRes Xz_Encode(ISeqOutStream *outStream, ISeqInStream *inStream,
}


SRes Xz_EncodeEmpty(ISeqOutStream *outStream)
SRes Xz_EncodeEmpty(ISeqOutStreamPtr outStream)
{
SRes res;
CXzEncIndex xzIndex;
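The encoder-side hunks keep the same public shape; only the stream/progress parameter types and the XZ_PROPS_BLOCK_SIZE_* / MTCODER_* constant names change. A hedged sketch of the one-call helper whose updated signature appears above (Xz_Encode), assuming the caller already has sequential input and output streams; the wrapper name is illustrative:

#include "Xz.h"
#include "XzEnc.h"

static SRes CompressXz_Sketch(ISeqOutStreamPtr outStream, ISeqInStreamPtr inStream)
{
  CXzProps props;
  XzProps_Init(&props);
  props.checkId = XZ_CHECK_CRC32;  /* already the XzProps_Init default in this file; shown for clarity */
  /* block size and thread counts keep the XzProps_Init defaults in this sketch */
  return Xz_Encode(outStream, inStream, &props, NULL /* ICompressProgressPtr */);
}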
@@ -1,5 +1,5 @@
/* XzIn.c - Xz input
2021-09-04 : Igor Pavlov : Public domain */
2023-04-02 : Igor Pavlov : Public domain */

#include "Precomp.h"

@@ -15,11 +15,13 @@
#define XZ_FOOTER_SIG_CHECK(p) ((p)[0] == XZ_FOOTER_SIG_0 && (p)[1] == XZ_FOOTER_SIG_1)


SRes Xz_ReadHeader(CXzStreamFlags *p, ISeqInStream *inStream)
SRes Xz_ReadHeader(CXzStreamFlags *p, ISeqInStreamPtr inStream)
{
Byte sig[XZ_STREAM_HEADER_SIZE];
RINOK(SeqInStream_Read2(inStream, sig, XZ_STREAM_HEADER_SIZE, SZ_ERROR_NO_ARCHIVE));
if (memcmp(sig, XZ_SIG, XZ_SIG_SIZE) != 0)
size_t processedSize = XZ_STREAM_HEADER_SIZE;
RINOK(SeqInStream_ReadMax(inStream, sig, &processedSize))
if (processedSize != XZ_STREAM_HEADER_SIZE
|| memcmp(sig, XZ_SIG, XZ_SIG_SIZE) != 0)
return SZ_ERROR_NO_ARCHIVE;
return Xz_ParseHeader(p, sig);
}

@@ -29,12 +31,12 @@ SRes Xz_ReadHeader(CXzStreamFlags *p, ISeqInStream *inStream)
if (s == 0) return SZ_ERROR_ARCHIVE; \
pos += s; }

SRes XzBlock_ReadHeader(CXzBlock *p, ISeqInStream *inStream, BoolInt *isIndex, UInt32 *headerSizeRes)
SRes XzBlock_ReadHeader(CXzBlock *p, ISeqInStreamPtr inStream, BoolInt *isIndex, UInt32 *headerSizeRes)
{
Byte header[XZ_BLOCK_HEADER_SIZE_MAX];
unsigned headerSize;
*headerSizeRes = 0;
RINOK(SeqInStream_ReadByte(inStream, &header[0]));
RINOK(SeqInStream_ReadByte(inStream, &header[0]))
headerSize = (unsigned)header[0];
if (headerSize == 0)
{

@@ -46,7 +48,12 @@ SRes XzBlock_ReadHeader(CXzBlock *p, ISeqInStream *inStream, BoolInt *isIndex, U
*isIndex = False;
headerSize = (headerSize << 2) + 4;
*headerSizeRes = headerSize;
RINOK(SeqInStream_Read(inStream, header + 1, headerSize - 1));
{
size_t processedSize = headerSize - 1;
RINOK(SeqInStream_ReadMax(inStream, header + 1, &processedSize))
if (processedSize != headerSize - 1)
return SZ_ERROR_INPUT_EOF;
}
return XzBlock_Parse(p, header);
}

@@ -58,7 +65,9 @@ UInt64 Xz_GetUnpackSize(const CXzStream *p)
UInt64 size = 0;
size_t i;
for (i = 0; i < p->numBlocks; i++)
ADD_SIZE_CHECK(size, p->blocks[i].unpackSize);
{
ADD_SIZE_CHECK(size, p->blocks[i].unpackSize)
}
return size;
}

@@ -67,12 +76,14 @@ UInt64 Xz_GetPackSize(const CXzStream *p)
UInt64 size = 0;
size_t i;
for (i = 0; i < p->numBlocks; i++)
ADD_SIZE_CHECK(size, (p->blocks[i].totalSize + 3) & ~(UInt64)3);
{
ADD_SIZE_CHECK(size, (p->blocks[i].totalSize + 3) & ~(UInt64)3)
}
return size;
}

/*
SRes XzBlock_ReadFooter(CXzBlock *p, CXzStreamFlags f, ISeqInStream *inStream)
SRes XzBlock_ReadFooter(CXzBlock *p, CXzStreamFlags f, ISeqInStreamPtr inStream)
{
return SeqInStream_Read(inStream, p->check, XzFlags_GetCheckSize(f));
}

@@ -93,7 +104,7 @@ static SRes Xz_ReadIndex2(CXzStream *p, const Byte *buf, size_t size, ISzAllocPt

{
UInt64 numBlocks64;
READ_VARINT_AND_CHECK(buf, pos, size, &numBlocks64);
READ_VARINT_AND_CHECK(buf, pos, size, &numBlocks64)
numBlocks = (size_t)numBlocks64;
if (numBlocks != numBlocks64 || numBlocks * 2 > size)
return SZ_ERROR_ARCHIVE;

@@ -110,8 +121,8 @@ static SRes Xz_ReadIndex2(CXzStream *p, const Byte *buf, size_t size, ISzAllocPt
for (i = 0; i < numBlocks; i++)
{
CXzBlockSizes *block = &p->blocks[i];
READ_VARINT_AND_CHECK(buf, pos, size, &block->totalSize);
READ_VARINT_AND_CHECK(buf, pos, size, &block->unpackSize);
READ_VARINT_AND_CHECK(buf, pos, size, &block->totalSize)
READ_VARINT_AND_CHECK(buf, pos, size, &block->unpackSize)
if (block->totalSize == 0)
return SZ_ERROR_ARCHIVE;
}

@@ -122,7 +133,7 @@ static SRes Xz_ReadIndex2(CXzStream *p, const Byte *buf, size_t size, ISzAllocPt
return (pos == size) ? SZ_OK : SZ_ERROR_ARCHIVE;
}

static SRes Xz_ReadIndex(CXzStream *p, ILookInStream *stream, UInt64 indexSize, ISzAllocPtr alloc)
static SRes Xz_ReadIndex(CXzStream *p, ILookInStreamPtr stream, UInt64 indexSize, ISzAllocPtr alloc)
{
SRes res;
size_t size;

@@ -142,14 +153,14 @@ static SRes Xz_ReadIndex(CXzStream *p, ILookInStream *stream, UInt64 indexSize,
return res;
}

static SRes LookInStream_SeekRead_ForArc(ILookInStream *stream, UInt64 offset, void *buf, size_t size)
static SRes LookInStream_SeekRead_ForArc(ILookInStreamPtr stream, UInt64 offset, void *buf, size_t size)
{
RINOK(LookInStream_SeekTo(stream, offset));
RINOK(LookInStream_SeekTo(stream, offset))
return LookInStream_Read(stream, buf, size);
/* return LookInStream_Read2(stream, buf, size, SZ_ERROR_NO_ARCHIVE); */
}

static SRes Xz_ReadBackward(CXzStream *p, ILookInStream *stream, Int64 *startOffset, ISzAllocPtr alloc)
static SRes Xz_ReadBackward(CXzStream *p, ILookInStreamPtr stream, Int64 *startOffset, ISzAllocPtr alloc)
{
UInt64 indexSize;
Byte buf[XZ_STREAM_FOOTER_SIZE];

@@ -159,7 +170,7 @@ static SRes Xz_ReadBackward(CXzStream *p, ILookInStream *stream, Int64 *startOff
return SZ_ERROR_NO_ARCHIVE;

pos -= XZ_STREAM_FOOTER_SIZE;
RINOK(LookInStream_SeekRead_ForArc(stream, pos, buf, XZ_STREAM_FOOTER_SIZE));
RINOK(LookInStream_SeekRead_ForArc(stream, pos, buf, XZ_STREAM_FOOTER_SIZE))

if (!XZ_FOOTER_SIG_CHECK(buf + 10))
{

@@ -174,7 +185,7 @@ static SRes Xz_ReadBackward(CXzStream *p, ILookInStream *stream, Int64 *startOff

i = (pos > TEMP_BUF_SIZE) ? TEMP_BUF_SIZE : (size_t)pos;
pos -= i;
RINOK(LookInStream_SeekRead_ForArc(stream, pos, temp, i));
RINOK(LookInStream_SeekRead_ForArc(stream, pos, temp, i))
total += (UInt32)i;
for (; i != 0; i--)
if (temp[i - 1] != 0)

@@ -193,7 +204,7 @@ static SRes Xz_ReadBackward(CXzStream *p, ILookInStream *stream, Int64 *startOff
if (pos < XZ_STREAM_FOOTER_SIZE)
return SZ_ERROR_NO_ARCHIVE;
pos -= XZ_STREAM_FOOTER_SIZE;
RINOK(LookInStream_SeekRead_ForArc(stream, pos, buf, XZ_STREAM_FOOTER_SIZE));
RINOK(LookInStream_SeekRead_ForArc(stream, pos, buf, XZ_STREAM_FOOTER_SIZE))
if (!XZ_FOOTER_SIG_CHECK(buf + 10))
return SZ_ERROR_NO_ARCHIVE;
}

@@ -217,8 +228,8 @@ static SRes Xz_ReadBackward(CXzStream *p, ILookInStream *stream, Int64 *startOff
return SZ_ERROR_ARCHIVE;

pos -= indexSize;
RINOK(LookInStream_SeekTo(stream, pos));
RINOK(Xz_ReadIndex(p, stream, indexSize, alloc));
RINOK(LookInStream_SeekTo(stream, pos))
RINOK(Xz_ReadIndex(p, stream, indexSize, alloc))

{
UInt64 totalSize = Xz_GetPackSize(p);

@@ -227,7 +238,7 @@ static SRes Xz_ReadBackward(CXzStream *p, ILookInStream *stream, Int64 *startOff
|| pos < totalSize + XZ_STREAM_HEADER_SIZE)
return SZ_ERROR_ARCHIVE;
pos -= (totalSize + XZ_STREAM_HEADER_SIZE);
RINOK(LookInStream_SeekTo(stream, pos));
RINOK(LookInStream_SeekTo(stream, pos))
*startOffset = (Int64)pos;
}
{

@@ -236,7 +247,7 @@ static SRes Xz_ReadBackward(CXzStream *p, ILookInStream *stream, Int64 *startOff
SecToRead_CreateVTable(&secToRead);
secToRead.realStream = stream;

RINOK(Xz_ReadHeader(&headerFlags, &secToRead.vt));
RINOK(Xz_ReadHeader(&headerFlags, &secToRead.vt))
return (p->flags == headerFlags) ? SZ_OK : SZ_ERROR_ARCHIVE;
}
}

@@ -274,7 +285,9 @@ UInt64 Xzs_GetUnpackSize(const CXzs *p)
UInt64 size = 0;
size_t i;
for (i = 0; i < p->num; i++)
ADD_SIZE_CHECK(size, Xz_GetUnpackSize(&p->streams[i]));
{
ADD_SIZE_CHECK(size, Xz_GetUnpackSize(&p->streams[i]))
}
return size;
}

@@ -284,15 +297,17 @@ UInt64 Xzs_GetPackSize(const CXzs *p)
UInt64 size = 0;
size_t i;
for (i = 0; i < p->num; i++)
ADD_SIZE_CHECK(size, Xz_GetTotalSize(&p->streams[i]));
{
ADD_SIZE_CHECK(size, Xz_GetTotalSize(&p->streams[i]))
}
return size;
}
*/

SRes Xzs_ReadBackward(CXzs *p, ILookInStream *stream, Int64 *startOffset, ICompressProgress *progress, ISzAllocPtr alloc)
SRes Xzs_ReadBackward(CXzs *p, ILookInStreamPtr stream, Int64 *startOffset, ICompressProgressPtr progress, ISzAllocPtr alloc)
{
Int64 endOffset = 0;
RINOK(ILookInStream_Seek(stream, &endOffset, SZ_SEEK_END));
RINOK(ILookInStream_Seek(stream, &endOffset, SZ_SEEK_END))
*startOffset = endOffset;
for (;;)
{

@@ -301,7 +316,7 @@ SRes Xzs_ReadBackward(CXzs *p, ILookInStream *stream, Int64 *startOffset, ICompr
Xz_Construct(&st);
res = Xz_ReadBackward(&st, stream, startOffset, alloc);
st.startOffset = (UInt64)*startOffset;
RINOK(res);
RINOK(res)
if (p->num == p->numAllocated)
{
const size_t newNum = p->num + p->num / 4 + 1;

@@ -317,7 +332,7 @@ SRes Xzs_ReadBackward(CXzs *p, ILookInStream *stream, Int64 *startOffset, ICompr
p->streams[p->num++] = st;
if (*startOffset == 0)
break;
RINOK(LookInStream_SeekTo(stream, (UInt64)*startOffset));
RINOK(LookInStream_SeekTo(stream, (UInt64)*startOffset))
if (progress && ICompressProgress_Progress(progress, (UInt64)(endOffset - *startOffset), (UInt64)(Int64)-1) != SZ_OK)
return SZ_ERROR_PROGRESS;
}
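The XzIn.c readers above now accept the ISeqInStreamPtr / ILookInStreamPtr typedefs, and callbacks recover their enclosing object with the Z7_-prefixed container macro used throughout this update. A hedged sketch of a memory-backed ISeqInStream written against that convention; the struct name and fields are illustrative, not part of the SDK:

#include <string.h>
#include "7zTypes.h"

typedef struct
{
  ISeqInStream vt;    /* the vtable must stay the first member for the container macro */
  const Byte *data;
  size_t size;
  size_t pos;
} CMemInStream_Sketch;

static SRes MemInStream_Sketch_Read(ISeqInStreamPtr pp, void *buf, size_t *size)
{
  CMemInStream_Sketch *p = Z7_CONTAINER_FROM_VTBL(pp, CMemInStream_Sketch, vt);
  size_t rem = p->size - p->pos;
  if (*size > rem)
    *size = rem;
  memcpy(buf, p->data + p->pos, *size);
  p->pos += *size;
  return SZ_OK;
}

/* usage: set stream.vt.Read = MemInStream_Sketch_Read and pass &stream.vt wherever an
   ISeqInStreamPtr is expected, e.g. Xz_ReadHeader(&flags, &stream.vt). */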
@@ -254,7 +254,7 @@ namespace
};
MemoryInStream mis = {
{.Read = [](const ISeqInStream* p, void* buf, size_t* size) -> SRes {
MemoryInStream* mis = CONTAINER_FROM_VTBL(p, MemoryInStream, vt);
MemoryInStream* mis = Z7_CONTAINER_FROM_VTBL(p, MemoryInStream, vt);
const size_t avail = mis->buffer_size - mis->read_pos;
const size_t copy = std::min(avail, *size);

@@ -274,7 +274,7 @@ namespace
};
DumpOutStream dos = {
{.Write = [](const ISeqOutStream* p, const void* buf, size_t size) -> size_t {
DumpOutStream* dos = CONTAINER_FROM_VTBL(p, DumpOutStream, vt);
DumpOutStream* dos = Z7_CONTAINER_FROM_VTBL(p, DumpOutStream, vt);
dos->real->Write(buf, size);
return size;
}},

@@ -302,14 +302,14 @@ namespace

MyFileInStream fis = {
{.Read = [](const ISeekInStream* p, void* buf, size_t* size) -> SRes {
MyFileInStream* fis = CONTAINER_FROM_VTBL(p, MyFileInStream, vt);
MyFileInStream* fis = Z7_CONTAINER_FROM_VTBL(p, MyFileInStream, vt);
const size_t size_to_read = *size;
const auto bytes_read = std::fread(buf, 1, size_to_read, fis->fp);
*size = (bytes_read >= 0) ? bytes_read : 0;
return (bytes_read == size_to_read) ? SZ_OK : SZ_ERROR_READ;
},
.Seek = [](const ISeekInStream* p, Int64* pos, ESzSeek origin) -> SRes {
MyFileInStream* fis = CONTAINER_FROM_VTBL(p, MyFileInStream, vt);
MyFileInStream* fis = Z7_CONTAINER_FROM_VTBL(p, MyFileInStream, vt);
static_assert(SZ_SEEK_CUR == SEEK_CUR && SZ_SEEK_SET == SEEK_SET && SZ_SEEK_END == SEEK_END);
if (FileSystem::FSeek64(fis->fp, *pos, static_cast<int>(origin)) != 0)
return SZ_ERROR_READ;

@@ -324,7 +324,7 @@ namespace
m_fp.get()};

CLookToRead2 look_stream = {};
LookToRead2_Init(&look_stream);
LookToRead2_INIT(&look_stream);
LookToRead2_CreateVTable(&look_stream, False);
look_stream.realStream = &fis.vt;
look_stream.bufSize = kInputBufSize;

@@ -81,7 +81,7 @@ bool Updater::OpenUpdateZip(const char* path)

m_look_stream.bufSize = kInputBufSize;
m_look_stream.realStream = &m_archive_stream.vt;
LookToRead2_Init(&m_look_stream);
LookToRead2_INIT(&m_look_stream);

#ifdef _WIN32
WRes wres = InFile_OpenW(&m_archive_stream.file, FileSystem::GetWin32Path(path).c_str());

@@ -49,7 +49,7 @@ static inline bool ExtractUpdater(const char* archive_path, const char* destinat

lookstream.bufSize = kInputBufSize;
lookstream.realStream = &instream.vt;
LookToRead2_Init(&lookstream);
LookToRead2_INIT(&lookstream);
ScopedGuard buffer_guard([&lookstream]() {
ISzAlloc_Free(&g_Alloc, lookstream.buf);
});
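On the application side the update reduces to two renames: CONTAINER_FROM_VTBL becomes Z7_CONTAINER_FROM_VTBL, and the LookToRead2_Init() call becomes the LookToRead2_INIT() macro. A hedged sketch of the shared seekable-archive setup those call sites use; the buffer size, helper name, and header locations are assumptions rather than something this commit specifies:

#include "7zFile.h"
#include "7zTypes.h"
#include "Alloc.h"

#define kInputBufSize ((size_t)1 << 18)  /* illustrative size */

static SRes OpenLookStream_Sketch(CFileInStream *archive_stream, CLookToRead2 *look_stream, const char *path)
{
  if (InFile_Open(&archive_stream->file, path) != 0)
    return SZ_ERROR_READ;
  FileInStream_CreateVTable(archive_stream);
  LookToRead2_CreateVTable(look_stream, False);
  look_stream->buf = (Byte *)ISzAlloc_Alloc(&g_Alloc, kInputBufSize);
  if (!look_stream->buf)
    return SZ_ERROR_MEM;
  look_stream->bufSize = kInputBufSize;
  look_stream->realStream = &archive_stream->vt;
  LookToRead2_INIT(look_stream);  /* 23.01 macro; the old LookToRead2_Init() function call is gone */
  return SZ_OK;
}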