[GPU] XXH3 hash instead of XXH64

Triang3l 2020-12-08 22:31:09 +03:00
parent 9a4643d0f2
commit 36a0bcec8b
24 changed files with 83 additions and 2002 deletions

.gitmodules

@@ -73,3 +73,6 @@
[submodule "third_party/date"]
  path = third_party/date
  url = https://github.com/HowardHinnant/date.git
+[submodule "third_party/xxhash"]
+  path = third_party/xxhash
+  url = https://github.com/Cyan4973/xxHash.git

View File

@@ -17,7 +17,7 @@ namespace hash {
// For use in unordered_sets and unordered_maps (primarily multisets and
// multimaps, with manual collision resolution), where the hash is calculated
-// externally (for instance, as XXH64), possibly requiring context data rather
+// externally (for instance, as XXH3), possibly requiring context data rather
// than a pure function to calculate the hash
template <typename Key>
struct IdentityHasher {
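
As an illustration of the pattern this comment describes, here is a minimal sketch, not taken from the xenia sources (Layout, IdentityHasher64, AddLayout and FindLayout are made-up names; the real tree uses xe::hash::IdentityHasher): the 64-bit XXH3 hash is computed externally, used directly as the multimap key, and collisions are resolved manually by comparing the stored values.

#include <cstddef>
#include <cstdint>
#include <cstring>
#include <unordered_map>

#include "xenia/base/xxhash.h"

// Hypothetical payload keyed by the hash of its raw bytes.
struct Layout {
  uint32_t data[4];
};

// Pass-through hasher: the key is already a hash.
struct IdentityHasher64 {
  size_t operator()(uint64_t key) const { return static_cast<size_t>(key); }
};

std::unordered_multimap<uint64_t, Layout, IdentityHasher64> layouts;

// Insert: hash the bytes externally with XXH3, use the result as the key.
void AddLayout(const Layout& layout) {
  uint64_t hash = XXH3_64bits(&layout, sizeof(layout));
  layouts.emplace(hash, layout);
}

// Lookup: walk all entries with the same 64-bit hash and compare the actual
// contents to resolve collisions manually.
const Layout* FindLayout(const Layout& layout) {
  uint64_t hash = XXH3_64bits(&layout, sizeof(layout));
  auto range = layouts.equal_range(hash);
  for (auto it = range.first; it != range.second; ++it) {
    if (!std::memcmp(&it->second, &layout, sizeof(layout))) {
      return &it->second;
    }
  }
  return nullptr;
}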

src/xenia/base/xxhash.h (new file)

@@ -0,0 +1,21 @@
/**
******************************************************************************
* Xenia : Xbox 360 Emulator Research Project *
******************************************************************************
* Copyright 2020 Ben Vanik. All rights reserved. *
* Released under the BSD license - see LICENSE in the root for more details. *
******************************************************************************
*/
#ifndef XENIA_BASE_XXHASH_H_
#define XENIA_BASE_XXHASH_H_
#define XXH_INLINE_ALL
// Can't use XXH_X86DISPATCH because XXH is calculated on multiple threads,
// while the dispatch writes the result (multiple pointers without any
// synchronization) to XXH_g_dispatch at the first call.
#include "third_party/xxhash/xxhash.h"
#endif // XENIA_BASE_XXHASH_H_
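
A minimal usage sketch for this wrapper (illustrative, not part of the commit): with XXH_INLINE_ALL defined, the XXH3 functions are fully inlined from the header, so nothing extra has to be linked, and the one-shot XXH3_64bits() call takes no seed argument (XXH3_64bits_withSeed() exists when a seed is needed).

#include <cstdint>
#include <vector>

#include "xenia/base/xxhash.h"

// Hypothetical helper: one-shot, unseeded 64-bit XXH3 hash of a dword buffer,
// the same call pattern the GPU caches use for ucode and descriptions.
uint64_t HashDwords(const std::vector<uint32_t>& dwords) {
  return XXH3_64bits(dwords.data(), dwords.size() * sizeof(uint32_t));
}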

View File

@@ -7,8 +7,6 @@
 ******************************************************************************
 */
-#include "third_party/xxhash/xxhash.h"
#include <algorithm>
#include <cstring>
#include <utility>

View File

@@ -20,7 +20,6 @@
#include <utility>
#include "third_party/fmt/include/fmt/format.h"
-#include "third_party/xxhash/xxhash.h"
#include "xenia/base/assert.h"
#include "xenia/base/byte_order.h"
#include "xenia/base/clock.h"
@@ -30,6 +29,7 @@
#include "xenia/base/math.h"
#include "xenia/base/profiling.h"
#include "xenia/base/string.h"
+#include "xenia/base/xxhash.h"
#include "xenia/gpu/d3d12/d3d12_command_processor.h"
#include "xenia/gpu/gpu_flags.h"
#include "xenia/ui/d3d12/d3d12_util.h"
@@ -325,9 +325,9 @@ void PipelineCache::InitializeShaderStorage(
          pipeline_stored_descriptions[i];
      // Validate file integrity, stop and truncate the stream if data is
      // corrupted.
-     if (XXH64(&pipeline_stored_description.description,
-               sizeof(pipeline_stored_description.description),
-               0) != pipeline_stored_description.description_hash) {
+     if (XXH3_64bits(&pipeline_stored_description.description,
+                     sizeof(pipeline_stored_description.description)) !=
+         pipeline_stored_description.description_hash) {
        pipeline_stored_descriptions.resize(i);
        break;
      }
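
The validation above boils down to recomputing XXH3 over the stored bytes and comparing it with the hash written at serialization time. A minimal sketch of that check, with a made-up record type (StoredRecord and IsRecordIntact are not from the tree):

#include <cstdint>

#include "xenia/base/xxhash.h"

// Hypothetical on-disk record layout: the stored hash covers `payload`.
struct StoredRecord {
  uint64_t payload_hash;
  uint8_t payload[64];
};

// Returns true if the payload still matches the hash written when the record
// was serialized, which is the same check the shader storage performs.
bool IsRecordIntact(const StoredRecord& record) {
  return XXH3_64bits(record.payload, sizeof(record.payload)) ==
         record.payload_hash;
}
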
@@ -471,7 +471,7 @@ void PipelineCache::InitializeShaderStorage(
          break;
        }
        uint64_t ucode_data_hash =
-           XXH64(ucode_dwords.data(), ucode_byte_count, 0);
+           XXH3_64bits(ucode_dwords.data(), ucode_byte_count);
        if (shader_header.ucode_data_hash != ucode_data_hash) {
          // Validation failed.
          break;
@@ -828,7 +828,7 @@ D3D12Shader* PipelineCache::LoadShader(xenos::ShaderType shader_type,
                                       uint32_t dword_count) {
  // Hash the input memory and lookup the shader.
  return LoadShader(shader_type, host_address, dword_count,
-                   XXH64(host_address, dword_count * sizeof(uint32_t), 0));
+                   XXH3_64bits(host_address, dword_count * sizeof(uint32_t)));
}
D3D12Shader* PipelineCache::LoadShader(xenos::ShaderType shader_type,
@@ -1065,7 +1065,7 @@ bool PipelineCache::ConfigurePipeline(
  }
  // Find an existing pipeline in the cache.
-  uint64_t hash = XXH64(&description, sizeof(description), 0);
+  uint64_t hash = XXH3_64bits(&description, sizeof(description));
  auto found_range = pipelines_.equal_range(hash);
  for (auto it = found_range.first; it != found_range.second; ++it) {
    Pipeline* found_pipeline = it->second;
@@ -1185,20 +1185,20 @@ bool PipelineCache::TranslateShader(DxbcShaderTranslator& translator,
  uint64_t texture_binding_layout_hash = 0;
  if (texture_binding_count) {
    texture_binding_layout_hash =
-       XXH64(texture_bindings, texture_binding_layout_bytes, 0);
+       XXH3_64bits(texture_bindings, texture_binding_layout_bytes);
  }
  uint32_t bindless_sampler_count =
      bindless_resources_used_ ? sampler_binding_count : 0;
  uint64_t bindless_sampler_layout_hash = 0;
  if (bindless_sampler_count) {
-   XXH64_state_t hash_state;
-   XXH64_reset(&hash_state, 0);
+   XXH3_state_t hash_state;
+   XXH3_64bits_reset(&hash_state);
    for (uint32_t i = 0; i < bindless_sampler_count; ++i) {
-     XXH64_update(&hash_state,
-                  &sampler_bindings[i].bindless_descriptor_index,
-                  sizeof(sampler_bindings[i].bindless_descriptor_index));
+     XXH3_64bits_update(
+         &hash_state, &sampler_bindings[i].bindless_descriptor_index,
+         sizeof(sampler_bindings[i].bindless_descriptor_index));
    }
-   bindless_sampler_layout_hash = XXH64_digest(&hash_state);
+   bindless_sampler_layout_hash = XXH3_64bits_digest(&hash_state);
  }
  // Obtain the unique IDs of binding layouts if there are any texture
  // bindings or bindless samplers, for invalidation in the command processor.
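
For reference, the streaming-API mapping applied throughout this commit is XXH64_state_t/XXH64_reset/XXH64_update/XXH64_digest to XXH3_state_t/XXH3_64bits_reset/XXH3_64bits_update/XXH3_64bits_digest, with the explicit seed argument dropped. A small self-contained sketch of the new calls (HashTwoBlocks is a made-up helper, not from the tree):

#include <cstddef>
#include <cstdint>

#include "xenia/base/xxhash.h"

// Incrementally hash two buffers and take the 64-bit digest, the way the
// bindless sampler indices are hashed above.
uint64_t HashTwoBlocks(const void* a, size_t a_size, const void* b,
                       size_t b_size) {
  XXH3_state_t hash_state;                     // stack state is fine with XXH_INLINE_ALL
  XXH3_64bits_reset(&hash_state);              // was XXH64_reset(&hash_state, 0)
  XXH3_64bits_update(&hash_state, a, a_size);  // was XXH64_update(...)
  XXH3_64bits_update(&hash_state, b, b_size);
  return XXH3_64bits_digest(&hash_state);      // was XXH64_digest(&hash_state)
}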

View File

@@ -95,7 +95,7 @@ class PipelineCache {
    reg::SQ_PROGRAM_CNTL sq_program_cntl;
-   static constexpr uint32_t kVersion = 0x20201129;
+   static constexpr uint32_t kVersion = 0x20201207;
  });
  // Update PipelineDescription::kVersion if any of the Pipeline* enums are
@@ -208,7 +208,7 @@ class PipelineCache {
    PipelineRenderTarget render_targets[4];
-   static constexpr uint32_t kVersion = 0x20201202;
+   static constexpr uint32_t kVersion = 0x20201207;
  });
  XEPACKEDSTRUCT(PipelineStoredDescription, {
@@ -279,7 +279,7 @@ class PipelineCache {
  // Texture binding layouts of different shaders, for obtaining layout UIDs.
  std::vector<D3D12Shader::TextureBinding> texture_binding_layouts_;
  // Map of texture binding layouts used by shaders, for obtaining UIDs. Keys
- // are XXH64 hashes of layouts, values need manual collision resolution using
+ // are XXH3 hashes of layouts, values need manual collision resolution using
  // layout_vector_offset:layout_length of texture_binding_layouts_.
  std::unordered_multimap<uint64_t, LayoutUID,
                          xe::hash::IdentityHasher<uint64_t>>
@@ -287,7 +287,7 @@ class PipelineCache {
  // Bindless sampler indices of different shaders, for obtaining layout UIDs.
  // For bindful, sampler count is used as the UID instead.
  std::vector<uint32_t> bindless_sampler_layouts_;
- // Keys are XXH64 hashes of used bindless sampler indices.
+ // Keys are XXH3 hashes of used bindless sampler indices.
  std::unordered_multimap<uint64_t, LayoutUID,
                          xe::hash::IdentityHasher<uint64_t>>
      bindless_sampler_layout_map_;

View File

@@ -9,7 +9,6 @@
#include "xenia/gpu/d3d12/texture_cache.h"
-#include "third_party/xxhash/xxhash.h"
#include <algorithm>
#include <cfloat>
@@ -21,6 +20,7 @@
#include "xenia/base/logging.h"
#include "xenia/base/math.h"
#include "xenia/base/profiling.h"
+#include "xenia/base/xxhash.h"
#include "xenia/gpu/d3d12/d3d12_command_processor.h"
#include "xenia/gpu/gpu_flags.h"
#include "xenia/gpu/texture_info.h"

View File

@@ -12,7 +12,7 @@
#include <cstring>
#include <memory>
-#include "third_party/xxhash/xxhash.h"
+#include "xenia/base/xxhash.h"
namespace xe {
namespace gpu {
@@ -51,7 +51,7 @@ bool SamplerInfo::Prepare(const xenos::xe_gpu_texture_fetch_t& fetch,
}
uint64_t SamplerInfo::hash() const {
-  return XXH64(this, sizeof(SamplerInfo), 0);
+  return XXH3_64bits(this, sizeof(SamplerInfo));
}
}  // namespace gpu

View File

@@ -18,8 +18,7 @@
#include "xenia/base/math.h"
#include "xenia/base/memory.h"
#include "xenia/base/profiling.h"
+#include "xenia/base/xxhash.h"
-#include "third_party/xxhash/xxhash.h"
namespace xe {
namespace gpu {

View File

@@ -16,8 +16,7 @@
#include "xenia/base/logging.h"
#include "xenia/base/math.h"
#include "xenia/base/memory.h"
+#include "xenia/base/xxhash.h"
-#include "third_party/xxhash/xxhash.h"
namespace xe {
namespace gpu {
@@ -319,7 +318,7 @@ bool TextureInfo::GetPackedTileOffset(int packed_tile, uint32_t* offset_x,
}
uint64_t TextureInfo::hash() const {
-  return XXH64(this, sizeof(TextureInfo), 0);
+  return XXH3_64bits(this, sizeof(TextureInfo));
}
void TextureInfo::SetupMemoryInfo(uint32_t base_address, uint32_t mip_address) {

View File

@@ -552,14 +552,14 @@ std::pair<VkBuffer, VkDeviceSize> BufferCache::UploadVertexBuffer(
}
void BufferCache::HashVertexBindings(
-   XXH64_state_t* hash_state,
+   XXH3_state_t* hash_state,
    const std::vector<Shader::VertexBinding>& vertex_bindings) {
  auto& regs = *register_file_;
  for (const auto& vertex_binding : vertex_bindings) {
#if 0
-   XXH64_update(hash_state, &vertex_binding.binding_index, sizeof(vertex_binding.binding_index));
-   XXH64_update(hash_state, &vertex_binding.fetch_constant, sizeof(vertex_binding.fetch_constant));
-   XXH64_update(hash_state, &vertex_binding.stride_words, sizeof(vertex_binding.stride_words));
+   XXH3_64bits_update(hash_state, &vertex_binding.binding_index, sizeof(vertex_binding.binding_index));
+   XXH3_64bits_update(hash_state, &vertex_binding.fetch_constant, sizeof(vertex_binding.fetch_constant));
+   XXH3_64bits_update(hash_state, &vertex_binding.stride_words, sizeof(vertex_binding.stride_words));
#endif
    int r = XE_GPU_REG_SHADER_CONSTANT_FETCH_00_0 +
            (vertex_binding.fetch_constant / 3) * 6;
@@ -567,15 +567,15 @@ void BufferCache::HashVertexBindings(
    switch (vertex_binding.fetch_constant % 3) {
      case 0: {
        auto& fetch = group->vertex_fetch_0;
-       XXH64_update(hash_state, &fetch, sizeof(fetch));
+       XXH3_64bits_update(hash_state, &fetch, sizeof(fetch));
      } break;
      case 1: {
        auto& fetch = group->vertex_fetch_1;
-       XXH64_update(hash_state, &fetch, sizeof(fetch));
+       XXH3_64bits_update(hash_state, &fetch, sizeof(fetch));
      } break;
      case 2: {
        auto& fetch = group->vertex_fetch_2;
-       XXH64_update(hash_state, &fetch, sizeof(fetch));
+       XXH3_64bits_update(hash_state, &fetch, sizeof(fetch));
      } break;
    }
  }
@@ -585,12 +585,12 @@ VkDescriptorSet BufferCache::PrepareVertexSet(
    VkCommandBuffer command_buffer, VkFence fence,
    const std::vector<Shader::VertexBinding>& vertex_bindings) {
  // (quickly) Generate a hash.
-  XXH64_state_t hash_state;
-  XXH64_reset(&hash_state, 0);
+  XXH3_state_t hash_state;
+  XXH3_64bits_reset(&hash_state);
  // (quickly) Generate a hash.
  HashVertexBindings(&hash_state, vertex_bindings);
-  uint64_t hash = XXH64_digest(&hash_state);
+  uint64_t hash = XXH3_64bits_digest(&hash_state);
  for (auto it = vertex_sets_.find(hash); it != vertex_sets_.end(); ++it) {
    // TODO(DrChat): We need to compare the bindings and ensure they're equal.
    return it->second;
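
The surrounding code is the usual hash-keyed cache pattern: stream the relevant fetch constants into an XXH3 state, take the digest, and use it to look up a previously created descriptor set. A minimal sketch of that flow under invented names (DescriptorSetCache and FetchConstants are not from the tree, and the handle allocation is a placeholder):

#include <cstdint>
#include <unordered_map>

#include "xenia/base/xxhash.h"

// Stand-in for whatever GPU state gets hashed into the cache key.
struct FetchConstants {
  uint32_t words[6];
};

class DescriptorSetCache {
 public:
  // Returns a cached handle for this state, creating one on a cache miss.
  uint64_t GetOrCreate(const FetchConstants& fetch) {
    XXH3_state_t hash_state;
    XXH3_64bits_reset(&hash_state);
    XXH3_64bits_update(&hash_state, &fetch, sizeof(fetch));
    uint64_t hash = XXH3_64bits_digest(&hash_state);
    auto it = sets_.find(hash);
    if (it != sets_.end()) {
      return it->second;  // a real cache would also compare the contents
    }
    uint64_t new_set = next_handle_++;  // placeholder for real descriptor set creation
    sets_.emplace(hash, new_set);
    return new_set;
  }

 private:
  std::unordered_map<uint64_t, uint64_t> sets_;
  uint64_t next_handle_ = 1;
};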

View File

@@ -10,6 +10,7 @@
#ifndef XENIA_GPU_VULKAN_BUFFER_CACHE_H_
#define XENIA_GPU_VULKAN_BUFFER_CACHE_H_
+#include "xenia/base/xxhash.h"
#include "xenia/gpu/register_file.h"
#include "xenia/gpu/shader.h"
#include "xenia/gpu/xenos.h"
@@ -20,7 +21,6 @@
#include "xenia/ui/vulkan/vulkan_device.h"
#include "third_party/vulkan/vk_mem_alloc.h"
-#include "third_party/xxhash/xxhash.h"
#include <map>
#include <unordered_map>
@@ -127,7 +127,7 @@ class BufferCache {
  void FreeConstantDescriptorSet();
  void HashVertexBindings(
-     XXH64_state_t* hash_state,
+     XXH3_state_t* hash_state,
      const std::vector<Shader::VertexBinding>& vertex_bindings);
  // Allocates a block of memory in the transient buffer.

View File

@@ -9,11 +9,11 @@
#include "xenia/gpu/vulkan/pipeline_cache.h"
-#include "third_party/xxhash/xxhash.h"
#include "xenia/base/logging.h"
#include "xenia/base/math.h"
#include "xenia/base/memory.h"
#include "xenia/base/profiling.h"
+#include "xenia/base/xxhash.h"
#include "xenia/gpu/gpu_flags.h"
#include "xenia/gpu/vulkan/vulkan_gpu_flags.h"
@@ -208,7 +208,8 @@ VulkanShader* PipelineCache::LoadShader(xenos::ShaderType shader_type,
                                        const uint32_t* host_address,
                                        uint32_t dword_count) {
  // Hash the input memory and lookup the shader.
-  uint64_t data_hash = XXH64(host_address, dword_count * sizeof(uint32_t), 0);
+  uint64_t data_hash =
+      XXH3_64bits(host_address, dword_count * sizeof(uint32_t));
  auto it = shader_map_.find(data_hash);
  if (it != shader_map_.end()) {
    // Shader has been previously loaded.
@@ -259,7 +260,7 @@ PipelineCache::UpdateStatus PipelineCache::ConfigurePipeline(
  }
  if (!pipeline) {
    // Should have a hash key produced by the UpdateState pass.
-   uint64_t hash_key = XXH64_digest(&hash_state_);
+   uint64_t hash_key = XXH3_64bits_digest(&hash_state_);
    pipeline = GetPipeline(render_state, hash_key);
    current_pipeline_ = pipeline;
    if (!pipeline) {
@@ -961,7 +962,7 @@ PipelineCache::UpdateStatus PipelineCache::UpdateState(
  bool mismatch = false;
  // Reset hash so we can build it up.
-  XXH64_reset(&hash_state_, 0);
+  XXH3_64bits_reset(&hash_state_);
#define CHECK_UPDATE_STATUS(status, mismatch, error_message) \
  { \
@@ -1028,7 +1029,7 @@ PipelineCache::UpdateStatus PipelineCache::UpdateRenderTargetState() {
  regs.rb_color1_info.color_format = cur_regs->rb_color1_info.color_format;
  regs.rb_color2_info.color_format = cur_regs->rb_color2_info.color_format;
  regs.rb_color3_info.color_format = cur_regs->rb_color3_info.color_format;
-  XXH64_update(&hash_state_, &regs, sizeof(regs));
+  XXH3_64bits_update(&hash_state_, &regs, sizeof(regs));
  if (!dirty) {
    return UpdateStatus::kCompatible;
  }
@@ -1061,7 +1062,7 @@ PipelineCache::UpdateStatus PipelineCache::UpdateShaderStages(
  regs.vertex_shader = vertex_shader;
  regs.pixel_shader = pixel_shader;
  regs.primitive_type = primitive_type;
-  XXH64_update(&hash_state_, &regs, sizeof(regs));
+  XXH3_64bits_update(&hash_state_, &regs, sizeof(regs));
  if (!dirty) {
    return UpdateStatus::kCompatible;
  }
@@ -1148,7 +1149,7 @@ PipelineCache::UpdateStatus PipelineCache::UpdateVertexInputState(
  bool dirty = false;
  dirty |= vertex_shader != regs.vertex_shader;
  regs.vertex_shader = vertex_shader;
-  XXH64_update(&hash_state_, &regs, sizeof(regs));
+  XXH3_64bits_update(&hash_state_, &regs, sizeof(regs));
  if (!dirty) {
    return UpdateStatus::kCompatible;
  }
@@ -1177,7 +1178,7 @@ PipelineCache::UpdateStatus PipelineCache::UpdateInputAssemblyState(
  dirty |= SetShadowRegister(&regs.multi_prim_ib_reset_index,
                             XE_GPU_REG_VGT_MULTI_PRIM_IB_RESET_INDX);
  regs.primitive_type = primitive_type;
-  XXH64_update(&hash_state_, &regs, sizeof(regs));
+  XXH3_64bits_update(&hash_state_, &regs, sizeof(regs));
  if (!dirty) {
    return UpdateStatus::kCompatible;
  }
@@ -1303,7 +1304,7 @@ PipelineCache::UpdateStatus PipelineCache::UpdateRasterizationState(
    dirty = true;
  }
-  XXH64_update(&hash_state_, &regs, sizeof(regs));
+  XXH3_64bits_update(&hash_state_, &regs, sizeof(regs));
  if (!dirty) {
    return UpdateStatus::kCompatible;
  }
@@ -1385,7 +1386,7 @@ PipelineCache::UpdateStatus PipelineCache::UpdateMultisampleState() {
  dirty |= SetShadowRegister(&regs.pa_su_sc_mode_cntl,
                             XE_GPU_REG_PA_SU_SC_MODE_CNTL);
  dirty |= SetShadowRegister(&regs.rb_surface_info, XE_GPU_REG_RB_SURFACE_INFO);
-  XXH64_update(&hash_state_, &regs, sizeof(regs));
+  XXH3_64bits_update(&hash_state_, &regs, sizeof(regs));
  if (!dirty) {
    return UpdateStatus::kCompatible;
  }
@@ -1437,7 +1438,7 @@ PipelineCache::UpdateStatus PipelineCache::UpdateDepthStencilState() {
  dirty |= SetShadowRegister(&regs.rb_depthcontrol, XE_GPU_REG_RB_DEPTHCONTROL);
  dirty |=
      SetShadowRegister(&regs.rb_stencilrefmask, XE_GPU_REG_RB_STENCILREFMASK);
-  XXH64_update(&hash_state_, &regs, sizeof(regs));
+  XXH3_64bits_update(&hash_state_, &regs, sizeof(regs));
  if (!dirty) {
    return UpdateStatus::kCompatible;
  }
@@ -1526,7 +1527,7 @@ PipelineCache::UpdateStatus PipelineCache::UpdateColorBlendState() {
  dirty |=
      SetShadowRegister(&regs.rb_blendcontrol[3], XE_GPU_REG_RB_BLENDCONTROL3);
  dirty |= SetShadowRegister(&regs.rb_modecontrol, XE_GPU_REG_RB_MODECONTROL);
-  XXH64_update(&hash_state_, &regs, sizeof(regs));
+  XXH3_64bits_update(&hash_state_, &regs, sizeof(regs));
  if (!dirty) {
    return UpdateStatus::kCompatible;
  }

View File

@@ -12,8 +12,7 @@
#include <unordered_map>
-#include "third_party/xxhash/xxhash.h"
+#include "xenia/base/xxhash.h"
#include "xenia/gpu/register_file.h"
#include "xenia/gpu/spirv_shader_translator.h"
#include "xenia/gpu/vulkan/render_cache.h"
@@ -121,7 +120,7 @@ class PipelineCache {
  // Hash state used to incrementally produce pipeline hashes during update.
  // By the time the full update pass has run the hash will represent the
  // current state in a way that can uniquely identify the produced VkPipeline.
-  XXH64_state_t hash_state_;
+  XXH3_state_t hash_state_;
  // All previously generated pipelines mapped by hash.
  std::unordered_map<uint64_t, VkPipeline> cached_pipelines_;
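
The comment above describes incremental hashing: the state is reset once per update pass, every Update*() step mixes its shadowed registers into the same state, and the final digest becomes the key under which the VkPipeline is cached. A minimal sketch of that pattern with invented names (PipelineKeyBuilder and the *Regs structs are not from the tree):

#include <cstdint>

#include "xenia/base/xxhash.h"

// Stand-ins for the shadowed register blocks hashed by the Update*() passes.
struct RenderTargetRegs { uint32_t values[8]; };
struct ShaderStageRegs { uint32_t values[4]; };

class PipelineKeyBuilder {
 public:
  // Start of an update pass: reset the running hash.
  void Begin() { XXH3_64bits_reset(&state_); }

  // Each state block feeds the same streaming state.
  void Add(const RenderTargetRegs& regs) {
    XXH3_64bits_update(&state_, &regs, sizeof(regs));
  }
  void Add(const ShaderStageRegs& regs) {
    XXH3_64bits_update(&state_, &regs, sizeof(regs));
  }

  // End of the pass: the digest identifies the pipeline to fetch or create.
  uint64_t Finish() { return XXH3_64bits_digest(&state_); }

 private:
  XXH3_state_t state_;
};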

View File

@@ -1377,7 +1377,7 @@ void TextureCache::WritebackTexture(Texture* texture) {
}
void TextureCache::HashTextureBindings(
-   XXH64_state_t* hash_state, uint32_t& fetch_mask,
+   XXH3_state_t* hash_state, uint32_t& fetch_mask,
    const std::vector<Shader::TextureBinding>& bindings) {
  for (auto& binding : bindings) {
    uint32_t fetch_bit = 1 << binding.fetch_constant;
@@ -1393,7 +1393,7 @@ void TextureCache::HashTextureBindings(
        reinterpret_cast<const xenos::xe_gpu_fetch_group_t*>(&regs.values[r]);
    auto& fetch = group->texture_fetch;
-   XXH64_update(hash_state, &fetch, sizeof(fetch));
+   XXH3_64bits_update(hash_state, &fetch, sizeof(fetch));
  }
}
@@ -1401,14 +1401,14 @@ VkDescriptorSet TextureCache::PrepareTextureSet(
    VkCommandBuffer command_buffer, VkFence completion_fence,
    const std::vector<Shader::TextureBinding>& vertex_bindings,
    const std::vector<Shader::TextureBinding>& pixel_bindings) {
-  XXH64_state_t hash_state;
-  XXH64_reset(&hash_state, 0);
+  XXH3_state_t hash_state;
+  XXH3_64bits_reset(&hash_state);
  // (quickly) Generate a hash.
  uint32_t fetch_mask = 0;
  HashTextureBindings(&hash_state, fetch_mask, vertex_bindings);
  HashTextureBindings(&hash_state, fetch_mask, pixel_bindings);
-  uint64_t hash = XXH64_digest(&hash_state);
+  uint64_t hash = XXH3_64bits_digest(&hash_state);
  for (auto it = texture_sets_.find(hash); it != texture_sets_.end(); ++it) {
    // TODO(DrChat): We need to compare the bindings and ensure they're equal.
    return it->second;

View File

@@ -186,7 +186,7 @@ class TextureCache {
  bool UploadTexture(VkCommandBuffer command_buffer, VkFence completion_fence,
                     Texture* dest, const TextureInfo& src);
-  void HashTextureBindings(XXH64_state_t* hash_state, uint32_t& fetch_mask,
+  void HashTextureBindings(XXH3_state_t* hash_state, uint32_t& fetch_mask,
                           const std::vector<Shader::TextureBinding>& bindings);
  bool SetupTextureBindings(
      VkCommandBuffer command_buffer, VkFence completion_fence,

third_party/xxhash vendored Submodule

@@ -0,0 +1 @@
Subproject commit 4c881f796d6af27ef7d9c48f87817da0d3d75dc1

View File

@@ -1,24 +0,0 @@
xxHash Library
Copyright (c) 2012-2014, Yann Collet
All rights reserved.
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice, this
list of conditions and the following disclaimer in the documentation and/or
other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

View File

@@ -1,67 +0,0 @@
# ################################################################
# xxHash Makefile
# Copyright (C) Yann Collet 2012-2014
# GPL v2 License
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# You can contact the author at :
# - xxHash source repository : http://code.google.com/p/xxhash/
# ################################################################
# xxHash.exe : benchmark program, to demonstrate xxHash speed
# ################################################################
CC := $(CC)
CFLAGS ?= -O3
CFLAGS += -I. -std=c99 -Wall -Wextra -Wundef -Wshadow -Wcast-align -Wstrict-prototypes
# Define *.exe as extension for Windows systems
ifneq (,$(filter Windows%,$(OS)))
EXT =.exe
else
EXT =
endif
default: xxhsum
all: xxhsum xxhsum32
xxhsum: xxhash.c xxhsum.c
$(CC) $(CFLAGS) $^ -o $@$(EXT)
ln -sf $@ xxh32sum
ln -sf $@ xxh64sum
xxhsum32: xxhash.c xxhsum.c
$(CC) -m32 $(CFLAGS) $^ -o $@$(EXT)
test: $(TEST_TARGETS)
test: xxhsum
./xxhsum < xxhash.c
./xxhsum -b xxhash.c
valgrind --leak-check=yes ./xxhsum -bi1 xxhash.c
valgrind --leak-check=yes ./xxhsum -H0 xxhash.c
valgrind --leak-check=yes ./xxhsum -H1 xxhash.c
test-all: test xxhsum32
./xxhsum32 -b xxhash.c
clean:
@rm -f core *.o xxhsum$(EXT) xxhsum32$(EXT) xxh32sum xxh64sum
@echo cleaning completed

View File

@@ -1,74 +0,0 @@
xxHash - Extremely fast hash algorithm
======================================
xxHash is an Extremely fast Hash algorithm, running at RAM speed limits.
It successfully passes the [SMHasher](http://code.google.com/p/smhasher/wiki/SMHasher) Test suite evaluating Hash quality.
|Branch |Status |
|------------|---------|
|master | [![Build Status](https://travis-ci.org/Cyan4973/xxHash.svg?branch=master)](https://travis-ci.org/Cyan4973/xxHash?branch=master) |
|dev | [![Build Status](https://travis-ci.org/Cyan4973/xxHash.svg?branch=dev)](https://travis-ci.org/Cyan4973/xxHash?branch=dev) |
Benchmarks
-------------------------
The benchmark uses SMHasher speed test, compiled with Visual on a Windows Seven 32 bits system.
The reference system uses a Core 2 Duo @3GHz
| Name | Speed | Q.Score | Author |
|------|-------|---------|--------|
| xxHash | 5.4 GB/s | 10 | Y.C. |
| MumurHash 3a | 2.7 GB/s | 10 | Austin Appleby |
| SBox | 1.4 GB/s | 9 | Bret Mulvey |
| Lookup3 | 1.2 GB/s | 9 | Bob Jenkins |
| CityHash64 | 1.05 GB/s | 10 | Pike & Alakuijala |
| FNV | 0.55 GB/s | 5 | Fowler, Noll, Vo |
| CRC32 | 0.43 GB/s | 9 | |
| SipHash | 0.34 GB/s | 10 | Jean-Philippe Aumasson |
| MD5-32 | 0.33 GB/s | 10 | Ronald L. Rivest |
| SHA1-32 | 0.28 GB/s | 10 | |
Q.Score is a measure of quality of the hash function.
It depends on successfully passing SMHasher test set.
10 is a perfect score.
A new version, XXH64, has been created thanks to Mathias Westerdahl contribution, which offers superior speed and dispersion for 64-bits systems. Note however that 32-bits applications will still run faster using the 32-bits version.
SMHasher speed test, compiled using GCC 4.8.2, a Linux Mint 64-bits.
The reference system uses a Core i5-3340M @2.7GHz
| Version | Speed on 64-bits | Speed on 32-bits |
|------------|------------------|------------------|
| XXH64 | 13.8 GB/s | 1.9 GB/s |
| XXH32 | 6.8 GB/s | 6.0 GB/s |
This is an official mirror of xxHash project, [hosted on Google Code](http://code.google.com/p/xxhash/).
The intention is to offer github's capabilities to xxhash users, such as cloning, branch, pull requests or source download.
The "master" branch will reflect, the status of xxhash at its official homepage. The "dev" branch is the one where all contributions will be merged. If you plan to propose a patch, please commit into the "dev" branch. Direct commit to "master" are not permitted. Feature branches will also exist, typically to introduce new requirements, and be temporarily available for testing before merge into "dev" branch.

View File

@@ -1,2 +0,0 @@
https://code.google.com/p/xxhash/
r39 on 12/23/2014

View File

@@ -1,928 +0,0 @@
/*
xxHash - Fast Hash algorithm
Copyright (C) 2012-2014, Yann Collet.
BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
You can contact the author at :
- xxHash source repository : http://code.google.com/p/xxhash/
- public discussion board : https://groups.google.com/forum/#!forum/lz4c
*/
//**************************************
// Tuning parameters
//**************************************
// Unaligned memory access is automatically enabled for "common" CPU, such as x86.
// For others CPU, the compiler will be more cautious, and insert extra code to ensure aligned access is respected.
// If you know your target CPU supports unaligned memory access, you want to force this option manually to improve performance.
// You can also enable this parameter if you know your input data will always be aligned (boundaries of 4, for U32).
#if defined(__ARM_FEATURE_UNALIGNED) || defined(__i386) || defined(_M_IX86) || defined(__x86_64__) || defined(_M_X64)
# define XXH_USE_UNALIGNED_ACCESS 1
#endif
// XXH_ACCEPT_NULL_INPUT_POINTER :
// If the input pointer is a null pointer, xxHash default behavior is to trigger a memory access error, since it is a bad pointer.
// When this option is enabled, xxHash output for null input pointers will be the same as a null-length input.
// This option has a very small performance cost (only measurable on small inputs).
// By default, this option is disabled. To enable it, uncomment below define :
// #define XXH_ACCEPT_NULL_INPUT_POINTER 1
// XXH_FORCE_NATIVE_FORMAT :
// By default, xxHash library provides endian-independant Hash values, based on little-endian convention.
// Results are therefore identical for little-endian and big-endian CPU.
// This comes at a performance cost for big-endian CPU, since some swapping is required to emulate little-endian format.
// Should endian-independance be of no importance for your application, you may set the #define below to 1.
// It will improve speed for Big-endian CPU.
// This option has no impact on Little_Endian CPU.
#define XXH_FORCE_NATIVE_FORMAT 0
//**************************************
// Compiler Specific Options
//**************************************
// Disable some Visual warning messages
#ifdef _MSC_VER // Visual Studio
# pragma warning(disable : 4127) // disable: C4127: conditional expression is constant
#endif
#ifdef _MSC_VER // Visual Studio
# define FORCE_INLINE static __forceinline
#else
# ifdef __GNUC__
# define FORCE_INLINE static inline __attribute__((always_inline))
# else
# define FORCE_INLINE static inline
# endif
#endif
//**************************************
// Includes & Memory related functions
//**************************************
#include "xxhash.h"
// Modify the local functions below should you wish to use some other memory routines
// for malloc(), free()
#include <stdlib.h>
static void* XXH_malloc(size_t s) { return malloc(s); }
static void XXH_free (void* p) { free(p); }
// for memcpy()
#include <string.h>
static void* XXH_memcpy(void* dest, const void* src, size_t size)
{
return memcpy(dest,src,size);
}
//**************************************
// Basic Types
//**************************************
#if defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L // C99
# include <stdint.h>
typedef uint8_t BYTE;
typedef uint16_t U16;
typedef uint32_t U32;
typedef int32_t S32;
typedef uint64_t U64;
#else
typedef unsigned char BYTE;
typedef unsigned short U16;
typedef unsigned int U32;
typedef signed int S32;
typedef unsigned long long U64;
#endif
#if defined(__GNUC__) && !defined(XXH_USE_UNALIGNED_ACCESS)
# define _PACKED __attribute__ ((packed))
#else
# define _PACKED
#endif
#if !defined(XXH_USE_UNALIGNED_ACCESS) && !defined(__GNUC__)
# ifdef __IBMC__
# pragma pack(1)
# else
# pragma pack(push, 1)
# endif
#endif
typedef struct _U32_S
{
U32 v;
} _PACKED U32_S;
typedef struct _U64_S
{
U64 v;
} _PACKED U64_S;
#if !defined(XXH_USE_UNALIGNED_ACCESS) && !defined(__GNUC__)
# pragma pack(pop)
#endif
#define A32(x) (((U32_S *)(x))->v)
#define A64(x) (((U64_S *)(x))->v)
//***************************************
// Compiler-specific Functions and Macros
//***************************************
#define GCC_VERSION (__GNUC__ * 100 + __GNUC_MINOR__)
// Note : although _rotl exists for minGW (GCC under windows), performance seems poor
#if defined(_MSC_VER)
# define XXH_rotl32(x,r) _rotl(x,r)
# define XXH_rotl64(x,r) _rotl64(x,r)
#else
# define XXH_rotl32(x,r) ((x << r) | (x >> (32 - r)))
# define XXH_rotl64(x,r) ((x << r) | (x >> (64 - r)))
#endif
#if defined(_MSC_VER) // Visual Studio
# define XXH_swap32 _byteswap_ulong
# define XXH_swap64 _byteswap_uint64
#elif GCC_VERSION >= 403
# define XXH_swap32 __builtin_bswap32
# define XXH_swap64 __builtin_bswap64
#else
static inline U32 XXH_swap32 (U32 x)
{
return ((x << 24) & 0xff000000 ) |
((x << 8) & 0x00ff0000 ) |
((x >> 8) & 0x0000ff00 ) |
((x >> 24) & 0x000000ff );
}
static inline U64 XXH_swap64 (U64 x)
{
return ((x << 56) & 0xff00000000000000ULL) |
((x << 40) & 0x00ff000000000000ULL) |
((x << 24) & 0x0000ff0000000000ULL) |
((x << 8) & 0x000000ff00000000ULL) |
((x >> 8) & 0x00000000ff000000ULL) |
((x >> 24) & 0x0000000000ff0000ULL) |
((x >> 40) & 0x000000000000ff00ULL) |
((x >> 56) & 0x00000000000000ffULL);
}
#endif
//**************************************
// Constants
//**************************************
#define PRIME32_1 2654435761U
#define PRIME32_2 2246822519U
#define PRIME32_3 3266489917U
#define PRIME32_4 668265263U
#define PRIME32_5 374761393U
#define PRIME64_1 11400714785074694791ULL
#define PRIME64_2 14029467366897019727ULL
#define PRIME64_3 1609587929392839161ULL
#define PRIME64_4 9650029242287828579ULL
#define PRIME64_5 2870177450012600261ULL
//**************************************
// Architecture Macros
//**************************************
typedef enum { XXH_bigEndian=0, XXH_littleEndian=1 } XXH_endianess;
#ifndef XXH_CPU_LITTLE_ENDIAN // It is possible to define XXH_CPU_LITTLE_ENDIAN externally, for example using a compiler switch
static const int one = 1;
# define XXH_CPU_LITTLE_ENDIAN (*(char*)(&one))
#endif
//**************************************
// Macros
//**************************************
#define XXH_STATIC_ASSERT(c) { enum { XXH_static_assert = 1/(!!(c)) }; } // use only *after* variable declarations
//****************************
// Memory reads
//****************************
typedef enum { XXH_aligned, XXH_unaligned } XXH_alignment;
FORCE_INLINE U32 XXH_readLE32_align(const void* ptr, XXH_endianess endian, XXH_alignment align)
{
if (align==XXH_unaligned)
return endian==XXH_littleEndian ? A32(ptr) : XXH_swap32(A32(ptr));
else
return endian==XXH_littleEndian ? *(U32*)ptr : XXH_swap32(*(U32*)ptr);
}
FORCE_INLINE U32 XXH_readLE32(const void* ptr, XXH_endianess endian)
{
return XXH_readLE32_align(ptr, endian, XXH_unaligned);
}
FORCE_INLINE U64 XXH_readLE64_align(const void* ptr, XXH_endianess endian, XXH_alignment align)
{
if (align==XXH_unaligned)
return endian==XXH_littleEndian ? A64(ptr) : XXH_swap64(A64(ptr));
else
return endian==XXH_littleEndian ? *(U64*)ptr : XXH_swap64(*(U64*)ptr);
}
FORCE_INLINE U64 XXH_readLE64(const void* ptr, XXH_endianess endian)
{
return XXH_readLE64_align(ptr, endian, XXH_unaligned);
}
//****************************
// Simple Hash Functions
//****************************
FORCE_INLINE U32 XXH32_endian_align(const void* input, size_t len, U32 seed, XXH_endianess endian, XXH_alignment align)
{
const BYTE* p = (const BYTE*)input;
const BYTE* bEnd = p + len;
U32 h32;
#define XXH_get32bits(p) XXH_readLE32_align(p, endian, align)
#ifdef XXH_ACCEPT_NULL_INPUT_POINTER
if (p==NULL)
{
len=0;
bEnd=p=(const BYTE*)(size_t)16;
}
#endif
if (len>=16)
{
const BYTE* const limit = bEnd - 16;
U32 v1 = seed + PRIME32_1 + PRIME32_2;
U32 v2 = seed + PRIME32_2;
U32 v3 = seed + 0;
U32 v4 = seed - PRIME32_1;
do
{
v1 += XXH_get32bits(p) * PRIME32_2;
v1 = XXH_rotl32(v1, 13);
v1 *= PRIME32_1;
p+=4;
v2 += XXH_get32bits(p) * PRIME32_2;
v2 = XXH_rotl32(v2, 13);
v2 *= PRIME32_1;
p+=4;
v3 += XXH_get32bits(p) * PRIME32_2;
v3 = XXH_rotl32(v3, 13);
v3 *= PRIME32_1;
p+=4;
v4 += XXH_get32bits(p) * PRIME32_2;
v4 = XXH_rotl32(v4, 13);
v4 *= PRIME32_1;
p+=4;
}
while (p<=limit);
h32 = XXH_rotl32(v1, 1) + XXH_rotl32(v2, 7) + XXH_rotl32(v3, 12) + XXH_rotl32(v4, 18);
}
else
{
h32 = seed + PRIME32_5;
}
h32 += (U32) len;
while (p+4<=bEnd)
{
h32 += XXH_get32bits(p) * PRIME32_3;
h32 = XXH_rotl32(h32, 17) * PRIME32_4 ;
p+=4;
}
while (p<bEnd)
{
h32 += (*p) * PRIME32_5;
h32 = XXH_rotl32(h32, 11) * PRIME32_1 ;
p++;
}
h32 ^= h32 >> 15;
h32 *= PRIME32_2;
h32 ^= h32 >> 13;
h32 *= PRIME32_3;
h32 ^= h32 >> 16;
return h32;
}
unsigned int XXH32 (const void* input, size_t len, unsigned seed)
{
#if 0
// Simple version, good for code maintenance, but unfortunately slow for small inputs
XXH32_state_t state;
XXH32_reset(&state, seed);
XXH32_update(&state, input, len);
return XXH32_digest(&state);
#else
XXH_endianess endian_detected = (XXH_endianess)XXH_CPU_LITTLE_ENDIAN;
# if !defined(XXH_USE_UNALIGNED_ACCESS)
if ((((size_t)input) & 3) == 0) // Input is aligned, let's leverage the speed advantage
{
if ((endian_detected==XXH_littleEndian) || XXH_FORCE_NATIVE_FORMAT)
return XXH32_endian_align(input, len, seed, XXH_littleEndian, XXH_aligned);
else
return XXH32_endian_align(input, len, seed, XXH_bigEndian, XXH_aligned);
}
# endif
if ((endian_detected==XXH_littleEndian) || XXH_FORCE_NATIVE_FORMAT)
return XXH32_endian_align(input, len, seed, XXH_littleEndian, XXH_unaligned);
else
return XXH32_endian_align(input, len, seed, XXH_bigEndian, XXH_unaligned);
#endif
}
FORCE_INLINE U64 XXH64_endian_align(const void* input, size_t len, U64 seed, XXH_endianess endian, XXH_alignment align)
{
const BYTE* p = (const BYTE*)input;
const BYTE* bEnd = p + len;
U64 h64;
#define XXH_get64bits(p) XXH_readLE64_align(p, endian, align)
#ifdef XXH_ACCEPT_NULL_INPUT_POINTER
if (p==NULL)
{
len=0;
bEnd=p=(const BYTE*)(size_t)32;
}
#endif
if (len>=32)
{
const BYTE* const limit = bEnd - 32;
U64 v1 = seed + PRIME64_1 + PRIME64_2;
U64 v2 = seed + PRIME64_2;
U64 v3 = seed + 0;
U64 v4 = seed - PRIME64_1;
do
{
v1 += XXH_get64bits(p) * PRIME64_2;
p+=8;
v1 = XXH_rotl64(v1, 31);
v1 *= PRIME64_1;
v2 += XXH_get64bits(p) * PRIME64_2;
p+=8;
v2 = XXH_rotl64(v2, 31);
v2 *= PRIME64_1;
v3 += XXH_get64bits(p) * PRIME64_2;
p+=8;
v3 = XXH_rotl64(v3, 31);
v3 *= PRIME64_1;
v4 += XXH_get64bits(p) * PRIME64_2;
p+=8;
v4 = XXH_rotl64(v4, 31);
v4 *= PRIME64_1;
}
while (p<=limit);
h64 = XXH_rotl64(v1, 1) + XXH_rotl64(v2, 7) + XXH_rotl64(v3, 12) + XXH_rotl64(v4, 18);
v1 *= PRIME64_2;
v1 = XXH_rotl64(v1, 31);
v1 *= PRIME64_1;
h64 ^= v1;
h64 = h64 * PRIME64_1 + PRIME64_4;
v2 *= PRIME64_2;
v2 = XXH_rotl64(v2, 31);
v2 *= PRIME64_1;
h64 ^= v2;
h64 = h64 * PRIME64_1 + PRIME64_4;
v3 *= PRIME64_2;
v3 = XXH_rotl64(v3, 31);
v3 *= PRIME64_1;
h64 ^= v3;
h64 = h64 * PRIME64_1 + PRIME64_4;
v4 *= PRIME64_2;
v4 = XXH_rotl64(v4, 31);
v4 *= PRIME64_1;
h64 ^= v4;
h64 = h64 * PRIME64_1 + PRIME64_4;
}
else
{
h64 = seed + PRIME64_5;
}
h64 += (U64) len;
while (p+8<=bEnd)
{
U64 k1 = XXH_get64bits(p);
k1 *= PRIME64_2;
k1 = XXH_rotl64(k1,31);
k1 *= PRIME64_1;
h64 ^= k1;
h64 = XXH_rotl64(h64,27) * PRIME64_1 + PRIME64_4;
p+=8;
}
if (p+4<=bEnd)
{
h64 ^= (U64)(XXH_get32bits(p)) * PRIME64_1;
h64 = XXH_rotl64(h64, 23) * PRIME64_2 + PRIME64_3;
p+=4;
}
while (p<bEnd)
{
h64 ^= (*p) * PRIME64_5;
h64 = XXH_rotl64(h64, 11) * PRIME64_1;
p++;
}
h64 ^= h64 >> 33;
h64 *= PRIME64_2;
h64 ^= h64 >> 29;
h64 *= PRIME64_3;
h64 ^= h64 >> 32;
return h64;
}
unsigned long long XXH64 (const void* input, size_t len, unsigned long long seed)
{
#if 0
// Simple version, good for code maintenance, but unfortunately slow for small inputs
XXH64_state_t state;
XXH64_reset(&state, seed);
XXH64_update(&state, input, len);
return XXH64_digest(&state);
#else
XXH_endianess endian_detected = (XXH_endianess)XXH_CPU_LITTLE_ENDIAN;
# if !defined(XXH_USE_UNALIGNED_ACCESS)
if ((((size_t)input) & 7)==0) // Input is aligned, let's leverage the speed advantage
{
if ((endian_detected==XXH_littleEndian) || XXH_FORCE_NATIVE_FORMAT)
return XXH64_endian_align(input, len, seed, XXH_littleEndian, XXH_aligned);
else
return XXH64_endian_align(input, len, seed, XXH_bigEndian, XXH_aligned);
}
# endif
if ((endian_detected==XXH_littleEndian) || XXH_FORCE_NATIVE_FORMAT)
return XXH64_endian_align(input, len, seed, XXH_littleEndian, XXH_unaligned);
else
return XXH64_endian_align(input, len, seed, XXH_bigEndian, XXH_unaligned);
#endif
}
/****************************************************
* Advanced Hash Functions
****************************************************/
/*** Allocation ***/
typedef struct
{
U64 total_len;
U32 seed;
U32 v1;
U32 v2;
U32 v3;
U32 v4;
U32 mem32[4]; /* defined as U32 for alignment */
U32 memsize;
} XXH_istate32_t;
typedef struct
{
U64 total_len;
U64 seed;
U64 v1;
U64 v2;
U64 v3;
U64 v4;
U64 mem64[4]; /* defined as U64 for alignment */
U32 memsize;
} XXH_istate64_t;
XXH32_state_t* XXH32_createState(void)
{
XXH_STATIC_ASSERT(sizeof(XXH32_state_t) >= sizeof(XXH_istate32_t)); // A compilation error here means XXH32_state_t is not large enough
return (XXH32_state_t*)XXH_malloc(sizeof(XXH32_state_t));
}
XXH_errorcode XXH32_freeState(XXH32_state_t* statePtr)
{
XXH_free(statePtr);
return XXH_OK;
};
XXH64_state_t* XXH64_createState(void)
{
XXH_STATIC_ASSERT(sizeof(XXH64_state_t) >= sizeof(XXH_istate64_t)); // A compilation error here means XXH64_state_t is not large enough
return (XXH64_state_t*)XXH_malloc(sizeof(XXH64_state_t));
}
XXH_errorcode XXH64_freeState(XXH64_state_t* statePtr)
{
XXH_free(statePtr);
return XXH_OK;
};
/*** Hash feed ***/
XXH_errorcode XXH32_reset(XXH32_state_t* state_in, U32 seed)
{
XXH_istate32_t* state = (XXH_istate32_t*) state_in;
state->seed = seed;
state->v1 = seed + PRIME32_1 + PRIME32_2;
state->v2 = seed + PRIME32_2;
state->v3 = seed + 0;
state->v4 = seed - PRIME32_1;
state->total_len = 0;
state->memsize = 0;
return XXH_OK;
}
XXH_errorcode XXH64_reset(XXH64_state_t* state_in, unsigned long long seed)
{
XXH_istate64_t* state = (XXH_istate64_t*) state_in;
state->seed = seed;
state->v1 = seed + PRIME64_1 + PRIME64_2;
state->v2 = seed + PRIME64_2;
state->v3 = seed + 0;
state->v4 = seed - PRIME64_1;
state->total_len = 0;
state->memsize = 0;
return XXH_OK;
}
FORCE_INLINE XXH_errorcode XXH32_update_endian (XXH32_state_t* state_in, const void* input, size_t len, XXH_endianess endian)
{
XXH_istate32_t* state = (XXH_istate32_t *) state_in;
const BYTE* p = (const BYTE*)input;
const BYTE* const bEnd = p + len;
#ifdef XXH_ACCEPT_NULL_INPUT_POINTER
if (input==NULL) return XXH_ERROR;
#endif
state->total_len += len;
if (state->memsize + len < 16) // fill in tmp buffer
{
XXH_memcpy((BYTE*)(state->mem32) + state->memsize, input, len);
state->memsize += (U32)len;
return XXH_OK;
}
if (state->memsize) // some data left from previous update
{
XXH_memcpy((BYTE*)(state->mem32) + state->memsize, input, 16-state->memsize);
{
const U32* p32 = state->mem32;
state->v1 += XXH_readLE32(p32, endian) * PRIME32_2;
state->v1 = XXH_rotl32(state->v1, 13);
state->v1 *= PRIME32_1;
p32++;
state->v2 += XXH_readLE32(p32, endian) * PRIME32_2;
state->v2 = XXH_rotl32(state->v2, 13);
state->v2 *= PRIME32_1;
p32++;
state->v3 += XXH_readLE32(p32, endian) * PRIME32_2;
state->v3 = XXH_rotl32(state->v3, 13);
state->v3 *= PRIME32_1;
p32++;
state->v4 += XXH_readLE32(p32, endian) * PRIME32_2;
state->v4 = XXH_rotl32(state->v4, 13);
state->v4 *= PRIME32_1;
p32++;
}
p += 16-state->memsize;
state->memsize = 0;
}
if (p <= bEnd-16)
{
const BYTE* const limit = bEnd - 16;
U32 v1 = state->v1;
U32 v2 = state->v2;
U32 v3 = state->v3;
U32 v4 = state->v4;
do
{
v1 += XXH_readLE32(p, endian) * PRIME32_2;
v1 = XXH_rotl32(v1, 13);
v1 *= PRIME32_1;
p+=4;
v2 += XXH_readLE32(p, endian) * PRIME32_2;
v2 = XXH_rotl32(v2, 13);
v2 *= PRIME32_1;
p+=4;
v3 += XXH_readLE32(p, endian) * PRIME32_2;
v3 = XXH_rotl32(v3, 13);
v3 *= PRIME32_1;
p+=4;
v4 += XXH_readLE32(p, endian) * PRIME32_2;
v4 = XXH_rotl32(v4, 13);
v4 *= PRIME32_1;
p+=4;
}
while (p<=limit);
state->v1 = v1;
state->v2 = v2;
state->v3 = v3;
state->v4 = v4;
}
if (p < bEnd)
{
XXH_memcpy(state->mem32, p, bEnd-p);
state->memsize = (int)(bEnd-p);
}
return XXH_OK;
}
XXH_errorcode XXH32_update (XXH32_state_t* state_in, const void* input, size_t len)
{
XXH_endianess endian_detected = (XXH_endianess)XXH_CPU_LITTLE_ENDIAN;
if ((endian_detected==XXH_littleEndian) || XXH_FORCE_NATIVE_FORMAT)
return XXH32_update_endian(state_in, input, len, XXH_littleEndian);
else
return XXH32_update_endian(state_in, input, len, XXH_bigEndian);
}
FORCE_INLINE U32 XXH32_digest_endian (const XXH32_state_t* state_in, XXH_endianess endian)
{
XXH_istate32_t* state = (XXH_istate32_t*) state_in;
const BYTE * p = (const BYTE*)state->mem32;
BYTE* bEnd = (BYTE*)(state->mem32) + state->memsize;
U32 h32;
if (state->total_len >= 16)
{
h32 = XXH_rotl32(state->v1, 1) + XXH_rotl32(state->v2, 7) + XXH_rotl32(state->v3, 12) + XXH_rotl32(state->v4, 18);
}
else
{
h32 = state->seed + PRIME32_5;
}
h32 += (U32) state->total_len;
while (p+4<=bEnd)
{
h32 += XXH_readLE32(p, endian) * PRIME32_3;
h32 = XXH_rotl32(h32, 17) * PRIME32_4;
p+=4;
}
while (p<bEnd)
{
h32 += (*p) * PRIME32_5;
h32 = XXH_rotl32(h32, 11) * PRIME32_1;
p++;
}
h32 ^= h32 >> 15;
h32 *= PRIME32_2;
h32 ^= h32 >> 13;
h32 *= PRIME32_3;
h32 ^= h32 >> 16;
return h32;
}
U32 XXH32_digest (const XXH32_state_t* state_in)
{
XXH_endianess endian_detected = (XXH_endianess)XXH_CPU_LITTLE_ENDIAN;
if ((endian_detected==XXH_littleEndian) || XXH_FORCE_NATIVE_FORMAT)
return XXH32_digest_endian(state_in, XXH_littleEndian);
else
return XXH32_digest_endian(state_in, XXH_bigEndian);
}
FORCE_INLINE XXH_errorcode XXH64_update_endian (XXH64_state_t* state_in, const void* input, size_t len, XXH_endianess endian)
{
XXH_istate64_t * state = (XXH_istate64_t *) state_in;
const BYTE* p = (const BYTE*)input;
const BYTE* const bEnd = p + len;
#ifdef XXH_ACCEPT_NULL_INPUT_POINTER
if (input==NULL) return XXH_ERROR;
#endif
state->total_len += len;
if (state->memsize + len < 32) // fill in tmp buffer
{
XXH_memcpy(((BYTE*)state->mem64) + state->memsize, input, len);
state->memsize += (U32)len;
return XXH_OK;
}
if (state->memsize) // some data left from previous update
{
XXH_memcpy(((BYTE*)state->mem64) + state->memsize, input, 32-state->memsize);
{
const U64* p64 = state->mem64;
state->v1 += XXH_readLE64(p64, endian) * PRIME64_2;
state->v1 = XXH_rotl64(state->v1, 31);
state->v1 *= PRIME64_1;
p64++;
state->v2 += XXH_readLE64(p64, endian) * PRIME64_2;
state->v2 = XXH_rotl64(state->v2, 31);
state->v2 *= PRIME64_1;
p64++;
state->v3 += XXH_readLE64(p64, endian) * PRIME64_2;
state->v3 = XXH_rotl64(state->v3, 31);
state->v3 *= PRIME64_1;
p64++;
state->v4 += XXH_readLE64(p64, endian) * PRIME64_2;
state->v4 = XXH_rotl64(state->v4, 31);
state->v4 *= PRIME64_1;
p64++;
}
p += 32-state->memsize;
state->memsize = 0;
}
if (p+32 <= bEnd)
{
const BYTE* const limit = bEnd - 32;
U64 v1 = state->v1;
U64 v2 = state->v2;
U64 v3 = state->v3;
U64 v4 = state->v4;
do
{
v1 += XXH_readLE64(p, endian) * PRIME64_2;
v1 = XXH_rotl64(v1, 31);
v1 *= PRIME64_1;
p+=8;
v2 += XXH_readLE64(p, endian) * PRIME64_2;
v2 = XXH_rotl64(v2, 31);
v2 *= PRIME64_1;
p+=8;
v3 += XXH_readLE64(p, endian) * PRIME64_2;
v3 = XXH_rotl64(v3, 31);
v3 *= PRIME64_1;
p+=8;
v4 += XXH_readLE64(p, endian) * PRIME64_2;
v4 = XXH_rotl64(v4, 31);
v4 *= PRIME64_1;
p+=8;
}
while (p<=limit);
state->v1 = v1;
state->v2 = v2;
state->v3 = v3;
state->v4 = v4;
}
if (p < bEnd)
{
XXH_memcpy(state->mem64, p, bEnd-p);
state->memsize = (int)(bEnd-p);
}
return XXH_OK;
}
XXH_errorcode XXH64_update (XXH64_state_t* state_in, const void* input, size_t len)
{
XXH_endianess endian_detected = (XXH_endianess)XXH_CPU_LITTLE_ENDIAN;
if ((endian_detected==XXH_littleEndian) || XXH_FORCE_NATIVE_FORMAT)
return XXH64_update_endian(state_in, input, len, XXH_littleEndian);
else
return XXH64_update_endian(state_in, input, len, XXH_bigEndian);
}
FORCE_INLINE U64 XXH64_digest_endian (const XXH64_state_t* state_in, XXH_endianess endian)
{
XXH_istate64_t * state = (XXH_istate64_t *) state_in;
const BYTE * p = (const BYTE*)state->mem64;
BYTE* bEnd = (BYTE*)state->mem64 + state->memsize;
U64 h64;
if (state->total_len >= 32)
{
U64 v1 = state->v1;
U64 v2 = state->v2;
U64 v3 = state->v3;
U64 v4 = state->v4;
h64 = XXH_rotl64(v1, 1) + XXH_rotl64(v2, 7) + XXH_rotl64(v3, 12) + XXH_rotl64(v4, 18);
v1 *= PRIME64_2;
v1 = XXH_rotl64(v1, 31);
v1 *= PRIME64_1;
h64 ^= v1;
h64 = h64*PRIME64_1 + PRIME64_4;
v2 *= PRIME64_2;
v2 = XXH_rotl64(v2, 31);
v2 *= PRIME64_1;
h64 ^= v2;
h64 = h64*PRIME64_1 + PRIME64_4;
v3 *= PRIME64_2;
v3 = XXH_rotl64(v3, 31);
v3 *= PRIME64_1;
h64 ^= v3;
h64 = h64*PRIME64_1 + PRIME64_4;
v4 *= PRIME64_2;
v4 = XXH_rotl64(v4, 31);
v4 *= PRIME64_1;
h64 ^= v4;
h64 = h64*PRIME64_1 + PRIME64_4;
}
else
{
h64 = state->seed + PRIME64_5;
}
h64 += (U64) state->total_len;
while (p+8<=bEnd)
{
U64 k1 = XXH_readLE64(p, endian);
k1 *= PRIME64_2;
k1 = XXH_rotl64(k1,31);
k1 *= PRIME64_1;
h64 ^= k1;
h64 = XXH_rotl64(h64,27) * PRIME64_1 + PRIME64_4;
p+=8;
}
if (p+4<=bEnd)
{
h64 ^= (U64)(XXH_readLE32(p, endian)) * PRIME64_1;
h64 = XXH_rotl64(h64, 23) * PRIME64_2 + PRIME64_3;
p+=4;
}
while (p<bEnd)
{
h64 ^= (*p) * PRIME64_5;
h64 = XXH_rotl64(h64, 11) * PRIME64_1;
p++;
}
h64 ^= h64 >> 33;
h64 *= PRIME64_2;
h64 ^= h64 >> 29;
h64 *= PRIME64_3;
h64 ^= h64 >> 32;
return h64;
}
unsigned long long XXH64_digest (const XXH64_state_t* state_in)
{
XXH_endianess endian_detected = (XXH_endianess)XXH_CPU_LITTLE_ENDIAN;
if ((endian_detected==XXH_littleEndian) || XXH_FORCE_NATIVE_FORMAT)
return XXH64_digest_endian(state_in, XXH_littleEndian);
else
return XXH64_digest_endian(state_in, XXH_bigEndian);
}

View File

@@ -1,156 +0,0 @@
/*
xxHash - Extremely Fast Hash algorithm
Header File
Copyright (C) 2012-2014, Yann Collet.
BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
You can contact the author at :
- xxHash source repository : http://code.google.com/p/xxhash/
*/
/* Notice extracted from xxHash homepage :
xxHash is an extremely fast Hash algorithm, running at RAM speed limits.
It also successfully passes all tests from the SMHasher suite.
Comparison (single thread, 32-bit Windows 7, using SMHasher on a Core 2 Duo @ 3 GHz)
Name            Speed       Q.Score   Author
xxHash          5.4 GB/s      10
CrapWow         3.2 GB/s       2      Andrew
MurmurHash 3a   2.7 GB/s      10      Austin Appleby
SpookyHash      2.0 GB/s      10      Bob Jenkins
SBox            1.4 GB/s       9      Bret Mulvey
Lookup3         1.2 GB/s       9      Bob Jenkins
SuperFastHash   1.2 GB/s       1      Paul Hsieh
CityHash64      1.05 GB/s     10      Pike & Alakuijala
FNV             0.55 GB/s      5      Fowler, Noll, Vo
CRC32           0.43 GB/s      9
MD5-32          0.33 GB/s     10      Ronald L. Rivest
SHA1-32         0.28 GB/s     10
Q.Score is a measure of the quality of the hash function.
It depends on successfully passing the SMHasher test set.
10 is a perfect score.
*/
#pragma once
#if defined (__cplusplus)
extern "C" {
#endif
/*****************************
Includes
*****************************/
#include <stddef.h> /* size_t */
/*****************************
Type
*****************************/
typedef enum { XXH_OK=0, XXH_ERROR } XXH_errorcode;
/*****************************
Simple Hash Functions
*****************************/
unsigned int XXH32 (const void* input, size_t length, unsigned seed);
unsigned long long XXH64 (const void* input, size_t length, unsigned long long seed);
/*
XXH32() :
Calculate the 32-bit hash of the sequence of "length" bytes stored at memory address "input".
The memory between input & input+length must be valid (allocated and read-accessible).
"seed" can be used to alter the result predictably.
This function successfully passes all SMHasher tests.
Speed on Core 2 Duo @ 3 GHz (single thread, SMHasher benchmark) : 5.4 GB/s
XXH64() :
Calculate the 64-bit hash of the sequence of "length" bytes stored at memory address "input".
*/
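/*
A minimal one-shot usage sketch for the two functions above (the buffer name
and the zero seeds are arbitrary choices for illustration):
    const char msg[] = "message";
    unsigned int       h32 = XXH32(msg, sizeof(msg) - 1, 0);
    unsigned long long h64 = XXH64(msg, sizeof(msg) - 1, 0);
Both calls hash the same bytes; they differ only in the width of the result.
*/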
/*****************************
Advanced Hash Functions
*****************************/
typedef struct { long long ll[ 6]; } XXH32_state_t;
typedef struct { long long ll[11]; } XXH64_state_t;
/*
These structures allow static allocation of XXH states.
States must then be initialized using XXHnn_reset() before first use.
If you prefer dynamic allocation, please refer to functions below.
*/
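/*
Static allocation sketch (seed 0 chosen arbitrarily; the dynamic alternative
uses the functions declared below):
    XXH64_state_t state;
    XXH64_reset(&state, 0);
*/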
XXH32_state_t* XXH32_createState(void);
XXH_errorcode XXH32_freeState(XXH32_state_t* statePtr);
XXH64_state_t* XXH64_createState(void);
XXH_errorcode XXH64_freeState(XXH64_state_t* statePtr);
/*
These functions create and release memory for XXH state.
States must then be initialized using XXHnn_reset() before first use.
*/
XXH_errorcode XXH32_reset (XXH32_state_t* statePtr, unsigned seed);
XXH_errorcode XXH32_update (XXH32_state_t* statePtr, const void* input, size_t length);
unsigned int XXH32_digest (const XXH32_state_t* statePtr);
XXH_errorcode XXH64_reset (XXH64_state_t* statePtr, unsigned long long seed);
XXH_errorcode XXH64_update (XXH64_state_t* statePtr, const void* input, size_t length);
unsigned long long XXH64_digest (const XXH64_state_t* statePtr);
/*
These functions calculate the xxHash of an input provided in multiple smaller packets,
as opposed to an input provided as a single block.
XXH state space must first be allocated, using either the static or the dynamic method provided above.
Start a new hash by initializing the state with a seed, using XXHnn_reset().
Then feed the hash state by calling XXHnn_update() as many times as necessary.
The input must be valid, meaning allocated and read-accessible.
The function returns an error code: 0 means OK, any other value signals an error.
A hash can be produced at any time by calling XXHnn_digest(),
which returns the final nn-bit hash.
The hash state can nonetheless keep being fed with more input,
producing new hashes with further calls to XXHnn_digest().
When done, free the XXH state space, typically with XXHnn_freeState().
*/
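/*
A minimal streaming sketch following the steps above, using the dynamically
allocated state (chunk1/chunk2 and their lengths stand for any caller-provided
buffers; error-code checks omitted):
    XXH64_state_t* state = XXH64_createState();
    XXH64_reset(state, 0);
    XXH64_update(state, chunk1, chunk1_len);
    XXH64_update(state, chunk2, chunk2_len);
    unsigned long long h64 = XXH64_digest(state);
    XXH64_freeState(state);
The digest may be taken at any point, and updating may continue afterwards.
*/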
#if defined (__cplusplus)
}
#endif


@ -1,689 +0,0 @@
/*
bench.c - Demo program to benchmark open-source algorithm
Copyright (C) Yann Collet 2012-2014
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License along
with this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
You can contact the author at :
- Blog homepage : http://fastcompression.blogspot.com/
- Discussion group : https://groups.google.com/forum/?fromgroups#!forum/lz4c
*/
/**************************************
* Compiler Options
*************************************/
/* MS Visual */
#if defined(_MSC_VER) || defined(_WIN32)
# define _CRT_SECURE_NO_WARNINGS /* removes visual warnings */
# define BMK_LEGACY_TIMER 1 /* gettimeofday() not supported by MSVC */
#endif
/* Under Linux at least, pull in the *64 commands */
#define _LARGEFILE64_SOURCE
/**************************************
* Includes
*************************************/
#include <stdlib.h> // malloc
#include <stdio.h> // fprintf, fopen, ftello64, fread, stdin, stdout; when present : _fileno
#include <string.h> // strcmp
#include <sys/types.h> // stat64
#include <sys/stat.h> // stat64
#include "xxhash.h"
/**************************************
* OS-Specific Includes
*************************************/
// Use ftime() if gettimeofday() is not available on your target
#if defined(BMK_LEGACY_TIMER)
# include <sys/timeb.h> // timeb, ftime
#else
# include <sys/time.h> // gettimeofday
#endif
#if defined(MSDOS) || defined(OS2) || defined(WIN32) || defined(_WIN32) || defined(__CYGWIN__)
# include <fcntl.h> // _O_BINARY
# include <io.h> // _setmode, _isatty
# ifdef __MINGW32__
int _fileno(FILE *stream); // MinGW omits this Windows declaration from <stdio.h>
# endif
# define SET_BINARY_MODE(file) _setmode(_fileno(file), _O_BINARY)
# define IS_CONSOLE(stdStream) _isatty(_fileno(stdStream))
#else
# include <unistd.h> // isatty, STDIN_FILENO
# define SET_BINARY_MODE(file)
# define IS_CONSOLE(stdStream) isatty(STDIN_FILENO)
#endif
#if !defined(S_ISREG)
# define S_ISREG(x) (((x) & S_IFMT) == S_IFREG)
#endif
/**************************************
* Basic Types
*************************************/
#if defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L // C99
# include <stdint.h>
typedef uint8_t BYTE;
typedef uint16_t U16;
typedef uint32_t U32;
typedef int32_t S32;
typedef uint64_t U64;
#else
typedef unsigned char BYTE;
typedef unsigned short U16;
typedef unsigned int U32;
typedef signed int S32;
typedef unsigned long long U64;
#endif
/**************************************
* Constants
*************************************/
#define PROGRAM_NAME exename
#define PROGRAM_VERSION ""
#define COMPILED __DATE__
#define AUTHOR "Yann Collet"
#define WELCOME_MESSAGE "*** %s %i-bits %s, by %s (%s) ***\n", PROGRAM_NAME, (int)(sizeof(void*)*8), PROGRAM_VERSION, AUTHOR, COMPILED
#define NBLOOPS 3 // Default number of benchmark iterations
#define TIMELOOP 2500 // Minimum duration of each timing loop, in milliseconds
#define PRIME 2654435761U
#define KB *(1<<10)
#define MB *(1<<20)
#define GB *(1U<<30)
#define MAX_MEM (2 GB - 64 MB)
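// Note: the unit macros above work by juxtaposition: "64 KB" expands to
// "64 *(1<<10)", and MAX_MEM expands to (2 *(1U<<30) - 64 *(1<<20)) bytes.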
static const char stdinName[] = "-";
//**************************************
// Display macros
//**************************************
#define DISPLAY(...) fprintf(stderr, __VA_ARGS__)
#define DISPLAYRESULT(...) fprintf(stdout, __VA_ARGS__)
#define DISPLAYLEVEL(l, ...) if (g_displayLevel>=l) DISPLAY(__VA_ARGS__);
static unsigned g_displayLevel = 1;
//**************************************
// Unit variables
//**************************************
static int g_nbIterations = NBLOOPS;
static int g_fn_selection = 1; // required within main() & usage()
//*********************************************************
// Benchmark Functions
//*********************************************************
#if defined(BMK_LEGACY_TIMER)
static int BMK_GetMilliStart(void)
{
// Based on Legacy ftime()
// Rolls over every ~ 12.1 days (0x100000/24/60/60)
// Use GetMilliSpan to correct for rollover
struct timeb tb;
int nCount;
ftime( &tb );
nCount = (int) (tb.millitm + (tb.time & 0xfffff) * 1000);
return nCount;
}
#else
static int BMK_GetMilliStart(void)
{
// Based on newer gettimeofday()
// Use GetMilliSpan to correct for rollover
struct timeval tv;
int nCount;
gettimeofday(&tv, NULL);
nCount = (int) (tv.tv_usec/1000 + (tv.tv_sec & 0xfffff) * 1000);
return nCount;
}
#endif
static int BMK_GetMilliSpan( int nTimeStart )
{
int nSpan = BMK_GetMilliStart() - nTimeStart;
if ( nSpan < 0 )
nSpan += 0x100000 * 1000;
return nSpan;
}
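// Worked example of BMK_GetMilliSpan's rollover correction (illustrative
// values): a start reading of 0xFFFFF*1000 + 999, taken just before the
// 20-bit wrap, and a current reading of 1 give a negative raw difference;
// adding 0x100000*1000 recovers the true elapsed time of 2 ms.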
static size_t BMK_findMaxMem(U64 requestedMem)
{
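// Probe for the largest allocatable block: round the request up (with extra
// headroom) to a multiple of the 64 MB step, cap it at MAX_MEM, then shrink
// one step at a time until malloc() succeeds; return one step less than that.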
size_t step = (64 MB);
size_t allocatedMemory;
BYTE* testmem=NULL;
requestedMem += 3*step;
requestedMem -= (size_t)requestedMem & (step-1);
if (requestedMem > MAX_MEM) requestedMem = MAX_MEM;
allocatedMemory = (size_t)requestedMem;
while (!testmem)
{
allocatedMemory -= step;
testmem = (BYTE*) malloc((size_t)allocatedMemory);
}
free (testmem);
return (size_t) (allocatedMemory - step);
}
static U64 BMK_GetFileSize(char* infilename)
{
int r;
#if defined(_MSC_VER)
struct _stat64 statbuf;
r = _stat64(infilename, &statbuf);
#else
struct stat statbuf;
r = stat(infilename, &statbuf);
#endif
if (r || !S_ISREG(statbuf.st_mode)) return 0; // No good...
return (U64)statbuf.st_size;
}
static int BMK_benchFile(char** fileNamesTable, int nbFiles)
{
int fileIdx=0;
U32 hashResult=0;
U64 totals = 0;
double totalc = 0.;
// Loop for each file
while (fileIdx<nbFiles)
{
FILE* inFile;
char* inFileName;
U64 inFileSize;
size_t benchedSize;
size_t readSize;
char* buffer;
char* alignedBuffer;
// Check file existence
inFileName = fileNamesTable[fileIdx++];
inFile = fopen( inFileName, "rb" );
if (inFile==NULL)
{
DISPLAY( "Pb opening %s\n", inFileName);
return 11;
}
// Memory allocation & restrictions
inFileSize = BMK_GetFileSize(inFileName);
benchedSize = (size_t) BMK_findMaxMem(inFileSize);
if ((U64)benchedSize > inFileSize) benchedSize = (size_t)inFileSize;
if (benchedSize < inFileSize)
{
DISPLAY("Not enough memory for '%s' full size; testing %i MB only...\n", inFileName, (int)(benchedSize>>20));
}
buffer = (char*)malloc((size_t )benchedSize+16);
if(!buffer)
{
DISPLAY("\nError: not enough memory!\n");
fclose(inFile);
return 12;
}
alignedBuffer = (buffer+15) - (((size_t)(buffer+15)) & 0xF); // align on next 16 bytes boundaries
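// e.g. if buffer ends in 0x07, buffer+15 ends in 0x16; its low nibble 6 is
// subtracted, leaving an address ending in 0x10, the next 16-byte boundary.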
// Fill input buffer
DISPLAY("\rLoading %s... \n", inFileName);
readSize = fread(alignedBuffer, 1, benchedSize, inFile);
fclose(inFile);
if(readSize != benchedSize)
{
DISPLAY("\nError: problem reading file '%s' !! \n", inFileName);
free(buffer);
return 13;
}
// Bench XXH32
{
int interationNb;
double fastestC = 100000000.;
DISPLAY("\r%79s\r", ""); // Clean display line
for (interationNb = 1; interationNb <= g_nbIterations; interationNb++)
{
int nbHashes = 0;
int milliTime;
DISPLAY("%1i-%-14.14s : %10i ->\r", interationNb, "XXH32", (int)benchedSize);
// Hash loop
milliTime = BMK_GetMilliStart();
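// Busy-wait for the millisecond counter to tick so the measured window
// starts exactly on a tick boundary.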
while(BMK_GetMilliStart() == milliTime);
milliTime = BMK_GetMilliStart();
while(BMK_GetMilliSpan(milliTime) < TIMELOOP)
{
int i;
for (i=0; i<100; i++)
{
hashResult = XXH32(alignedBuffer, benchedSize, 0);
nbHashes++;
}
}
milliTime = BMK_GetMilliSpan(milliTime);
if ((double)milliTime < fastestC*nbHashes) fastestC = (double)milliTime/nbHashes;
DISPLAY("%1i-%-14.14s : %10i -> %7.1f MB/s\r", interationNb, "XXH32", (int)benchedSize, (double)benchedSize / fastestC / 1000.);
}
DISPLAY("%-16.16s : %10i -> %7.1f MB/s 0x%08X\n", "XXH32", (int)benchedSize, (double)benchedSize / fastestC / 1000., hashResult);
totals += benchedSize;
totalc += fastestC;
}
// Bench Unaligned XXH32
{
int interationNb;
double fastestC = 100000000.;
DISPLAY("\r%79s\r", ""); // Clean display line
for (interationNb = 1; (interationNb <= g_nbIterations) && ((benchedSize>1)); interationNb++)
{
int nbHashes = 0;
int milliTime;
DISPLAY("%1i-%-14.14s : %10i ->\r", interationNb, "(unaligned)", (int)benchedSize);
// Hash loop
milliTime = BMK_GetMilliStart();
while(BMK_GetMilliStart() == milliTime);
milliTime = BMK_GetMilliStart();
while(BMK_GetMilliSpan(milliTime) < TIMELOOP)
{
int i;
for (i=0; i<100; i++)
{
hashResult = XXH32(alignedBuffer+1, benchedSize-1, 0);
nbHashes++;
}
}
milliTime = BMK_GetMilliSpan(milliTime);
if ((double)milliTime < fastestC*nbHashes) fastestC = (double)milliTime/nbHashes;
DISPLAY("%1i-%-14.14s : %10i -> %7.1f MB/s\r", interationNb, "XXH32 (unaligned)", (int)(benchedSize-1), (double)(benchedSize-1) / fastestC / 1000.);
}
DISPLAY("%-16.16s : %10i -> %7.1f MB/s \n", "XXH32 (unaligned)", (int)benchedSize-1, (double)(benchedSize-1) / fastestC / 1000.);
}
// Bench XXH64
{
int interationNb;
double fastestC = 100000000.;
unsigned long long h64 = 0;
DISPLAY("\r%79s\r", ""); // Clean display line
for (interationNb = 1; interationNb <= g_nbIterations; interationNb++)
{
int nbHashes = 0;
int milliTime;
DISPLAY("%1i-%-14.14s : %10i ->\r", interationNb, "XXH64", (int)benchedSize);
// Hash loop
milliTime = BMK_GetMilliStart();
while(BMK_GetMilliStart() == milliTime);
milliTime = BMK_GetMilliStart();
while(BMK_GetMilliSpan(milliTime) < TIMELOOP)
{
int i;
for (i=0; i<100; i++)
{
h64 = XXH64(alignedBuffer, benchedSize, 0);
nbHashes++;
}
}
milliTime = BMK_GetMilliSpan(milliTime);
if ((double)milliTime < fastestC*nbHashes) fastestC = (double)milliTime/nbHashes;
DISPLAY("%1i-%-14.14s : %10i -> %7.1f MB/s\r", interationNb, "XXH64", (int)benchedSize, (double)benchedSize / fastestC / 1000.);
}
DISPLAY("%-16.16s : %10i -> %7.1f MB/s 0x%08X%08X\n", "XXH64", (int)benchedSize, (double)benchedSize / fastestC / 1000., (U32)(h64>>32), (U32)(h64));
totals += benchedSize;
totalc += fastestC;
}
free(buffer);
}
if (nbFiles > 1)
printf("%-16.16s :%11llu -> %7.1f MB/s\n", " TOTAL", (long long unsigned int)totals, (double)totals/totalc/1000.);
return 0;
}
static void BMK_checkResult(U32 r1, U32 r2)
{
static int nbTests = 1;
if (r1==r2) DISPLAY("\rTest%3i : %08X == %08X ok ", nbTests, r1, r2);
else
{
DISPLAY("\rERROR : Test%3i : %08X <> %08X !!!!! \n", nbTests, r1, r2);
exit(1);
}
nbTests++;
}
static void BMK_checkResult64(U64 r1, U64 r2)
{
static int nbTests = 1;
if (r1!=r2)
{
DISPLAY("\rERROR : Test%3i : 64-bit values not equal !!!!! \n", nbTests);
DISPLAY("\r %08X%08X != %08X%08X \n", (U32)(r1>>32), (U32)r1, (U32)(r2>>32), (U32)r2);
exit(1);
}
nbTests++;
}
static void BMK_testSequence64(void* sentence, int len, U64 seed, U64 Nresult)
{
U64 Dresult;
XXH64_state_t state;
int index;
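// The one-shot hash, a single-update streaming hash, and a byte-by-byte
// streaming hash must all produce the expected value Nresult.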
Dresult = XXH64(sentence, len, seed);
BMK_checkResult64(Dresult, Nresult);
XXH64_reset(&state, seed);
XXH64_update(&state, sentence, len);
Dresult = XXH64_digest(&state);
BMK_checkResult64(Dresult, Nresult);
XXH64_reset(&state, seed);
for (index=0; index<len; index++) XXH64_update(&state, ((char*)sentence)+index, 1);
Dresult = XXH64_digest(&state);
BMK_checkResult64(Dresult, Nresult);
}
static void BMK_testSequence(void* sentence, int len, U32 seed, U32 Nresult)
{
U32 Dresult;
XXH32_state_t state;
int index;
Dresult = XXH32(sentence, len, seed);
BMK_checkResult(Dresult, Nresult);
XXH32_reset(&state, seed);
XXH32_update(&state, sentence, len);
Dresult = XXH32_digest(&state);
BMK_checkResult(Dresult, Nresult);
XXH32_reset(&state, seed);
for (index=0; index<len; index++) XXH32_update(&state, ((char*)sentence)+index, 1);
Dresult = XXH32_digest(&state);
BMK_checkResult(Dresult, Nresult);
}
#define SANITY_BUFFER_SIZE 101
static void BMK_sanityCheck(void)
{
BYTE sanityBuffer[SANITY_BUFFER_SIZE];
int i;
U32 prime = PRIME;
for (i=0; i<SANITY_BUFFER_SIZE; i++)
{
sanityBuffer[i] = (BYTE)(prime>>24);
prime *= prime;
}
BMK_testSequence(NULL, 0, 0, 0x02CC5D05);
BMK_testSequence(NULL, 0, PRIME, 0x36B78AE7);
BMK_testSequence(sanityBuffer, 1, 0, 0xB85CBEE5);
BMK_testSequence(sanityBuffer, 1, PRIME, 0xD5845D64);
BMK_testSequence(sanityBuffer, 14, 0, 0xE5AA0AB4);
BMK_testSequence(sanityBuffer, 14, PRIME, 0x4481951D);
BMK_testSequence(sanityBuffer, SANITY_BUFFER_SIZE, 0, 0x1F1AA412);
BMK_testSequence(sanityBuffer, SANITY_BUFFER_SIZE, PRIME, 0x498EC8E2);
BMK_testSequence64(NULL , 0, 0, 0xEF46DB3751D8E999ULL);
BMK_testSequence64(NULL , 0, PRIME, 0xAC75FDA2929B17EFULL);
BMK_testSequence64(sanityBuffer, 1, 0, 0x4FCE394CC88952D8ULL);
BMK_testSequence64(sanityBuffer, 1, PRIME, 0x739840CB819FA723ULL);
BMK_testSequence64(sanityBuffer, 14, 0, 0xCFFA8DB881BC3A3DULL);
BMK_testSequence64(sanityBuffer, 14, PRIME, 0x5B9611585EFCC9CBULL);
BMK_testSequence64(sanityBuffer, SANITY_BUFFER_SIZE, 0, 0x0EAB543384F878ADULL);
BMK_testSequence64(sanityBuffer, SANITY_BUFFER_SIZE, PRIME, 0xCAA65939306F1E21ULL);
DISPLAY("\r%79s\r", ""); // Clean display line
DISPLAYLEVEL(2, "Sanity check -- all tests ok\n");
}
static int BMK_hash(const char* fileName, U32 hashNb)
{
FILE* inFile;
size_t const blockSize = 64 KB;
size_t readSize;
char* buffer;
XXH64_state_t state;
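// The file is read in blockSize chunks and fed to the selected streaming
// hash; the XXH64_state_t buffer is large enough to hold the 32-bit state
// as well, so it is reused for both algorithms.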
// Check file existence
if (fileName == stdinName)
{
inFile = stdin;
SET_BINARY_MODE(stdin);
}
else
inFile = fopen( fileName, "rb" );
if (inFile==NULL)
{
DISPLAY( "Pb opening %s\n", fileName);
return 11;
}
// Memory allocation & restrictions
buffer = (char*)malloc(blockSize);
if(!buffer)
{
DISPLAY("\nError: not enough memory!\n");
fclose(inFile);
return 12;
}
// Init
switch(hashNb)
{
case 0:
XXH32_reset((XXH32_state_t*)&state, 0);
break;
case 1:
XXH64_reset(&state, 0);
break;
default:
DISPLAY("Error : bad hash algorithm ID\n");
fclose(inFile);
free(buffer);
return -1;
}
// Load file & update hash
DISPLAY("\rLoading %s... \r", fileName);
readSize = 1;
while (readSize)
{
readSize = fread(buffer, 1, blockSize, inFile);
switch(hashNb)
{
case 0:
XXH32_update((XXH32_state_t*)&state, buffer, readSize);
break;
case 1:
XXH64_update(&state, buffer, readSize);
break;
default:
break;
}
}
fclose(inFile);
free(buffer);
// display Hash
switch(hashNb)
{
case 0:
{
U32 h32 = XXH32_digest((XXH32_state_t*)&state);
DISPLAYRESULT("%08x %s \n", h32, fileName);
break;
}
case 1:
{
U64 h64 = XXH64_digest(&state);
DISPLAYRESULT("%08x%08x %s \n", (U32)(h64>>32), (U32)(h64), fileName);
break;
}
default:
break;
}
return 0;
}
//*********************************************************
// Main
//*********************************************************
static int usage(const char* exename)
{
DISPLAY( WELCOME_MESSAGE );
DISPLAY( "Usage :\n");
DISPLAY( " %s [arg] [filename]\n", exename);
DISPLAY( "When no filename provided, or - provided : use stdin as input\n");
DISPLAY( "Arguments :\n");
DISPLAY( " -H# : hash selection : 0=32bits, 1=64bits (default %i)\n", g_fn_selection);
DISPLAY( " -b : benchmark mode \n");
DISPLAY( " -i# : number of iterations (benchmark mode; default %i)\n", g_nbIterations);
DISPLAY( " -h : help (this text)\n");
return 0;
}
static int badusage(const char* exename)
{
DISPLAY("Wrong parameters\n");
usage(exename);
return 1;
}
int main(int argc, char** argv)
{
int i, filenamesStart=0;
const char* input_filename = (char*)stdinName;
const char* exename = argv[0];
U32 benchmarkMode = 0;
// xxh32sum defaults to the 32-bit checksum
if (strstr(exename, "xxh32sum")!=NULL) g_fn_selection=0;
for(i=1; i<argc; i++)
{
char* argument = argv[i];
if(!argument) continue; // Protection if argument empty
if (*argument!='-')
{
input_filename=argument;
if (filenamesStart==0) filenamesStart=i;
continue;
}
// Select command
// note : *argument=='-'
argument++;
while (*argument!=0)
{
switch(*argument)
{
// Display help on usage
case 'h':
return usage(exename);
// select hash algorithm
case 'H':
g_fn_selection = argument[1] - '0';
argument+=2;
break;
// Trigger benchmark mode
case 'b':
argument++;
benchmarkMode=1;
break;
// Modify Nb Iterations (benchmark only)
case 'i':
g_nbIterations = argument[1] - '0';
argument+=2;
break;
default:
return badusage(exename);
}
}
}
// Check if input is defined as console; trigger an error in this case
if ((input_filename == stdinName) && IS_CONSOLE(stdin) ) return badusage(exename);
// Check results are good
if (benchmarkMode)
{
if (filenamesStart==0) return badusage(exename);
DISPLAY( WELCOME_MESSAGE );
BMK_sanityCheck();
return BMK_benchFile(argv+filenamesStart, argc-filenamesStart);
}
if(g_fn_selection < 0 || g_fn_selection > 1) return badusage(exename);
return BMK_hash(input_filename, g_fn_selection);
}