[SPIR-V] Update glslang

WIP new vertex fetching
DrChat 2018-02-18 20:13:38 -06:00
parent 4e1a55f585
commit f3f1a7fe42
37 changed files with 11978 additions and 1326 deletions


@ -22,8 +22,8 @@ project("xenia-gpu")
project_root.."/third_party/gflags/src",
})
local_platform_files()
local_platform_files("spirv")
local_platform_files("spirv/passes")
-- local_platform_files("spirv")
-- local_platform_files("spirv/passes")
group("src")
project("xenia-gpu-shader-compiler")


@ -16,8 +16,6 @@
#include <vector>
#include "xenia/base/logging.h"
#include "xenia/gpu/spirv/passes/control_flow_analysis_pass.h"
#include "xenia/gpu/spirv/passes/control_flow_simplification_pass.h"
DEFINE_bool(spv_validate, false, "Validate SPIR-V shaders after generation");
DEFINE_bool(spv_disasm, false, "Disassemble SPIR-V shaders after generation");
@ -33,16 +31,12 @@ using spv::GLSLstd450;
using spv::Id;
using spv::Op;
SpirvShaderTranslator::SpirvShaderTranslator() {
compiler_.AddPass(std::make_unique<spirv::ControlFlowSimplificationPass>());
compiler_.AddPass(std::make_unique<spirv::ControlFlowAnalysisPass>());
}
SpirvShaderTranslator::SpirvShaderTranslator() {}
SpirvShaderTranslator::~SpirvShaderTranslator() = default;
void SpirvShaderTranslator::StartTranslation() {
// Create a new builder.
builder_ = std::make_unique<spv::Builder>(0xFFFFFFFF);
builder_ = std::make_unique<spv::Builder>(SPV_VERSION, 0xFFFFFFFF, nullptr);
auto& b = *builder_;
// Import required modules.
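Note on the hunk above: the updated spv::Builder constructor now takes the SPIR-V version word, a generator magic number, and an optional build logger. A minimal sketch of the new construction, assuming the glslang-spirv headers added by this commit are on the include path (the helper name and the Logger.h include are illustrative, not part of the change):

#include <memory>

#include "third_party/glslang-spirv/Logger.h"
#include "third_party/glslang-spirv/SpvBuilder.h"

std::unique_ptr<spv::Builder> MakeBuilder() {
  // SPV_VERSION comes from the bundled SPIR-V headers; 0xFFFFFFFF is the
  // generator magic already used above; a null logger disables build logging.
  spv::SpvBuildLogger* logger = nullptr;
  return std::make_unique<spv::Builder>(SPV_VERSION, 0xFFFFFFFF, logger);
}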
@ -225,69 +219,14 @@ void SpirvShaderTranslator::StartTranslation() {
Id interpolators_type = b.makeArrayType(
vec4_float_type_, b.makeUintConstant(kMaxInterpolators), 0);
if (is_vertex_shader()) {
// Vertex inputs/outputs.
for (const auto& binding : vertex_bindings()) {
for (const auto& attrib : binding.attributes) {
Id attrib_type = 0;
bool is_signed = attrib.fetch_instr.attributes.is_signed;
bool is_integer = attrib.fetch_instr.attributes.is_integer;
switch (attrib.fetch_instr.attributes.data_format) {
case VertexFormat::k_32:
case VertexFormat::k_32_FLOAT:
attrib_type = float_type_;
break;
case VertexFormat::k_16_16:
case VertexFormat::k_32_32:
if (is_integer) {
attrib_type = is_signed ? vec2_int_type_ : vec2_uint_type_;
break;
}
// Intentionally fall through to float type.
case VertexFormat::k_16_16_FLOAT:
case VertexFormat::k_32_32_FLOAT:
attrib_type = vec2_float_type_;
break;
case VertexFormat::k_32_32_32_FLOAT:
attrib_type = vec3_float_type_;
break;
case VertexFormat::k_2_10_10_10:
attrib_type = vec4_float_type_;
break;
case VertexFormat::k_8_8_8_8:
case VertexFormat::k_16_16_16_16:
case VertexFormat::k_32_32_32_32:
if (is_integer) {
attrib_type = is_signed ? vec4_int_type_ : vec4_uint_type_;
break;
}
// Intentionally fall through to float type.
case VertexFormat::k_16_16_16_16_FLOAT:
case VertexFormat::k_32_32_32_32_FLOAT:
attrib_type = vec4_float_type_;
break;
case VertexFormat::k_10_11_11:
case VertexFormat::k_11_11_10:
// Manually converted.
attrib_type = is_signed ? int_type_ : uint_type_;
break;
default:
assert_always();
}
auto attrib_var = b.createVariable(
spv::StorageClass::StorageClassInput, attrib_type,
xe::format_string("vf%d_%d", binding.fetch_constant,
attrib.fetch_instr.attributes.offset)
.c_str());
b.addDecoration(attrib_var, spv::Decoration::DecorationLocation,
attrib.attrib_index);
interface_ids_.push_back(attrib_var);
vertex_binding_map_[binding.fetch_constant]
[attrib.fetch_instr.attributes.offset] = attrib_var;
}
}
// Vertex inputs/outputs
// Inputs: 32 SSBOs on DS 2 binding 0
Id vtx_t = b.makeRuntimeArray(uint_type_);
Id vtx_a_t = b.makeArrayType(vtx_t, b.makeUintConstant(32), 0);
vtx_ = b.createVariable(spv::StorageClass::StorageClassUniform, vtx_a_t,
"vertex_bindings");
// Outputs
interpolators_ = b.createVariable(spv::StorageClass::StorageClassOutput,
interpolators_type, "interpolators");
b.addDecoration(interpolators_, spv::Decoration::DecorationLocation, 0);
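This hunk is the core of the new vertex fetching: the per-attribute Input variables are gone, replaced by a single Uniform-class variable holding 32 unsized uint arrays, one raw buffer per vertex fetch constant, with format decoding presumably moving into the translated shader code. A hedged sketch of the same declaration plus the descriptor decorations implied by the "SSBOs on DS 2 binding 0" comment (the helper name and the two addDecoration calls are assumptions; only the type and variable creation appear in the hunk):

#include "third_party/glslang-spirv/SpvBuilder.h"

spv::Id DeclareVertexBindings(spv::Builder& b, spv::Id uint_type) {
  // One unsized array of raw dwords per vertex fetch constant.
  spv::Id per_buffer = b.makeRuntimeArray(uint_type);
  // 32 fetch constants' worth of buffers in a single variable.
  spv::Id bindings_type =
      b.makeArrayType(per_buffer, b.makeUintConstant(32), 0);
  spv::Id vtx = b.createVariable(spv::StorageClass::StorageClassUniform,
                                 bindings_type, "vertex_bindings");
  // Assumption from the comment above: descriptor set 2, binding 0.
  b.addDecoration(vtx, spv::Decoration::DecorationDescriptorSet, 2);
  b.addDecoration(vtx, spv::Decoration::DecorationBinding, 0);
  return vtx;
}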
@ -421,7 +360,7 @@ void SpirvShaderTranslator::StartTranslation() {
auto cond = b.createBinOp(spv::Op::OpINotEqual, bool_type_,
ps_param_gen_idx, b.makeUintConstant(-1));
spv::Builder::If ifb(cond, b);
spv::Builder::If ifb(cond, 0, b);
// FYI: We do this instead of r[ps_param_gen_idx] because that causes
// nvidia to move all registers into local memory (slow!)
@ -458,7 +397,7 @@ void SpirvShaderTranslator::StartTranslation() {
// While loop header block
b.setBuildPoint(loop_head_block_);
b.createLoopMerge(loop_exit_block_, loop_cont_block_,
spv::LoopControlMask::LoopControlDontUnrollMask);
spv::LoopControlMask::LoopControlDontUnrollMask, 0);
b.createBranch(block);
// Condition block
@ -481,7 +420,8 @@ std::vector<uint8_t> SpirvShaderTranslator::CompleteTranslation() {
exec_skip_block_ = nullptr;
// main() entry point.
auto mainFn = b.makeMain();
auto mainFn =
b.makeFunctionEntry(spv::NoPrecision, b.makeVoidType(), "main", {}, {});
if (is_vertex_shader()) {
auto entry = b.addEntryPoint(spv::ExecutionModel::ExecutionModelVertex,
mainFn, "main");
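With this glslang drop the translator no longer uses makeMain(); main() is created as a plain void() function through makeFunctionEntry and then registered with addEntryPoint. A hedged sketch of that pattern (the helper name is illustrative, and the vertex execution model is just an example; the translator picks the model from the shader type):

#include "third_party/glslang-spirv/SpvBuilder.h"

spv::Function* MakeMainFunction(spv::Builder& b) {
  // void main() with no parameters and no parameter precisions.
  spv::Function* main_fn =
      b.makeFunctionEntry(spv::NoPrecision, b.makeVoidType(), "main", {}, {});
  b.addEntryPoint(spv::ExecutionModel::ExecutionModelVertex, main_fn, "main");
  return main_fn;
}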
@ -570,12 +510,12 @@ std::vector<uint8_t> SpirvShaderTranslator::CompleteTranslation() {
auto cond = b.createBinOp(spv::Op::OpFOrdEqual, bool_type_,
alpha_test_enabled, b.makeFloatConstant(1.f));
spv::Builder::If alpha_if(cond, b);
spv::Builder::If alpha_if(cond, 0, b);
std::vector<spv::Block*> switch_segments;
b.makeSwitch(alpha_test_func, 8, std::vector<int>({0, 1, 2, 3, 4, 5, 6, 7}),
std::vector<int>({0, 1, 2, 3, 4, 5, 6, 7}), 7,
switch_segments);
b.makeSwitch(
alpha_test_func, 0, 8, std::vector<int>({0, 1, 2, 3, 4, 5, 6, 7}),
std::vector<int>({0, 1, 2, 3, 4, 5, 6, 7}), 7, switch_segments);
const static spv::Op alpha_op_map[] = {
spv::Op::OpNop,
@ -597,7 +537,7 @@ std::vector<uint8_t> SpirvShaderTranslator::CompleteTranslation() {
b.nextSwitchSegment(switch_segments, i);
auto cond =
b.createBinOp(alpha_op_map[i], bool_type_, oC0_alpha, alpha_test_ref);
spv::Builder::If discard_if(cond, b);
spv::Builder::If discard_if(cond, 0, b);
b.makeDiscard();
discard_if.makeEndIf();
b.addSwitchBreak();
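Several Builder control-flow helpers grew an explicit control operand in this update: Builder::If takes a selection-control mask, makeSwitch takes a selection control ahead of the segment count, and createLoopMerge takes a loop-control mask plus one more operand (passed as 0 above). A minimal sketch of the predicated-discard pattern with the updated If signature (the helper name is illustrative; 0 means no selection control, matching the hunks above):

#include "third_party/glslang-spirv/SpvBuilder.h"

// Emits "if (cond) discard;" using the updated If constructor.
void EmitConditionalDiscard(spv::Builder& b, spv::Id cond) {
  spv::Builder::If discard_if(cond, /*control=*/0, b);
  b.makeDiscard();         // body of the then-block
  discard_if.makeEndIf();  // closes the selection construct
}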
@ -1475,7 +1415,8 @@ void SpirvShaderTranslator::ProcessTextureFetchInstruction(
std::memset(&params, 0, sizeof(params));
params.sampler = image;
params.lod = b.makeIntConstant(0);
size = b.createTextureQueryCall(spv::Op::OpImageQuerySizeLod, params);
size = b.createTextureQueryCall(spv::Op::OpImageQuerySizeLod, params,
false);
if (instr.dimension == TextureDimension::k1D) {
size = b.createUnaryOp(spv::Op::OpConvertSToF, float_type_, size);
@ -1563,8 +1504,8 @@ void SpirvShaderTranslator::ProcessTextureFetchInstruction(
std::memset(&params, 0, sizeof(params));
params.sampler = image;
params.lod = b.makeIntConstant(0);
auto size =
b.createTextureQueryCall(spv::Op::OpImageQuerySizeLod, params);
auto size = b.createTextureQueryCall(spv::Op::OpImageQuerySizeLod,
params, true);
size =
b.createUnaryOp(spv::Op::OpConvertUToF, vec2_float_type_, size);
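createTextureQueryCall also gained a trailing bool indicating whether the query result is unsigned, which is why the two call sites above pass false and true. A hedged sketch of a size query with the new signature (the helper name and the parameter-name comment are assumptions):

#include "third_party/glslang-spirv/SpvBuilder.h"

// Queries the level-0 size of an image, asking for an unsigned result.
spv::Id QueryImageSizeLod0(spv::Builder& b, spv::Id image) {
  spv::Builder::TextureParameters params = {};
  params.sampler = image;  // size queries need only the image id
  params.lod = b.makeIntConstant(0);
  return b.createTextureQueryCall(spv::Op::OpImageQuerySizeLod, params,
                                  /*isUnsignedResult=*/true);
}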
@ -1624,7 +1565,7 @@ spv::Function* SpirvShaderTranslator::CreateCubeFunction() {
spv::Block* function_block = nullptr;
auto function = b.makeFunctionEntry(spv::NoPrecision, vec4_float_type_,
"cube", {vec4_float_type_},
{spv::NoPrecision}, &function_block);
{{spv::NoPrecision}}, &function_block);
auto src = function->getParamId(0);
auto face_id = b.createVariable(spv::StorageClass::StorageClassFunction,
float_type_, "face_id");
@ -1685,7 +1626,7 @@ spv::Function* SpirvShaderTranslator::CreateCubeFunction() {
auto x_gt_z = b.createBinOp(spv::Op::OpFOrdGreaterThan, bool_type_,
abs_src_x, abs_src_z);
auto c1 = b.createBinOp(spv::Op::OpLogicalAnd, bool_type_, x_gt_y, x_gt_z);
spv::Builder::If if1(c1, b);
spv::Builder::If if1(c1, 0, b);
// sc = abs(src).y
b.createStore(abs_src_y, sc);
@ -1720,7 +1661,7 @@ spv::Function* SpirvShaderTranslator::CreateCubeFunction() {
auto y_gt_z = b.createBinOp(spv::Op::OpFOrdGreaterThan, bool_type_,
abs_src_y, abs_src_z);
auto c1 = b.createBinOp(spv::Op::OpLogicalAnd, bool_type_, y_gt_x, y_gt_z);
spv::Builder::If if1(c1, b);
spv::Builder::If if1(c1, 0, b);
// tc = -abs(src).x
b.createStore(neg_src_x, tc);
@ -1755,7 +1696,7 @@ spv::Function* SpirvShaderTranslator::CreateCubeFunction() {
auto z_gt_y = b.createBinOp(spv::Op::OpFOrdGreaterThan, bool_type_,
abs_src_z, abs_src_y);
auto c1 = b.createBinOp(spv::Op::OpLogicalAnd, bool_type_, z_gt_x, z_gt_y);
spv::Builder::If if1(c1, b);
spv::Builder::If if1(c1, 0, b);
// tc = -abs(src).x
b.createStore(neg_src_x, tc);


@ -17,7 +17,6 @@
#include "third_party/glslang-spirv/SpvBuilder.h"
#include "third_party/spirv/GLSL.std.450.hpp11"
#include "xenia/gpu/shader_translator.h"
#include "xenia/gpu/spirv/compiler.h"
#include "xenia/ui/spirv/spirv_disassembler.h"
#include "xenia/ui/spirv/spirv_validator.h"
@ -112,7 +111,6 @@ class SpirvShaderTranslator : public ShaderTranslator {
xe::ui::spirv::SpirvDisassembler disassembler_;
xe::ui::spirv::SpirvValidator validator_;
xe::gpu::spirv::Compiler compiler_;
// True if there's an open predicated block
bool open_predicated_block_ = false;
@ -159,6 +157,7 @@ class SpirvShaderTranslator : public ShaderTranslator {
spv::Id frag_outputs_ = 0, frag_depth_ = 0;
spv::Id samplers_ = 0;
spv::Id tex_[3] = {0}; // Images {2D, 3D, Cube}
spv::Id vtx_ = 0; // Vertex buffer array (32 runtime arrays)
// SPIR-V IDs that are part of the in/out interface.
std::vector<spv::Id> interface_ids_;

third_party/glslang-spirv/GLSL.ext.AMD.h (vendored, new file, 110 lines)

@ -0,0 +1,110 @@
/*
** Copyright (c) 2014-2016 The Khronos Group Inc.
**
** Permission is hereby granted, free of charge, to any person obtaining a copy
** of this software and/or associated documentation files (the "Materials"),
** to deal in the Materials without restriction, including without limitation
** the rights to use, copy, modify, merge, publish, distribute, sublicense,
** and/or sell copies of the Materials, and to permit persons to whom the
** Materials are furnished to do so, subject to the following conditions:
**
** The above copyright notice and this permission notice shall be included in
** all copies or substantial portions of the Materials.
**
** MODIFICATIONS TO THIS FILE MAY MEAN IT NO LONGER ACCURATELY REFLECTS KHRONOS
** STANDARDS. THE UNMODIFIED, NORMATIVE VERSIONS OF KHRONOS SPECIFICATIONS AND
** HEADER INFORMATION ARE LOCATED AT https://www.khronos.org/registry/
**
** THE MATERIALS ARE PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
** OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
** FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
** THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
** LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
** FROM,OUT OF OR IN CONNECTION WITH THE MATERIALS OR THE USE OR OTHER DEALINGS
** IN THE MATERIALS.
*/
#ifndef GLSLextAMD_H
#define GLSLextAMD_H
enum BuiltIn;
enum Capability;
enum Decoration;
enum Op;
static const int GLSLextAMDVersion = 100;
static const int GLSLextAMDRevision = 6;
// SPV_AMD_shader_ballot
static const char* const E_SPV_AMD_shader_ballot = "SPV_AMD_shader_ballot";
enum ShaderBallotAMD {
ShaderBallotBadAMD = 0, // Don't use
SwizzleInvocationsAMD = 1,
SwizzleInvocationsMaskedAMD = 2,
WriteInvocationAMD = 3,
MbcntAMD = 4,
ShaderBallotCountAMD
};
// SPV_AMD_shader_trinary_minmax
static const char* const E_SPV_AMD_shader_trinary_minmax = "SPV_AMD_shader_trinary_minmax";
enum ShaderTrinaryMinMaxAMD {
ShaderTrinaryMinMaxBadAMD = 0, // Don't use
FMin3AMD = 1,
UMin3AMD = 2,
SMin3AMD = 3,
FMax3AMD = 4,
UMax3AMD = 5,
SMax3AMD = 6,
FMid3AMD = 7,
UMid3AMD = 8,
SMid3AMD = 9,
ShaderTrinaryMinMaxCountAMD
};
// SPV_AMD_shader_explicit_vertex_parameter
static const char* const E_SPV_AMD_shader_explicit_vertex_parameter = "SPV_AMD_shader_explicit_vertex_parameter";
enum ShaderExplicitVertexParameterAMD {
ShaderExplicitVertexParameterBadAMD = 0, // Don't use
InterpolateAtVertexAMD = 1,
ShaderExplicitVertexParameterCountAMD
};
// SPV_AMD_gcn_shader
static const char* const E_SPV_AMD_gcn_shader = "SPV_AMD_gcn_shader";
enum GcnShaderAMD {
GcnShaderBadAMD = 0, // Don't use
CubeFaceIndexAMD = 1,
CubeFaceCoordAMD = 2,
TimeAMD = 3,
GcnShaderCountAMD
};
// SPV_AMD_gpu_shader_half_float
static const char* const E_SPV_AMD_gpu_shader_half_float = "SPV_AMD_gpu_shader_half_float";
// SPV_AMD_texture_gather_bias_lod
static const char* const E_SPV_AMD_texture_gather_bias_lod = "SPV_AMD_texture_gather_bias_lod";
// SPV_AMD_gpu_shader_int16
static const char* const E_SPV_AMD_gpu_shader_int16 = "SPV_AMD_gpu_shader_int16";
// SPV_AMD_shader_image_load_store_lod
static const char* const E_SPV_AMD_shader_image_load_store_lod = "SPV_AMD_shader_image_load_store_lod";
// SPV_AMD_shader_fragment_mask
static const char* const E_SPV_AMD_shader_fragment_mask = "SPV_AMD_shader_fragment_mask";
#endif // #ifndef GLSLextAMD_H


@ -0,0 +1,39 @@
/*
** Copyright (c) 2014-2016 The Khronos Group Inc.
**
** Permission is hereby granted, free of charge, to any person obtaining a copy
** of this software and/or associated documentation files (the "Materials"),
** to deal in the Materials without restriction, including without limitation
** the rights to use, copy, modify, merge, publish, distribute, sublicense,
** and/or sell copies of the Materials, and to permit persons to whom the
** Materials are furnished to do so, subject to the following conditions:
**
** The above copyright notice and this permission notice shall be included in
** all copies or substantial portions of the Materials.
**
** MODIFICATIONS TO THIS FILE MAY MEAN IT NO LONGER ACCURATELY REFLECTS KHRONOS
** STANDARDS. THE UNMODIFIED, NORMATIVE VERSIONS OF KHRONOS SPECIFICATIONS AND
** HEADER INFORMATION ARE LOCATED AT https://www.khronos.org/registry/
**
** THE MATERIALS ARE PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
** OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
** FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
** THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
** LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
** FROM,OUT OF OR IN CONNECTION WITH THE MATERIALS OR THE USE OR OTHER DEALINGS
** IN THE MATERIALS.
*/
#ifndef GLSLextEXT_H
#define GLSLextEXT_H
enum BuiltIn;
enum Op;
enum Capability;
static const int GLSLextEXTVersion = 100;
static const int GLSLextEXTRevision = 1;
static const char* const E_SPV_EXT_fragment_fully_covered = "SPV_EXT_fragment_fully_covered";
#endif // #ifndef GLSLextEXT_H


@ -0,0 +1,48 @@
/*
** Copyright (c) 2014-2016 The Khronos Group Inc.
**
** Permission is hereby granted, free of charge, to any person obtaining a copy
** of this software and/or associated documentation files (the "Materials"),
** to deal in the Materials without restriction, including without limitation
** the rights to use, copy, modify, merge, publish, distribute, sublicense,
** and/or sell copies of the Materials, and to permit persons to whom the
** Materials are furnished to do so, subject to the following conditions:
**
** The above copyright notice and this permission notice shall be included in
** all copies or substantial portions of the Materials.
**
** MODIFICATIONS TO THIS FILE MAY MEAN IT NO LONGER ACCURATELY REFLECTS KHRONOS
** STANDARDS. THE UNMODIFIED, NORMATIVE VERSIONS OF KHRONOS SPECIFICATIONS AND
** HEADER INFORMATION ARE LOCATED AT https://www.khronos.org/registry/
**
** THE MATERIALS ARE PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
** OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
** FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
** THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
** LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
** FROM,OUT OF OR IN CONNECTION WITH THE MATERIALS OR THE USE OR OTHER DEALINGS
** IN THE MATERIALS.
*/
#ifndef GLSLextKHR_H
#define GLSLextKHR_H
enum BuiltIn;
enum Op;
enum Capability;
static const int GLSLextKHRVersion = 100;
static const int GLSLextKHRRevision = 2;
static const char* const E_SPV_KHR_shader_ballot = "SPV_KHR_shader_ballot";
static const char* const E_SPV_KHR_subgroup_vote = "SPV_KHR_subgroup_vote";
static const char* const E_SPV_KHR_device_group = "SPV_KHR_device_group";
static const char* const E_SPV_KHR_multiview = "SPV_KHR_multiview";
static const char* const E_SPV_KHR_shader_draw_parameters = "SPV_KHR_shader_draw_parameters";
static const char* const E_SPV_KHR_16bit_storage = "SPV_KHR_16bit_storage";
static const char* const E_SPV_KHR_storage_buffer_storage_class = "SPV_KHR_storage_buffer_storage_class";
static const char* const E_SPV_KHR_post_depth_coverage = "SPV_KHR_post_depth_coverage";
static const char* const E_SPV_EXT_shader_stencil_export = "SPV_EXT_shader_stencil_export";
static const char* const E_SPV_EXT_shader_viewport_index_layer = "SPV_EXT_shader_viewport_index_layer";
#endif // #ifndef GLSLextKHR_H
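These KHR/EXT headers only define extension name strings; a consumer declares whichever extensions it relies on through Builder::addExtension so they land in the module's OpExtension list. A hedged example (whether the translator ends up needing this particular extension is an assumption):

#include "third_party/glslang-spirv/SpvBuilder.h"

// The literal matches E_SPV_KHR_storage_buffer_storage_class above.
void DeclareStorageBufferExtension(spv::Builder& b) {
  b.addExtension("SPV_KHR_storage_buffer_storage_class");
}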

third_party/glslang-spirv/GLSL.ext.NV.h (vendored, new file, 54 lines)

@ -0,0 +1,54 @@
/*
** Copyright (c) 2014-2017 The Khronos Group Inc.
**
** Permission is hereby granted, free of charge, to any person obtaining a copy
** of this software and/or associated documentation files (the "Materials"),
** to deal in the Materials without restriction, including without limitation
** the rights to use, copy, modify, merge, publish, distribute, sublicense,
** and/or sell copies of the Materials, and to permit persons to whom the
** Materials are furnished to do so, subject to the following conditions:
**
** The above copyright notice and this permission notice shall be included in
** all copies or substantial portions of the Materials.
**
** MODIFICATIONS TO THIS FILE MAY MEAN IT NO LONGER ACCURATELY REFLECTS KHRONOS
** STANDARDS. THE UNMODIFIED, NORMATIVE VERSIONS OF KHRONOS SPECIFICATIONS AND
** HEADER INFORMATION ARE LOCATED AT https://www.khronos.org/registry/
**
** THE MATERIALS ARE PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
** OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
** FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
** THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
** LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
** FROM,OUT OF OR IN CONNECTION WITH THE MATERIALS OR THE USE OR OTHER DEALINGS
** IN THE MATERIALS.
*/
#ifndef GLSLextNV_H
#define GLSLextNV_H
enum BuiltIn;
enum Decoration;
enum Op;
enum Capability;
static const int GLSLextNVVersion = 100;
static const int GLSLextNVRevision = 5;
//SPV_NV_sample_mask_override_coverage
const char* const E_SPV_NV_sample_mask_override_coverage = "SPV_NV_sample_mask_override_coverage";
//SPV_NV_geometry_shader_passthrough
const char* const E_SPV_NV_geometry_shader_passthrough = "SPV_NV_geometry_shader_passthrough";
//SPV_NV_viewport_array2
const char* const E_SPV_NV_viewport_array2 = "SPV_NV_viewport_array2";
const char* const E_ARB_shader_viewport_layer_array = "SPV_ARB_shader_viewport_layer_array";
//SPV_NV_stereo_view_rendering
const char* const E_SPV_NV_stereo_view_rendering = "SPV_NV_stereo_view_rendering";
//SPV_NVX_multiview_per_view_attributes
const char* const E_SPV_NVX_multiview_per_view_attributes = "SPV_NVX_multiview_per_view_attributes";
#endif // #ifndef GLSLextNV_H

File diff suppressed because it is too large.


@ -1,11 +1,11 @@
//
//Copyright (C) 2014 LunarG, Inc.
// Copyright (C) 2014 LunarG, Inc.
//
//All rights reserved.
// All rights reserved.
//
//Redistribution and use in source and binary forms, with or without
//modification, are permitted provided that the following conditions
//are met:
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
//
// Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
@ -19,25 +19,49 @@
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
//THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
//"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
//LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
//FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
//COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
//INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
//BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
//LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
//CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
//LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
//ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
//POSSIBILITY OF SUCH DAMAGE.
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
// COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
// POSSIBILITY OF SUCH DAMAGE.
#pragma once
#if _MSC_VER >= 1900
#pragma warning(disable : 4464) // relative include path contains '..'
#endif
#include "../glslang/Include/intermediate.h"
#include <string>
#include <vector>
#include "Logger.h"
namespace glslang {
void GetSpirvVersion(std::string&);
void GlslangToSpv(const glslang::TIntermediate& intermediate, std::vector<unsigned int>& spirv);
void OutputSpv(const std::vector<unsigned int>& spirv, const char* baseName);
struct SpvOptions {
SpvOptions() : generateDebugInfo(false), disableOptimizer(true),
optimizeSize(false) { }
bool generateDebugInfo;
bool disableOptimizer;
bool optimizeSize;
};
void GetSpirvVersion(std::string&);
int GetSpirvGeneratorVersion();
void GlslangToSpv(const glslang::TIntermediate& intermediate, std::vector<unsigned int>& spirv,
SpvOptions* options = nullptr);
void GlslangToSpv(const glslang::TIntermediate& intermediate, std::vector<unsigned int>& spirv,
spv::SpvBuildLogger* logger, SpvOptions* options = nullptr);
void OutputSpvBin(const std::vector<unsigned int>& spirv, const char* baseName);
void OutputSpvHex(const std::vector<unsigned int>& spirv, const char* baseName, const char* varName);
}
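This header (glslang's GlslangToSpv.h) now exposes SpvOptions plus a logger-taking overload of GlslangToSpv. A hedged usage sketch, assuming a linked glslang::TIntermediate is already available from the front end and that the header lives at the vendored path below:

#include <vector>

#include "third_party/glslang-spirv/GlslangToSpv.h"

// Lowers an already-compiled intermediate to SPIR-V words, collecting messages.
std::vector<unsigned int> LowerToSpv(const glslang::TIntermediate& intermediate) {
  glslang::SpvOptions options;  // defaults: no debug info, optimizer disabled
  spv::SpvBuildLogger logger;
  std::vector<unsigned int> spirv;
  glslang::GlslangToSpv(intermediate, spirv, &logger, &options);
  return spirv;
}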


@ -1,11 +1,11 @@
//
//Copyright (C) 2016 Google, Inc.
// Copyright (C) 2016 Google, Inc.
//
//All rights reserved.
// All rights reserved.
//
//Redistribution and use in source and binary forms, with or without
//modification, are permitted provided that the following conditions
//are met:
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
//
// Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
@ -19,22 +19,18 @@
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
//THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
//"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
//LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
//FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
//COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
//INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
//BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
//LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
//CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
//LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
//ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
//POSSIBILITY OF SUCH DAMAGE.
//
// Author: Dejan Mircevski, Google
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
// COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
// POSSIBILITY OF SUCH DAMAGE.
// The SPIR-V spec requires code blocks to appear in an order satisfying the
// dominator-tree direction (ie, dominator before the dominated). This is,
@ -55,7 +51,7 @@
#include "spvIR.h"
#include <cassert>
#include <unordered_map>
#include <unordered_set>
using spv::Block;
using spv::Id;
@ -73,32 +69,33 @@ public:
void visit(Block* block)
{
assert(block);
if (visited_[block] || delayed_[block])
if (visited_.count(block) || delayed_.count(block))
return;
callback_(block);
visited_[block] = true;
visited_.insert(block);
Block* mergeBlock = nullptr;
Block* continueBlock = nullptr;
auto mergeInst = block->getMergeInstruction();
if (mergeInst) {
Id mergeId = mergeInst->getIdOperand(0);
mergeBlock = block->getParent().getParent().getInstruction(mergeId)->getBlock();
delayed_[mergeBlock] = true;
delayed_.insert(mergeBlock);
if (mergeInst->getOpCode() == spv::OpLoopMerge) {
Id continueId = mergeInst->getIdOperand(1);
continueBlock =
block->getParent().getParent().getInstruction(continueId)->getBlock();
delayed_[continueBlock] = true;
delayed_.insert(continueBlock);
}
}
for (const auto succ : block->getSuccessors())
visit(succ);
const auto successors = block->getSuccessors();
for (auto it = successors.cbegin(); it != successors.cend(); ++it)
visit(*it);
if (continueBlock) {
delayed_[continueBlock] = false;
delayed_.erase(continueBlock);
visit(continueBlock);
}
if (mergeBlock) {
delayed_[mergeBlock] = false;
delayed_.erase(mergeBlock);
visit(mergeBlock);
}
}
@ -106,7 +103,7 @@ public:
private:
std::function<void(Block*)> callback_;
// Whether a block has already been visited or is being delayed.
std::unordered_map<Block *, bool> visited_, delayed_;
std::unordered_set<Block *> visited_, delayed_;
};
}
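The traversal above swapped its bookkeeping from unordered_map<Block*, bool> to unordered_set<Block*>, so set membership itself now means "visited" or "delayed". A self-contained toy version of the same pattern, using integer node ids in place of spv::Block* and a simplified merge field in place of the merge/continue instructions (all names here are illustrative):

#include <functional>
#include <unordered_set>
#include <vector>

struct ToyBlock {
  std::vector<int> successors;
  int merge = -1;  // -1 means no merge block to delay
};

// Visits callback(block) in readable order: a merge block is deferred until
// the rest of its construct has been emitted, mirroring the code above.
void ToyReadableOrder(const std::vector<ToyBlock>& blocks, int entry,
                      const std::function<void(int)>& callback) {
  std::unordered_set<int> visited, delayed;
  std::function<void(int)> visit;
  visit = [&](int id) {
    if (visited.count(id) || delayed.count(id)) return;
    callback(id);
    visited.insert(id);
    int merge = blocks[id].merge;
    if (merge >= 0) delayed.insert(merge);  // hold the merge block back
    for (int succ : blocks[id].successors) visit(succ);
    if (merge >= 0) {
      delayed.erase(merge);  // construct finished, merge may be emitted now
      visit(merge);
    }
  };
  visit(entry);
}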


@ -0,0 +1,387 @@
//
// Copyright (C) 2002-2005 3Dlabs Inc. Ltd.
// Copyright (C) 2012-2013 LunarG, Inc.
//
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
//
// Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
//
// Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
//
// Neither the name of 3Dlabs Inc. Ltd. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
// COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
// POSSIBILITY OF SUCH DAMAGE.
//
#ifndef _BASICTYPES_INCLUDED_
#define _BASICTYPES_INCLUDED_
namespace glslang {
//
// Basic type. Arrays, vectors, sampler details, etc., are orthogonal to this.
//
enum TBasicType {
EbtVoid,
EbtFloat,
EbtDouble,
#ifdef AMD_EXTENSIONS
EbtFloat16,
#endif
EbtInt,
EbtUint,
EbtInt64,
EbtUint64,
#ifdef AMD_EXTENSIONS
EbtInt16,
EbtUint16,
#endif
EbtBool,
EbtAtomicUint,
EbtSampler,
EbtStruct,
EbtBlock,
// HLSL types that live only temporarily.
EbtString,
EbtNumTypes
};
//
// Storage qualifiers. Should align with different kinds of storage or
// resource or GLSL storage qualifier. Expansion is deprecated.
//
// N.B.: You probably DON'T want to add anything here, but rather just add it
// to the built-in variables. See the comment above TBuiltInVariable.
//
// A new built-in variable will normally be an existing qualifier, like 'in', 'out', etc.
// DO NOT follow the design pattern of, say EvqInstanceId, etc.
//
enum TStorageQualifier {
EvqTemporary, // For temporaries (within a function), read/write
EvqGlobal, // For globals read/write
EvqConst, // User-defined constant values, will be semantically constant and constant folded
EvqVaryingIn, // pipeline input, read only, also supercategory for all built-ins not included in this enum (see TBuiltInVariable)
EvqVaryingOut, // pipeline output, read/write, also supercategory for all built-ins not included in this enum (see TBuiltInVariable)
EvqUniform, // read only, shared with app
EvqBuffer, // read/write, shared with app
EvqShared, // compute shader's read/write 'shared' qualifier
// parameters
EvqIn, // also, for 'in' in the grammar before we know if it's a pipeline input or an 'in' parameter
EvqOut, // also, for 'out' in the grammar before we know if it's a pipeline output or an 'out' parameter
EvqInOut,
EvqConstReadOnly, // input; also other read-only types having neither a constant value nor constant-value semantics
// built-ins read by vertex shader
EvqVertexId,
EvqInstanceId,
// built-ins written by vertex shader
EvqPosition,
EvqPointSize,
EvqClipVertex,
// built-ins read by fragment shader
EvqFace,
EvqFragCoord,
EvqPointCoord,
// built-ins written by fragment shader
EvqFragColor,
EvqFragDepth,
// end of list
EvqLast
};
//
// Subcategories of the TStorageQualifier, simply to give a direct mapping
// between built-in variable names and an numerical value (the enum).
//
// For backward compatibility, there is some redundancy between the
// TStorageQualifier and these. Existing members should both be maintained accurately.
// However, any new built-in variable (and any existing non-redundant one)
// must follow the pattern that the specific built-in is here, and only its
// general qualifier is in TStorageQualifier.
//
// Something like gl_Position, which is sometimes 'in' and sometimes 'out'
// shows up as two different built-in variables in a single stage, but
// only has a single enum in TBuiltInVariable, so both the
// TStorageQualifier and the TBuitinVariable are needed to distinguish
// between them.
//
enum TBuiltInVariable {
EbvNone,
EbvNumWorkGroups,
EbvWorkGroupSize,
EbvWorkGroupId,
EbvLocalInvocationId,
EbvGlobalInvocationId,
EbvLocalInvocationIndex,
EbvSubGroupSize,
EbvSubGroupInvocation,
EbvSubGroupEqMask,
EbvSubGroupGeMask,
EbvSubGroupGtMask,
EbvSubGroupLeMask,
EbvSubGroupLtMask,
EbvVertexId,
EbvInstanceId,
EbvVertexIndex,
EbvInstanceIndex,
EbvBaseVertex,
EbvBaseInstance,
EbvDrawId,
EbvPosition,
EbvPointSize,
EbvClipVertex,
EbvClipDistance,
EbvCullDistance,
EbvNormal,
EbvVertex,
EbvMultiTexCoord0,
EbvMultiTexCoord1,
EbvMultiTexCoord2,
EbvMultiTexCoord3,
EbvMultiTexCoord4,
EbvMultiTexCoord5,
EbvMultiTexCoord6,
EbvMultiTexCoord7,
EbvFrontColor,
EbvBackColor,
EbvFrontSecondaryColor,
EbvBackSecondaryColor,
EbvTexCoord,
EbvFogFragCoord,
EbvInvocationId,
EbvPrimitiveId,
EbvLayer,
EbvViewportIndex,
EbvPatchVertices,
EbvTessLevelOuter,
EbvTessLevelInner,
EbvBoundingBox,
EbvTessCoord,
EbvColor,
EbvSecondaryColor,
EbvFace,
EbvFragCoord,
EbvPointCoord,
EbvFragColor,
EbvFragData,
EbvFragDepth,
EbvFragStencilRef,
EbvSampleId,
EbvSamplePosition,
EbvSampleMask,
EbvHelperInvocation,
#ifdef AMD_EXTENSIONS
EbvBaryCoordNoPersp,
EbvBaryCoordNoPerspCentroid,
EbvBaryCoordNoPerspSample,
EbvBaryCoordSmooth,
EbvBaryCoordSmoothCentroid,
EbvBaryCoordSmoothSample,
EbvBaryCoordPullModel,
#endif
EbvViewIndex,
EbvDeviceIndex,
#ifdef NV_EXTENSIONS
EbvViewportMaskNV,
EbvSecondaryPositionNV,
EbvSecondaryViewportMaskNV,
EbvPositionPerViewNV,
EbvViewportMaskPerViewNV,
EbvFragFullyCoveredNV,
#endif
// HLSL built-ins that live only temporarily, until they get remapped
// to one of the above.
EbvFragDepthGreater,
EbvFragDepthLesser,
EbvGsOutputStream,
EbvOutputPatch,
EbvInputPatch,
// structbuffer types
EbvAppendConsume, // no need to differentiate append and consume
EbvRWStructuredBuffer,
EbvStructuredBuffer,
EbvByteAddressBuffer,
EbvRWByteAddressBuffer,
EbvLast
};
// These will show up in error messages
__inline const char* GetStorageQualifierString(TStorageQualifier q)
{
switch (q) {
case EvqTemporary: return "temp"; break;
case EvqGlobal: return "global"; break;
case EvqConst: return "const"; break;
case EvqConstReadOnly: return "const (read only)"; break;
case EvqVaryingIn: return "in"; break;
case EvqVaryingOut: return "out"; break;
case EvqUniform: return "uniform"; break;
case EvqBuffer: return "buffer"; break;
case EvqShared: return "shared"; break;
case EvqIn: return "in"; break;
case EvqOut: return "out"; break;
case EvqInOut: return "inout"; break;
case EvqVertexId: return "gl_VertexId"; break;
case EvqInstanceId: return "gl_InstanceId"; break;
case EvqPosition: return "gl_Position"; break;
case EvqPointSize: return "gl_PointSize"; break;
case EvqClipVertex: return "gl_ClipVertex"; break;
case EvqFace: return "gl_FrontFacing"; break;
case EvqFragCoord: return "gl_FragCoord"; break;
case EvqPointCoord: return "gl_PointCoord"; break;
case EvqFragColor: return "fragColor"; break;
case EvqFragDepth: return "gl_FragDepth"; break;
default: return "unknown qualifier";
}
}
__inline const char* GetBuiltInVariableString(TBuiltInVariable v)
{
switch (v) {
case EbvNone: return "";
case EbvNumWorkGroups: return "NumWorkGroups";
case EbvWorkGroupSize: return "WorkGroupSize";
case EbvWorkGroupId: return "WorkGroupID";
case EbvLocalInvocationId: return "LocalInvocationID";
case EbvGlobalInvocationId: return "GlobalInvocationID";
case EbvLocalInvocationIndex: return "LocalInvocationIndex";
case EbvSubGroupSize: return "SubGroupSize";
case EbvSubGroupInvocation: return "SubGroupInvocation";
case EbvSubGroupEqMask: return "SubGroupEqMask";
case EbvSubGroupGeMask: return "SubGroupGeMask";
case EbvSubGroupGtMask: return "SubGroupGtMask";
case EbvSubGroupLeMask: return "SubGroupLeMask";
case EbvSubGroupLtMask: return "SubGroupLtMask";
case EbvVertexId: return "VertexId";
case EbvInstanceId: return "InstanceId";
case EbvVertexIndex: return "VertexIndex";
case EbvInstanceIndex: return "InstanceIndex";
case EbvBaseVertex: return "BaseVertex";
case EbvBaseInstance: return "BaseInstance";
case EbvDrawId: return "DrawId";
case EbvPosition: return "Position";
case EbvPointSize: return "PointSize";
case EbvClipVertex: return "ClipVertex";
case EbvClipDistance: return "ClipDistance";
case EbvCullDistance: return "CullDistance";
case EbvNormal: return "Normal";
case EbvVertex: return "Vertex";
case EbvMultiTexCoord0: return "MultiTexCoord0";
case EbvMultiTexCoord1: return "MultiTexCoord1";
case EbvMultiTexCoord2: return "MultiTexCoord2";
case EbvMultiTexCoord3: return "MultiTexCoord3";
case EbvMultiTexCoord4: return "MultiTexCoord4";
case EbvMultiTexCoord5: return "MultiTexCoord5";
case EbvMultiTexCoord6: return "MultiTexCoord6";
case EbvMultiTexCoord7: return "MultiTexCoord7";
case EbvFrontColor: return "FrontColor";
case EbvBackColor: return "BackColor";
case EbvFrontSecondaryColor: return "FrontSecondaryColor";
case EbvBackSecondaryColor: return "BackSecondaryColor";
case EbvTexCoord: return "TexCoord";
case EbvFogFragCoord: return "FogFragCoord";
case EbvInvocationId: return "InvocationID";
case EbvPrimitiveId: return "PrimitiveID";
case EbvLayer: return "Layer";
case EbvViewportIndex: return "ViewportIndex";
case EbvPatchVertices: return "PatchVertices";
case EbvTessLevelOuter: return "TessLevelOuter";
case EbvTessLevelInner: return "TessLevelInner";
case EbvBoundingBox: return "BoundingBox";
case EbvTessCoord: return "TessCoord";
case EbvColor: return "Color";
case EbvSecondaryColor: return "SecondaryColor";
case EbvFace: return "Face";
case EbvFragCoord: return "FragCoord";
case EbvPointCoord: return "PointCoord";
case EbvFragColor: return "FragColor";
case EbvFragData: return "FragData";
case EbvFragDepth: return "FragDepth";
case EbvFragStencilRef: return "FragStencilRef";
case EbvSampleId: return "SampleId";
case EbvSamplePosition: return "SamplePosition";
case EbvSampleMask: return "SampleMaskIn";
case EbvHelperInvocation: return "HelperInvocation";
#ifdef AMD_EXTENSIONS
case EbvBaryCoordNoPersp: return "BaryCoordNoPersp";
case EbvBaryCoordNoPerspCentroid: return "BaryCoordNoPerspCentroid";
case EbvBaryCoordNoPerspSample: return "BaryCoordNoPerspSample";
case EbvBaryCoordSmooth: return "BaryCoordSmooth";
case EbvBaryCoordSmoothCentroid: return "BaryCoordSmoothCentroid";
case EbvBaryCoordSmoothSample: return "BaryCoordSmoothSample";
case EbvBaryCoordPullModel: return "BaryCoordPullModel";
#endif
case EbvViewIndex: return "ViewIndex";
case EbvDeviceIndex: return "DeviceIndex";
#ifdef NV_EXTENSIONS
case EbvViewportMaskNV: return "ViewportMaskNV";
case EbvSecondaryPositionNV: return "SecondaryPositionNV";
case EbvSecondaryViewportMaskNV: return "SecondaryViewportMaskNV";
case EbvPositionPerViewNV: return "PositionPerViewNV";
case EbvViewportMaskPerViewNV: return "ViewportMaskPerViewNV";
case EbvFragFullyCoveredNV: return "FragFullyCoveredNV";
#endif
default: return "unknown built-in variable";
}
}
// In this enum, order matters; users can assume higher precision is a bigger value
// and EpqNone is 0.
enum TPrecisionQualifier {
EpqNone = 0,
EpqLow,
EpqMedium,
EpqHigh
};
__inline const char* GetPrecisionQualifierString(TPrecisionQualifier p)
{
switch(p) {
case EpqNone: return ""; break;
case EpqLow: return "lowp"; break;
case EpqMedium: return "mediump"; break;
case EpqHigh: return "highp"; break;
default: return "unknown precision qualifier";
}
}
} // end namespace glslang
#endif // _BASICTYPES_INCLUDED_


@ -0,0 +1,274 @@
//
// Copyright (C) 2002-2005 3Dlabs Inc. Ltd.
// Copyright (C) 2012-2013 LunarG, Inc.
//
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
//
// Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
//
// Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
//
// Neither the name of 3Dlabs Inc. Ltd. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
// COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
// POSSIBILITY OF SUCH DAMAGE.
//
#ifndef _COMMON_INCLUDED_
#define _COMMON_INCLUDED_
#if (defined(_MSC_VER) && _MSC_VER < 1900 /*vs2015*/) || defined MINGW_HAS_SECURE_API
#include <basetsd.h>
#define snprintf sprintf_s
#define safe_vsprintf(buf,max,format,args) vsnprintf_s((buf), (max), (max), (format), (args))
#elif defined (solaris)
#define safe_vsprintf(buf,max,format,args) vsnprintf((buf), (max), (format), (args))
#include <sys/int_types.h>
#define UINT_PTR uintptr_t
#else
#define safe_vsprintf(buf,max,format,args) vsnprintf((buf), (max), (format), (args))
#include <stdint.h>
#define UINT_PTR uintptr_t
#endif
#if defined(__ANDROID__) || _MSC_VER < 1700
#include <sstream>
namespace std {
template<typename T>
std::string to_string(const T& val) {
std::ostringstream os;
os << val;
return os.str();
}
}
#endif
#if defined(_MSC_VER) && _MSC_VER < 1800
inline long long int strtoll (const char* str, char** endptr, int base)
{
return _strtoi64(str, endptr, base);
}
inline unsigned long long int strtoull (const char* str, char** endptr, int base)
{
return _strtoui64(str, endptr, base);
}
inline long long int atoll (const char* str)
{
return strtoll(str, NULL, 10);
}
#endif
#if defined(_MSC_VER)
#define strdup _strdup
#endif
/* windows only pragma */
#ifdef _MSC_VER
#pragma warning(disable : 4786) // Don't warn about too long identifiers
#pragma warning(disable : 4514) // unused inline method
#pragma warning(disable : 4201) // nameless union
#endif
#include <set>
#include <unordered_set>
#include <vector>
#include <map>
#include <unordered_map>
#include <list>
#include <algorithm>
#include <string>
#include <cstdio>
#include <cassert>
#include "PoolAlloc.h"
//
// Put POOL_ALLOCATOR_NEW_DELETE in base classes to make them use this scheme.
//
#define POOL_ALLOCATOR_NEW_DELETE(A) \
void* operator new(size_t s) { return (A).allocate(s); } \
void* operator new(size_t, void *_Where) { return (_Where); } \
void operator delete(void*) { } \
void operator delete(void *, void *) { } \
void* operator new[](size_t s) { return (A).allocate(s); } \
void* operator new[](size_t, void *_Where) { return (_Where); } \
void operator delete[](void*) { } \
void operator delete[](void *, void *) { }
namespace glslang {
//
// Pool version of string.
//
typedef pool_allocator<char> TStringAllocator;
typedef std::basic_string <char, std::char_traits<char>, TStringAllocator> TString;
} // end namespace glslang
// Repackage the std::hash for use by unordered map/set with a TString key.
namespace std {
template<> struct hash<glslang::TString> {
std::size_t operator()(const glslang::TString& s) const
{
const unsigned _FNV_offset_basis = 2166136261U;
const unsigned _FNV_prime = 16777619U;
unsigned _Val = _FNV_offset_basis;
size_t _Count = s.size();
const char* _First = s.c_str();
for (size_t _Next = 0; _Next < _Count; ++_Next)
{
_Val ^= (unsigned)_First[_Next];
_Val *= _FNV_prime;
}
return _Val;
}
};
}
namespace glslang {
inline TString* NewPoolTString(const char* s)
{
void* memory = GetThreadPoolAllocator().allocate(sizeof(TString));
return new(memory) TString(s);
}
template<class T> inline T* NewPoolObject(T*)
{
return new(GetThreadPoolAllocator().allocate(sizeof(T))) T;
}
template<class T> inline T* NewPoolObject(T, int instances)
{
return new(GetThreadPoolAllocator().allocate(instances * sizeof(T))) T[instances];
}
//
// Pool allocator versions of vectors, lists, and maps
//
template <class T> class TVector : public std::vector<T, pool_allocator<T> > {
public:
POOL_ALLOCATOR_NEW_DELETE(GetThreadPoolAllocator())
typedef typename std::vector<T, pool_allocator<T> >::size_type size_type;
TVector() : std::vector<T, pool_allocator<T> >() {}
TVector(const pool_allocator<T>& a) : std::vector<T, pool_allocator<T> >(a) {}
TVector(size_type i) : std::vector<T, pool_allocator<T> >(i) {}
TVector(size_type i, const T& val) : std::vector<T, pool_allocator<T> >(i, val) {}
};
template <class T> class TList : public std::list<T, pool_allocator<T> > {
};
template <class K, class D, class CMP = std::less<K> >
class TMap : public std::map<K, D, CMP, pool_allocator<std::pair<K const, D> > > {
};
template <class K, class D, class HASH = std::hash<K>, class PRED = std::equal_to<K> >
class TUnorderedMap : public std::unordered_map<K, D, HASH, PRED, pool_allocator<std::pair<K const, D> > > {
};
//
// Persistent string memory. Should only be used for strings that survive
// across compiles/links.
//
typedef std::basic_string<char> TPersistString;
//
// templatized min and max functions.
//
template <class T> T Min(const T a, const T b) { return a < b ? a : b; }
template <class T> T Max(const T a, const T b) { return a > b ? a : b; }
//
// Create a TString object from an integer.
//
#if defined _MSC_VER || defined MINGW_HAS_SECURE_API
inline const TString String(const int i, const int base = 10)
{
char text[16]; // 32 bit ints are at most 10 digits in base 10
_itoa_s(i, text, sizeof(text), base);
return text;
}
#else
inline const TString String(const int i, const int /*base*/ = 10)
{
char text[16]; // 32 bit ints are at most 10 digits in base 10
// we assume base 10 for all cases
snprintf(text, sizeof(text), "%d", i);
return text;
}
#endif
struct TSourceLoc {
void init() { name = nullptr; string = 0; line = 0; column = 0; }
void init(int stringNum) { init(); string = stringNum; }
// Returns the name if it exists. Otherwise, returns the string number.
std::string getStringNameOrNum(bool quoteStringName = true) const
{
if (name != nullptr)
return quoteStringName ? ("\"" + std::string(name) + "\"") : name;
return std::to_string((long long)string);
}
const char* name; // descriptive name for this string
int string;
int line;
int column;
};
class TPragmaTable : public TMap<TString, TString> {
public:
POOL_ALLOCATOR_NEW_DELETE(GetThreadPoolAllocator())
};
const int MaxTokenLength = 1024;
template <class T> bool IsPow2(T powerOf2)
{
if (powerOf2 <= 0)
return false;
return (powerOf2 & (powerOf2 - 1)) == 0;
}
// Round number up to a multiple of the given powerOf2, which is not
// a power, just a number that must be a power of 2.
template <class T> void RoundToPow2(T& number, int powerOf2)
{
assert(IsPow2(powerOf2));
number = (number + powerOf2 - 1) & ~(powerOf2 - 1);
}
template <class T> bool IsMultipleOfPow2(T number, int powerOf2)
{
assert(IsPow2(powerOf2));
return ! (number & (powerOf2 - 1));
}
} // end namespace glslang
#endif // _COMMON_INCLUDED_
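The std::hash<TString> specialization above is a 32-bit FNV-1a over the string bytes, needed so pool-allocated TStrings can key unordered containers. A standalone equivalent over std::string for reference (the function name is illustrative; the char-to-unsigned cast mirrors the one above):

#include <cstddef>
#include <cstdint>
#include <string>

inline std::size_t Fnv1aHash(const std::string& s) {
  const std::uint32_t kOffsetBasis = 2166136261U;
  const std::uint32_t kPrime = 16777619U;
  std::uint32_t value = kOffsetBasis;
  for (char c : s) {
    value ^= static_cast<std::uint32_t>(c);  // xor, then multiply (FNV-1a)
    value *= kPrime;
  }
  return value;
}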


@ -0,0 +1,625 @@
//
// Copyright (C) 2002-2005 3Dlabs Inc. Ltd.
// Copyright (C) 2013 LunarG, Inc.
//
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
//
// Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
//
// Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
//
// Neither the name of 3Dlabs Inc. Ltd. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
// COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
// POSSIBILITY OF SUCH DAMAGE.
//
#ifndef _CONSTANT_UNION_INCLUDED_
#define _CONSTANT_UNION_INCLUDED_
#include "../Include/Common.h"
#include "../Include/BaseTypes.h"
namespace glslang {
class TConstUnion {
public:
POOL_ALLOCATOR_NEW_DELETE(GetThreadPoolAllocator())
TConstUnion() : iConst(0), type(EbtInt) { }
void setIConst(int i)
{
iConst = i;
type = EbtInt;
}
void setUConst(unsigned int u)
{
uConst = u;
type = EbtUint;
}
void setI64Const(long long i64)
{
i64Const = i64;
type = EbtInt64;
}
void setU64Const(unsigned long long u64)
{
u64Const = u64;
type = EbtUint64;
}
void setDConst(double d)
{
dConst = d;
type = EbtDouble;
}
void setBConst(bool b)
{
bConst = b;
type = EbtBool;
}
void setSConst(const TString* s)
{
sConst = s;
type = EbtString;
}
int getIConst() const { return iConst; }
unsigned int getUConst() const { return uConst; }
long long getI64Const() const { return i64Const; }
unsigned long long getU64Const() const { return u64Const; }
double getDConst() const { return dConst; }
bool getBConst() const { return bConst; }
const TString* getSConst() const { return sConst; }
bool operator==(const int i) const
{
if (i == iConst)
return true;
return false;
}
bool operator==(const unsigned int u) const
{
if (u == uConst)
return true;
return false;
}
bool operator==(const long long i64) const
{
if (i64 == i64Const)
return true;
return false;
}
bool operator==(const unsigned long long u64) const
{
if (u64 == u64Const)
return true;
return false;
}
bool operator==(const double d) const
{
if (d == dConst)
return true;
return false;
}
bool operator==(const bool b) const
{
if (b == bConst)
return true;
return false;
}
bool operator==(const TConstUnion& constant) const
{
if (constant.type != type)
return false;
switch (type) {
case EbtInt:
if (constant.iConst == iConst)
return true;
break;
case EbtUint:
if (constant.uConst == uConst)
return true;
break;
case EbtInt64:
if (constant.i64Const == i64Const)
return true;
break;
case EbtUint64:
if (constant.u64Const == u64Const)
return true;
break;
case EbtDouble:
if (constant.dConst == dConst)
return true;
break;
case EbtBool:
if (constant.bConst == bConst)
return true;
break;
default:
assert(false && "Default missing");
}
return false;
}
bool operator!=(const int i) const
{
return !operator==(i);
}
bool operator!=(const unsigned int u) const
{
return !operator==(u);
}
bool operator!=(const long long i) const
{
return !operator==(i);
}
bool operator!=(const unsigned long long u) const
{
return !operator==(u);
}
bool operator!=(const float f) const
{
return !operator==(f);
}
bool operator!=(const bool b) const
{
return !operator==(b);
}
bool operator!=(const TConstUnion& constant) const
{
return !operator==(constant);
}
bool operator>(const TConstUnion& constant) const
{
assert(type == constant.type);
switch (type) {
case EbtInt:
if (iConst > constant.iConst)
return true;
return false;
case EbtUint:
if (uConst > constant.uConst)
return true;
return false;
case EbtInt64:
if (i64Const > constant.i64Const)
return true;
return false;
case EbtUint64:
if (u64Const > constant.u64Const)
return true;
return false;
case EbtDouble:
if (dConst > constant.dConst)
return true;
return false;
default:
assert(false && "Default missing");
return false;
}
}
bool operator<(const TConstUnion& constant) const
{
assert(type == constant.type);
switch (type) {
case EbtInt:
if (iConst < constant.iConst)
return true;
return false;
case EbtUint:
if (uConst < constant.uConst)
return true;
return false;
case EbtInt64:
if (i64Const < constant.i64Const)
return true;
return false;
case EbtUint64:
if (u64Const < constant.u64Const)
return true;
return false;
case EbtDouble:
if (dConst < constant.dConst)
return true;
return false;
default:
assert(false && "Default missing");
return false;
}
}
TConstUnion operator+(const TConstUnion& constant) const
{
TConstUnion returnValue;
assert(type == constant.type);
switch (type) {
case EbtInt: returnValue.setIConst(iConst + constant.iConst); break;
case EbtInt64: returnValue.setI64Const(i64Const + constant.i64Const); break;
case EbtUint: returnValue.setUConst(uConst + constant.uConst); break;
case EbtUint64: returnValue.setU64Const(u64Const + constant.u64Const); break;
case EbtDouble: returnValue.setDConst(dConst + constant.dConst); break;
default: assert(false && "Default missing");
}
return returnValue;
}
TConstUnion operator-(const TConstUnion& constant) const
{
TConstUnion returnValue;
assert(type == constant.type);
switch (type) {
case EbtInt: returnValue.setIConst(iConst - constant.iConst); break;
case EbtInt64: returnValue.setI64Const(i64Const - constant.i64Const); break;
case EbtUint: returnValue.setUConst(uConst - constant.uConst); break;
case EbtUint64: returnValue.setU64Const(u64Const - constant.u64Const); break;
case EbtDouble: returnValue.setDConst(dConst - constant.dConst); break;
default: assert(false && "Default missing");
}
return returnValue;
}
TConstUnion operator*(const TConstUnion& constant) const
{
TConstUnion returnValue;
assert(type == constant.type);
switch (type) {
case EbtInt: returnValue.setIConst(iConst * constant.iConst); break;
case EbtInt64: returnValue.setI64Const(i64Const * constant.i64Const); break;
case EbtUint: returnValue.setUConst(uConst * constant.uConst); break;
case EbtUint64: returnValue.setU64Const(u64Const * constant.u64Const); break;
case EbtDouble: returnValue.setDConst(dConst * constant.dConst); break;
default: assert(false && "Default missing");
}
return returnValue;
}
TConstUnion operator%(const TConstUnion& constant) const
{
TConstUnion returnValue;
assert(type == constant.type);
switch (type) {
case EbtInt: returnValue.setIConst(iConst % constant.iConst); break;
case EbtInt64: returnValue.setI64Const(i64Const % constant.i64Const); break;
case EbtUint: returnValue.setUConst(uConst % constant.uConst); break;
case EbtUint64: returnValue.setU64Const(u64Const % constant.u64Const); break;
default: assert(false && "Default missing");
}
return returnValue;
}
TConstUnion operator>>(const TConstUnion& constant) const
{
TConstUnion returnValue;
switch (type) {
case EbtInt:
switch (constant.type) {
case EbtInt: returnValue.setIConst(iConst >> constant.iConst); break;
case EbtUint: returnValue.setIConst(iConst >> constant.uConst); break;
case EbtInt64: returnValue.setIConst(iConst >> constant.i64Const); break;
case EbtUint64: returnValue.setIConst(iConst >> constant.u64Const); break;
default: assert(false && "Default missing");
}
break;
case EbtUint:
switch (constant.type) {
case EbtInt: returnValue.setUConst(uConst >> constant.iConst); break;
case EbtUint: returnValue.setUConst(uConst >> constant.uConst); break;
case EbtInt64: returnValue.setUConst(uConst >> constant.i64Const); break;
case EbtUint64: returnValue.setUConst(uConst >> constant.u64Const); break;
default: assert(false && "Default missing");
}
break;
case EbtInt64:
switch (constant.type) {
case EbtInt: returnValue.setI64Const(i64Const >> constant.iConst); break;
case EbtUint: returnValue.setI64Const(i64Const >> constant.uConst); break;
case EbtInt64: returnValue.setI64Const(i64Const >> constant.i64Const); break;
case EbtUint64: returnValue.setI64Const(i64Const >> constant.u64Const); break;
default: assert(false && "Default missing");
}
break;
case EbtUint64:
switch (constant.type) {
case EbtInt: returnValue.setU64Const(u64Const >> constant.iConst); break;
case EbtUint: returnValue.setU64Const(u64Const >> constant.uConst); break;
case EbtInt64: returnValue.setU64Const(u64Const >> constant.i64Const); break;
case EbtUint64: returnValue.setU64Const(u64Const >> constant.u64Const); break;
default: assert(false && "Default missing");
}
break;
default: assert(false && "Default missing");
}
return returnValue;
}
TConstUnion operator<<(const TConstUnion& constant) const
{
TConstUnion returnValue;
switch (type) {
case EbtInt:
switch (constant.type) {
case EbtInt: returnValue.setIConst(iConst << constant.iConst); break;
case EbtUint: returnValue.setIConst(iConst << constant.uConst); break;
case EbtInt64: returnValue.setIConst(iConst << constant.i64Const); break;
case EbtUint64: returnValue.setIConst(iConst << constant.u64Const); break;
default: assert(false && "Default missing");
}
break;
case EbtUint:
switch (constant.type) {
case EbtInt: returnValue.setUConst(uConst << constant.iConst); break;
case EbtUint: returnValue.setUConst(uConst << constant.uConst); break;
case EbtInt64: returnValue.setUConst(uConst << constant.i64Const); break;
case EbtUint64: returnValue.setUConst(uConst << constant.u64Const); break;
default: assert(false && "Default missing");
}
break;
case EbtInt64:
switch (constant.type) {
case EbtInt: returnValue.setI64Const(i64Const << constant.iConst); break;
case EbtUint: returnValue.setI64Const(i64Const << constant.uConst); break;
case EbtInt64: returnValue.setI64Const(i64Const << constant.i64Const); break;
case EbtUint64: returnValue.setI64Const(i64Const << constant.u64Const); break;
default: assert(false && "Default missing");
}
break;
case EbtUint64:
switch (constant.type) {
case EbtInt: returnValue.setU64Const(u64Const << constant.iConst); break;
case EbtUint: returnValue.setU64Const(u64Const << constant.uConst); break;
case EbtInt64: returnValue.setU64Const(u64Const << constant.i64Const); break;
case EbtUint64: returnValue.setU64Const(u64Const << constant.u64Const); break;
default: assert(false && "Default missing");
}
break;
default: assert(false && "Default missing");
}
return returnValue;
}
TConstUnion operator&(const TConstUnion& constant) const
{
TConstUnion returnValue;
assert(type == constant.type);
switch (type) {
case EbtInt: returnValue.setIConst(iConst & constant.iConst); break;
case EbtUint: returnValue.setUConst(uConst & constant.uConst); break;
case EbtInt64: returnValue.setI64Const(i64Const & constant.i64Const); break;
case EbtUint64: returnValue.setU64Const(u64Const & constant.u64Const); break;
default: assert(false && "Default missing");
}
return returnValue;
}
TConstUnion operator|(const TConstUnion& constant) const
{
TConstUnion returnValue;
assert(type == constant.type);
switch (type) {
case EbtInt: returnValue.setIConst(iConst | constant.iConst); break;
case EbtUint: returnValue.setUConst(uConst | constant.uConst); break;
case EbtInt64: returnValue.setI64Const(i64Const | constant.i64Const); break;
case EbtUint64: returnValue.setU64Const(u64Const | constant.u64Const); break;
default: assert(false && "Default missing");
}
return returnValue;
}
TConstUnion operator^(const TConstUnion& constant) const
{
TConstUnion returnValue;
assert(type == constant.type);
switch (type) {
case EbtInt: returnValue.setIConst(iConst ^ constant.iConst); break;
case EbtUint: returnValue.setUConst(uConst ^ constant.uConst); break;
case EbtInt64: returnValue.setI64Const(i64Const ^ constant.i64Const); break;
case EbtUint64: returnValue.setU64Const(u64Const ^ constant.u64Const); break;
default: assert(false && "Default missing");
}
return returnValue;
}
TConstUnion operator~() const
{
TConstUnion returnValue;
switch (type) {
case EbtInt: returnValue.setIConst(~iConst); break;
case EbtUint: returnValue.setUConst(~uConst); break;
case EbtInt64: returnValue.setI64Const(~i64Const); break;
case EbtUint64: returnValue.setU64Const(~u64Const); break;
default: assert(false && "Default missing");
}
return returnValue;
}
TConstUnion operator&&(const TConstUnion& constant) const
{
TConstUnion returnValue;
assert(type == constant.type);
switch (type) {
case EbtBool: returnValue.setBConst(bConst && constant.bConst); break;
default: assert(false && "Default missing");
}
return returnValue;
}
TConstUnion operator||(const TConstUnion& constant) const
{
TConstUnion returnValue;
assert(type == constant.type);
switch (type) {
case EbtBool: returnValue.setBConst(bConst || constant.bConst); break;
default: assert(false && "Default missing");
}
return returnValue;
}
TBasicType getType() const { return type; }
private:
union {
int iConst; // used for ivec, scalar ints
unsigned int uConst; // used for uvec, scalar uints
long long i64Const; // used for i64vec, scalar int64s
unsigned long long u64Const; // used for u64vec, scalar uint64s
bool bConst; // used for bvec, scalar bools
double dConst; // used for vec, dvec, mat, dmat, scalar floats and doubles
const TString* sConst; // string constant
};
TBasicType type;
};
// Encapsulate having a pointer to an array of TConstUnion,
// which only needs to be allocated if its size is going to be
// bigger than 0.
//
// One convenience is being able to use [] to index into the array directly,
// instead of having to dereference the underlying pointer to the vector first.
//
// General usage is that the size is known up front, and it is
// created once with the proper size.
//
class TConstUnionArray {
public:
POOL_ALLOCATOR_NEW_DELETE(GetThreadPoolAllocator())
TConstUnionArray() : unionArray(nullptr) { }
virtual ~TConstUnionArray() { }
explicit TConstUnionArray(int size)
{
if (size == 0)
unionArray = nullptr;
else
unionArray = new TConstUnionVector(size);
}
TConstUnionArray(const TConstUnionArray& a) : unionArray(a.unionArray) { }
TConstUnionArray(const TConstUnionArray& a, int start, int size)
{
unionArray = new TConstUnionVector(size);
for (int i = 0; i < size; ++i)
(*unionArray)[i] = a[start + i];
}
// Use this constructor for a smear operation
TConstUnionArray(int size, const TConstUnion& val)
{
unionArray = new TConstUnionVector(size, val);
}
int size() const { return unionArray ? (int)unionArray->size() : 0; }
TConstUnion& operator[](size_t index) { return (*unionArray)[index]; }
const TConstUnion& operator[](size_t index) const { return (*unionArray)[index]; }
bool operator==(const TConstUnionArray& rhs) const
{
// this includes the case that both are unallocated
if (unionArray == rhs.unionArray)
return true;
if (! unionArray || ! rhs.unionArray)
return false;
return *unionArray == *rhs.unionArray;
}
bool operator!=(const TConstUnionArray& rhs) const { return ! operator==(rhs); }
double dot(const TConstUnionArray& rhs)
{
assert(rhs.unionArray->size() == unionArray->size());
double sum = 0.0;
for (size_t comp = 0; comp < unionArray->size(); ++comp)
sum += (*this)[comp].getDConst() * rhs[comp].getDConst();
return sum;
}
bool empty() const { return unionArray == nullptr; }
protected:
typedef TVector<TConstUnion> TConstUnionVector;
TConstUnionVector* unionArray;
};
} // end namespace glslang
#endif // _CONSTANT_UNION_INCLUDED_
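The arithmetic, shift, and bitwise operators above are what the front end relies on to fold constant expressions. Below is a minimal sketch of driving TConstUnion directly; it is not part of the vendored header, and the include path and helper function name are assumptions.
#include "ConstantUnion.h"  // vendored header above; exact include path is an assumption

// Folds 2.5 * 4.0 the same way the constant folder does internally.
glslang::TConstUnion FoldExample() {
    glslang::TConstUnion a, b;
    a.setDConst(2.5);                      // stores the value and marks the type EbtDouble
    b.setDConst(4.0);
    glslang::TConstUnion product = a * b;  // operator* asserts both operands share a type
    return product;                        // holds 10.0; getType() == EbtDouble
}
TConstUnionArray, by contrast, allocates its backing TVector through the thread pool allocator (POOL_ALLOCATOR_NEW_DELETE), so it can only be constructed once glslang's pool allocator has been set up for the calling thread.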


@ -0,0 +1,144 @@
//
// Copyright (C) 2002-2005 3Dlabs Inc. Ltd.
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
//
// Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
//
// Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
//
// Neither the name of 3Dlabs Inc. Ltd. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
// COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
// POSSIBILITY OF SUCH DAMAGE.
//
#ifndef _INFOSINK_INCLUDED_
#define _INFOSINK_INCLUDED_
#include "../Include/Common.h"
#include <cmath>
namespace glslang {
//
// TPrefixType is used to centralize how info log messages start.
// See below.
//
enum TPrefixType {
EPrefixNone,
EPrefixWarning,
EPrefixError,
EPrefixInternalError,
EPrefixUnimplemented,
EPrefixNote
};
enum TOutputStream {
ENull = 0,
EDebugger = 0x01,
EStdOut = 0x02,
EString = 0x04,
};
//
// Encapsulate info logs for all objects that have them.
//
// The methods are a general set of tools for getting a variety of
// messages and types inserted into the log.
//
class TInfoSinkBase {
public:
TInfoSinkBase() : outputStream(4) {}
void erase() { sink.erase(); }
TInfoSinkBase& operator<<(const TPersistString& t) { append(t); return *this; }
TInfoSinkBase& operator<<(char c) { append(1, c); return *this; }
TInfoSinkBase& operator<<(const char* s) { append(s); return *this; }
TInfoSinkBase& operator<<(int n) { append(String(n)); return *this; }
TInfoSinkBase& operator<<(unsigned int n) { append(String(n)); return *this; }
TInfoSinkBase& operator<<(float n) { const int size = 40; char buf[size];
snprintf(buf, size, (fabs(n) > 1e-8 && fabs(n) < 1e8) || n == 0.0f ? "%f" : "%g", n);
append(buf);
return *this; }
TInfoSinkBase& operator+(const TPersistString& t) { append(t); return *this; }
TInfoSinkBase& operator+(const TString& t) { append(t); return *this; }
TInfoSinkBase& operator<<(const TString& t) { append(t); return *this; }
TInfoSinkBase& operator+(const char* s) { append(s); return *this; }
const char* c_str() const { return sink.c_str(); }
void prefix(TPrefixType message) {
switch(message) {
case EPrefixNone: break;
case EPrefixWarning: append("WARNING: "); break;
case EPrefixError: append("ERROR: "); break;
case EPrefixInternalError: append("INTERNAL ERROR: "); break;
case EPrefixUnimplemented: append("UNIMPLEMENTED: "); break;
case EPrefixNote: append("NOTE: "); break;
default: append("UNKNOWN ERROR: "); break;
}
}
void location(const TSourceLoc& loc) {
const int maxSize = 24;
char locText[maxSize];
snprintf(locText, maxSize, ":%d", loc.line);
append(loc.getStringNameOrNum(false).c_str());
append(locText);
append(": ");
}
void message(TPrefixType message, const char* s) {
prefix(message);
append(s);
append("\n");
}
void message(TPrefixType message, const char* s, const TSourceLoc& loc) {
prefix(message);
location(loc);
append(s);
append("\n");
}
void setOutputStream(int output = 4)
{
outputStream = output;
}
protected:
void append(const char* s);
void append(int count, char c);
void append(const TPersistString& t);
void append(const TString& t);
void checkMem(size_t growth) { if (sink.capacity() < sink.size() + growth + 2)
sink.reserve(sink.capacity() + sink.capacity() / 2); }
void appendToStream(const char* s);
TPersistString sink;
int outputStream;
};
} // end namespace glslang
class TInfoSink {
public:
glslang::TInfoSinkBase info;
glslang::TInfoSinkBase debug;
};
#endif // _INFOSINK_INCLUDED_
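A small usage sketch for the sink above; it is illustrative only and assumes the TInfoSinkBase::append overloads, which are defined in the glslang sources rather than in this header, are linked in.
#include "InfoSink.h"  // vendored header above; include path is an assumption
#include <cstdio>

void ReportExample() {
    TInfoSink sink;  // bundles the info and debug streams declared above
    sink.info.message(glslang::EPrefixWarning, "constant folding overflow");
    sink.info << "compiled " << 3 << " functions\n";
    std::printf("%s", sink.info.c_str());  // prints "WARNING: ..." followed by the stream text
}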


@ -0,0 +1,44 @@
//
// Copyright (C) 2002-2005 3Dlabs Inc. Ltd.
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
//
// Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
//
// Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
//
// Neither the name of 3Dlabs Inc. Ltd. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
// COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
// POSSIBILITY OF SUCH DAMAGE.
//
#ifndef __INITIALIZE_GLOBALS_INCLUDED_
#define __INITIALIZE_GLOBALS_INCLUDED_
namespace glslang {
bool InitializePoolIndex();
} // end namespace glslang
#endif // __INITIALIZE_GLOBALS_INCLUDED_


@ -0,0 +1,317 @@
//
// Copyright (C) 2002-2005 3Dlabs Inc. Ltd.
// Copyright (C) 2012-2013 LunarG, Inc.
//
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
//
// Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
//
// Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
//
// Neither the name of 3Dlabs Inc. Ltd. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
// COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
// POSSIBILITY OF SUCH DAMAGE.
//
#ifndef _POOLALLOC_INCLUDED_
#define _POOLALLOC_INCLUDED_
#ifdef _DEBUG
# define GUARD_BLOCKS // define to enable guard block sanity checking
#endif
//
// This header defines an allocator that can be used to efficiently
// allocate a large number of small requests for heap memory, with the
// intention that they are not individually deallocated, but rather
// collectively deallocated at one time.
//
// This simultaneously
//
// * Makes each individual allocation much more efficient; the
// typical allocation is trivial.
// * Completely avoids the cost of doing individual deallocation.
// * Saves the trouble of tracking down and plugging a large class of leaks.
//
// Individual classes can use this allocator by supplying their own
// new and delete methods.
//
// STL containers can use this allocator by using the pool_allocator
// class as the allocator (second) template argument.
//
#include <cstddef>
#include <cstring>
#include <vector>
namespace glslang {
// If we are using guard blocks, we must track each individual
// allocation. If we aren't using guard blocks, these
// never get instantiated, so won't have any impact.
//
class TAllocation {
public:
TAllocation(size_t size, unsigned char* mem, TAllocation* prev = 0) :
size(size), mem(mem), prevAlloc(prev) {
// Allocations are bracketed:
// [allocationHeader][initialGuardBlock][userData][finalGuardBlock]
// This would be cleaner with if (guardBlockSize)..., but that
// makes the compiler print warnings about 0 length memsets,
// even with the if() protecting them.
# ifdef GUARD_BLOCKS
memset(preGuard(), guardBlockBeginVal, guardBlockSize);
memset(data(), userDataFill, size);
memset(postGuard(), guardBlockEndVal, guardBlockSize);
# endif
}
void check() const {
checkGuardBlock(preGuard(), guardBlockBeginVal, "before");
checkGuardBlock(postGuard(), guardBlockEndVal, "after");
}
void checkAllocList() const;
// Return total size needed to accommodate user buffer of 'size',
// plus our tracking data.
inline static size_t allocationSize(size_t size) {
return size + 2 * guardBlockSize + headerSize();
}
// Offset from surrounding buffer to get to user data buffer.
inline static unsigned char* offsetAllocation(unsigned char* m) {
return m + guardBlockSize + headerSize();
}
private:
void checkGuardBlock(unsigned char* blockMem, unsigned char val, const char* locText) const;
// Find offsets to pre and post guard blocks, and user data buffer
unsigned char* preGuard() const { return mem + headerSize(); }
unsigned char* data() const { return preGuard() + guardBlockSize; }
unsigned char* postGuard() const { return data() + size; }
size_t size; // size of the user data area
unsigned char* mem; // beginning of our allocation (pts to header)
TAllocation* prevAlloc; // prior allocation in the chain
const static unsigned char guardBlockBeginVal;
const static unsigned char guardBlockEndVal;
const static unsigned char userDataFill;
const static size_t guardBlockSize;
# ifdef GUARD_BLOCKS
inline static size_t headerSize() { return sizeof(TAllocation); }
# else
inline static size_t headerSize() { return 0; }
# endif
};
//
// There are several stacks. One is to track the user's push and pop
// calls, and is not yet implemented. The others are simply
// repositories of free pages or used pages.
//
// Page stacks are linked together with a simple header at the beginning
// of each allocation obtained from the underlying OS. Multi-page allocations
// are returned to the OS. Individual page allocations are kept for future
// re-use.
//
// The "page size" used here does not, and need not, match the underlying
// OS page size; but keeping it near that size, or equal to a whole number
// of OS pages, is likely most optimal.
//
class TPoolAllocator {
public:
TPoolAllocator(int growthIncrement = 8*1024, int allocationAlignment = 16);
//
// Don't call the destructor just to free up the memory, call pop()
//
~TPoolAllocator();
//
// Call push() to establish a new place to pop memory to. Does not
// have to be called to get things started.
//
void push();
//
// Call pop() to free all memory allocated since the last call to push(),
// or if no last call to push, frees all memory since first allocation.
//
void pop();
//
// Call popAll() to free all memory allocated.
//
void popAll();
//
// Call allocate() to actually acquire memory. Returns 0 if no memory
// available, otherwise a properly aligned pointer to 'numBytes' of memory.
//
void* allocate(size_t numBytes);
//
// There is no deallocate. The point of this class is that
// deallocation can be skipped by the user of it, as the model
// of use is to simultaneously deallocate everything at once
// by calling pop(), and to not have to solve memory leak problems.
//
protected:
friend struct tHeader;
struct tHeader {
tHeader(tHeader* nextPage, size_t pageCount) :
#ifdef GUARD_BLOCKS
lastAllocation(0),
#endif
nextPage(nextPage), pageCount(pageCount) { }
~tHeader() {
#ifdef GUARD_BLOCKS
if (lastAllocation)
lastAllocation->checkAllocList();
#endif
}
#ifdef GUARD_BLOCKS
TAllocation* lastAllocation;
#endif
tHeader* nextPage;
size_t pageCount;
};
struct tAllocState {
size_t offset;
tHeader* page;
};
typedef std::vector<tAllocState> tAllocStack;
// Track allocations if and only if we're using guard blocks
#ifndef GUARD_BLOCKS
void* initializeAllocation(tHeader*, unsigned char* memory, size_t) {
#else
void* initializeAllocation(tHeader* block, unsigned char* memory, size_t numBytes) {
new(memory) TAllocation(numBytes, memory, block->lastAllocation);
block->lastAllocation = reinterpret_cast<TAllocation*>(memory);
#endif
// This is optimized entirely away if GUARD_BLOCKS is not defined.
return TAllocation::offsetAllocation(memory);
}
size_t pageSize; // granularity of allocation from the OS
size_t alignment; // all returned allocations will be aligned at
// this granularity, which will be a power of 2
size_t alignmentMask;
size_t headerSkip; // amount of memory to skip to make room for the
// header (basically, size of header, rounded
// up to make it aligned)
size_t currentPageOffset; // next offset in top of inUseList to allocate from
tHeader* freeList; // list of popped memory
tHeader* inUseList; // list of all memory currently being used
tAllocStack stack; // stack of where to allocate from, to partition pool
int numCalls; // just an interesting statistic
size_t totalBytes; // just an interesting statistic
private:
TPoolAllocator& operator=(const TPoolAllocator&); // don't allow assignment operator
TPoolAllocator(const TPoolAllocator&); // don't allow default copy constructor
};
//
// There could potentially be many pools with pops happening at
// different times. But a simple use is to have a global pop
// with everyone using the same global allocator.
//
extern TPoolAllocator& GetThreadPoolAllocator();
void SetThreadPoolAllocator(TPoolAllocator* poolAllocator);
//
// This STL compatible allocator is intended to be used as the allocator
// parameter to templatized STL containers, like vector and map.
//
// It will use the pools for allocation, and not
// do any deallocation, but will still do destruction.
//
template<class T>
class pool_allocator {
public:
typedef size_t size_type;
typedef ptrdiff_t difference_type;
typedef T *pointer;
typedef const T *const_pointer;
typedef T& reference;
typedef const T& const_reference;
typedef T value_type;
template<class Other>
struct rebind {
typedef pool_allocator<Other> other;
};
pointer address(reference x) const { return &x; }
const_pointer address(const_reference x) const { return &x; }
pool_allocator() : allocator(GetThreadPoolAllocator()) { }
pool_allocator(TPoolAllocator& a) : allocator(a) { }
pool_allocator(const pool_allocator<T>& p) : allocator(p.allocator) { }
template<class Other>
pool_allocator(const pool_allocator<Other>& p) : allocator(p.getAllocator()) { }
pointer allocate(size_type n) {
return reinterpret_cast<pointer>(getAllocator().allocate(n * sizeof(T))); }
pointer allocate(size_type n, const void*) {
return reinterpret_cast<pointer>(getAllocator().allocate(n * sizeof(T))); }
void deallocate(void*, size_type) { }
void deallocate(pointer, size_type) { }
pointer _Charalloc(size_t n) {
return reinterpret_cast<pointer>(getAllocator().allocate(n)); }
void construct(pointer p, const T& val) { new ((void *)p) T(val); }
void destroy(pointer p) { p->T::~T(); }
bool operator==(const pool_allocator& rhs) const { return &getAllocator() == &rhs.getAllocator(); }
bool operator!=(const pool_allocator& rhs) const { return &getAllocator() != &rhs.getAllocator(); }
size_type max_size() const { return static_cast<size_type>(-1) / sizeof(T); }
size_type max_size(int size) const { return static_cast<size_type>(-1) / size; }
void setAllocator(TPoolAllocator* a) { allocator = *a; }
TPoolAllocator& getAllocator() const { return allocator; }
protected:
pool_allocator& operator=(const pool_allocator&) { return *this; }
TPoolAllocator& allocator;
};
} // end namespace glslang
#endif // _POOLALLOC_INCLUDED_
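A minimal sketch of the usage pattern the comments above describe: allocate freely between push() and pop(), then release everything at once. It assumes a pool has already been installed for the calling thread (for example via SetThreadPoolAllocator) and that the include path matches the vendored layout.
#include "PoolAlloc.h"
#include <vector>

void BuildScratchData() {
    glslang::TPoolAllocator& pool = glslang::GetThreadPoolAllocator();
    pool.push();  // mark a point to pop back to
    {
        // Raw allocation straight from the pool; no matching free is ever issued.
        int* scratch = static_cast<int*>(pool.allocate(64 * sizeof(int)));
        scratch[0] = 1;

        // An STL container backed by the pool via the allocator template parameter.
        std::vector<int, glslang::pool_allocator<int>> ids;
        ids.push_back(42);
    }
    pool.pop();  // releases everything allocated since push(), all at once
}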


@ -0,0 +1,140 @@
//
// Copyright (C) 2002-2005 3Dlabs Inc. Ltd.
// Copyright (C) 2013 LunarG, Inc.
//
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
//
// Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
//
// Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
//
// Neither the name of 3Dlabs Inc. Ltd. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
// COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
// POSSIBILITY OF SUCH DAMAGE.
//
#ifndef _RESOURCE_LIMITS_INCLUDED_
#define _RESOURCE_LIMITS_INCLUDED_
struct TLimits {
bool nonInductiveForLoops;
bool whileLoops;
bool doWhileLoops;
bool generalUniformIndexing;
bool generalAttributeMatrixVectorIndexing;
bool generalVaryingIndexing;
bool generalSamplerIndexing;
bool generalVariableIndexing;
bool generalConstantMatrixVectorIndexing;
};
struct TBuiltInResource {
int maxLights;
int maxClipPlanes;
int maxTextureUnits;
int maxTextureCoords;
int maxVertexAttribs;
int maxVertexUniformComponents;
int maxVaryingFloats;
int maxVertexTextureImageUnits;
int maxCombinedTextureImageUnits;
int maxTextureImageUnits;
int maxFragmentUniformComponents;
int maxDrawBuffers;
int maxVertexUniformVectors;
int maxVaryingVectors;
int maxFragmentUniformVectors;
int maxVertexOutputVectors;
int maxFragmentInputVectors;
int minProgramTexelOffset;
int maxProgramTexelOffset;
int maxClipDistances;
int maxComputeWorkGroupCountX;
int maxComputeWorkGroupCountY;
int maxComputeWorkGroupCountZ;
int maxComputeWorkGroupSizeX;
int maxComputeWorkGroupSizeY;
int maxComputeWorkGroupSizeZ;
int maxComputeUniformComponents;
int maxComputeTextureImageUnits;
int maxComputeImageUniforms;
int maxComputeAtomicCounters;
int maxComputeAtomicCounterBuffers;
int maxVaryingComponents;
int maxVertexOutputComponents;
int maxGeometryInputComponents;
int maxGeometryOutputComponents;
int maxFragmentInputComponents;
int maxImageUnits;
int maxCombinedImageUnitsAndFragmentOutputs;
int maxCombinedShaderOutputResources;
int maxImageSamples;
int maxVertexImageUniforms;
int maxTessControlImageUniforms;
int maxTessEvaluationImageUniforms;
int maxGeometryImageUniforms;
int maxFragmentImageUniforms;
int maxCombinedImageUniforms;
int maxGeometryTextureImageUnits;
int maxGeometryOutputVertices;
int maxGeometryTotalOutputComponents;
int maxGeometryUniformComponents;
int maxGeometryVaryingComponents;
int maxTessControlInputComponents;
int maxTessControlOutputComponents;
int maxTessControlTextureImageUnits;
int maxTessControlUniformComponents;
int maxTessControlTotalOutputComponents;
int maxTessEvaluationInputComponents;
int maxTessEvaluationOutputComponents;
int maxTessEvaluationTextureImageUnits;
int maxTessEvaluationUniformComponents;
int maxTessPatchComponents;
int maxPatchVertices;
int maxTessGenLevel;
int maxViewports;
int maxVertexAtomicCounters;
int maxTessControlAtomicCounters;
int maxTessEvaluationAtomicCounters;
int maxGeometryAtomicCounters;
int maxFragmentAtomicCounters;
int maxCombinedAtomicCounters;
int maxAtomicCounterBindings;
int maxVertexAtomicCounterBuffers;
int maxTessControlAtomicCounterBuffers;
int maxTessEvaluationAtomicCounterBuffers;
int maxGeometryAtomicCounterBuffers;
int maxFragmentAtomicCounterBuffers;
int maxCombinedAtomicCounterBuffers;
int maxAtomicCounterBufferSize;
int maxTransformFeedbackBuffers;
int maxTransformFeedbackInterleavedComponents;
int maxCullDistances;
int maxCombinedClipAndCullDistances;
int maxSamples;
TLimits limits;
};
#endif // _RESOURCE_LIMITS_INCLUDED_
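TBuiltInResource is a plain aggregate, so callers simply fill it in before handing it to the front end. A short sketch follows; the particular values are arbitrary placeholders, not limits required by glslang, and the include path is an assumption.
#include "ResourceLimits.h"

TBuiltInResource MakeExampleResources() {
    TBuiltInResource resources = {};  // zero-initialize every limit first
    resources.maxDrawBuffers = 4;
    resources.maxCombinedTextureImageUnits = 80;
    resources.maxComputeWorkGroupSizeX = 1024;
    resources.limits.whileLoops = true;
    resources.limits.doWhileLoops = true;
    return resources;
}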


@ -0,0 +1,176 @@
//
// Copyright (C) 2002-2005 3Dlabs Inc. Ltd.
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
//
// Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
//
// Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
//
// Neither the name of 3Dlabs Inc. Ltd. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
// COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
// POSSIBILITY OF SUCH DAMAGE.
//
#ifndef _SHHANDLE_INCLUDED_
#define _SHHANDLE_INCLUDED_
//
// Machine independent part of the compiler private objects
// sent as ShHandle to the driver.
//
// This should not be included by driver code.
//
#define SH_EXPORTING
#include "../Public/ShaderLang.h"
#include "../MachineIndependent/Versions.h"
#include "InfoSink.h"
class TCompiler;
class TLinker;
class TUniformMap;
//
// The base class used to back handles returned to the driver.
//
class TShHandleBase {
public:
TShHandleBase() { pool = new glslang::TPoolAllocator; }
virtual ~TShHandleBase() { delete pool; }
virtual TCompiler* getAsCompiler() { return 0; }
virtual TLinker* getAsLinker() { return 0; }
virtual TUniformMap* getAsUniformMap() { return 0; }
virtual glslang::TPoolAllocator* getPool() const { return pool; }
private:
glslang::TPoolAllocator* pool;
};
//
// The base class for the machine dependent linker to derive from
// for managing where uniforms live.
//
class TUniformMap : public TShHandleBase {
public:
TUniformMap() { }
virtual ~TUniformMap() { }
virtual TUniformMap* getAsUniformMap() { return this; }
virtual int getLocation(const char* name) = 0;
virtual TInfoSink& getInfoSink() { return infoSink; }
TInfoSink infoSink;
};
class TIntermNode;
//
// The base class for the machine dependent compiler to derive from
// for managing object code from the compile.
//
class TCompiler : public TShHandleBase {
public:
TCompiler(EShLanguage l, TInfoSink& sink) : infoSink(sink) , language(l), haveValidObjectCode(false) { }
virtual ~TCompiler() { }
EShLanguage getLanguage() { return language; }
virtual TInfoSink& getInfoSink() { return infoSink; }
virtual bool compile(TIntermNode* root, int version = 0, EProfile profile = ENoProfile) = 0;
virtual TCompiler* getAsCompiler() { return this; }
virtual bool linkable() { return haveValidObjectCode; }
TInfoSink& infoSink;
protected:
TCompiler& operator=(TCompiler&);
EShLanguage language;
bool haveValidObjectCode;
};
//
// Link operations are based on a list of compile results...
//
typedef glslang::TVector<TCompiler*> TCompilerList;
typedef glslang::TVector<TShHandleBase*> THandleList;
//
// The base class for the machine dependent linker to derive from
// to manage the resulting executable.
//
class TLinker : public TShHandleBase {
public:
TLinker(EShExecutable e, TInfoSink& iSink) :
infoSink(iSink),
executable(e),
haveReturnableObjectCode(false),
appAttributeBindings(0),
fixedAttributeBindings(0),
excludedAttributes(0),
excludedCount(0),
uniformBindings(0) { }
virtual TLinker* getAsLinker() { return this; }
virtual ~TLinker() { }
virtual bool link(TCompilerList&, TUniformMap*) = 0;
virtual bool link(THandleList&) { return false; }
virtual void setAppAttributeBindings(const ShBindingTable* t) { appAttributeBindings = t; }
virtual void setFixedAttributeBindings(const ShBindingTable* t) { fixedAttributeBindings = t; }
virtual void getAttributeBindings(ShBindingTable const **t) const = 0;
virtual void setExcludedAttributes(const int* attributes, int count) { excludedAttributes = attributes; excludedCount = count; }
virtual ShBindingTable* getUniformBindings() const { return uniformBindings; }
virtual const void* getObjectCode() const { return 0; } // a real compiler would be returning object code here
virtual TInfoSink& getInfoSink() { return infoSink; }
TInfoSink& infoSink;
protected:
TLinker& operator=(TLinker&);
EShExecutable executable;
bool haveReturnableObjectCode; // true when objectCode is acceptable to send to driver
const ShBindingTable* appAttributeBindings;
const ShBindingTable* fixedAttributeBindings;
const int* excludedAttributes;
int excludedCount;
ShBindingTable* uniformBindings; // created by the linker
};
//
// This is the interface between the machine independent code
// and the machine dependent code.
//
// The machine dependent code should derive from the classes
// above. Then Construct*() and Delete*() will create and
// destroy the machine dependent objects, which contain the
// above machine independent information.
//
TCompiler* ConstructCompiler(EShLanguage, int);
TShHandleBase* ConstructLinker(EShExecutable, int);
TShHandleBase* ConstructBindings();
void DeleteLinker(TShHandleBase*);
void DeleteBindingList(TShHandleBase* bindingList);
TUniformMap* ConstructUniformMap();
void DeleteCompiler(TCompiler*);
void DeleteUniformMap(TUniformMap*);
#endif // _SHHANDLE_INCLUDED_
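The closing comment describes the split: this header supplies the machine-independent base classes, and a back end derives from them and implements the Construct*/Delete* factories. A hypothetical stub back end, just to show the shape; the class name and its trivial behavior are invented for illustration, and the include path is an assumption.
#include "ShHandle.h"

class TStubCompiler : public TCompiler {
public:
    TStubCompiler(EShLanguage l, TInfoSink& sink) : TCompiler(l, sink) {}
    bool compile(TIntermNode* /*root*/, int /*version*/, EProfile /*profile*/) override {
        haveValidObjectCode = true;  // a real back end would walk the AST and emit code here
        return true;                 // linkable() now reports true
    }
};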

1924 third_party/glslang-spirv/Include/Types.h vendored Normal file

File diff suppressed because it is too large


@ -0,0 +1,329 @@
//
// Copyright (C) 2002-2005 3Dlabs Inc. Ltd.
// Copyright (C) 2012-2013 LunarG, Inc.
//
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
//
// Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
//
// Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
//
// Neither the name of 3Dlabs Inc. Ltd. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
// COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
// POSSIBILITY OF SUCH DAMAGE.
//
//
// Implement types for tracking GLSL arrays, arrays of arrays, etc.
//
#ifndef _ARRAYS_INCLUDED
#define _ARRAYS_INCLUDED
namespace glslang {
// This is used to mean there is no size yet (unsized); it is waiting to get a size from somewhere else.
const int UnsizedArraySize = 0;
class TIntermTyped;
extern bool SameSpecializationConstants(TIntermTyped*, TIntermTyped*);
// Specialization constants need both a nominal size and a node that defines
// the specialization constant being used. Array types are the same when their
// size and specialization constant nodes are the same.
struct TArraySize {
unsigned int size;
TIntermTyped* node; // nullptr means no specialization constant node
bool operator==(const TArraySize& rhs) const
{
if (size != rhs.size)
return false;
if (node == nullptr || rhs.node == nullptr)
return node == rhs.node;
return SameSpecializationConstants(node, rhs.node);
}
};
//
// TSmallArrayVector is used as the container for the set of sizes in TArraySizes.
// It has generic-container semantics, while TArraySizes has array-of-array semantics.
// That is, TSmallArrayVector should be more focused on mechanism and TArraySizes on policy.
//
struct TSmallArrayVector {
//
// TODO: memory: TSmallArrayVector is intended to be smaller.
// Almost all arrays could be handled by two sizes each fitting
// in 16 bits, needing a real vector only in the cases where there
// are more than 3 sizes or a size needing more than 16 bits.
//
POOL_ALLOCATOR_NEW_DELETE(GetThreadPoolAllocator())
TSmallArrayVector() : sizes(nullptr) { }
virtual ~TSmallArrayVector() { dealloc(); }
// For breaking into two non-shared copies, independently modifiable.
TSmallArrayVector& operator=(const TSmallArrayVector& from)
{
if (from.sizes == nullptr)
sizes = nullptr;
else {
alloc();
*sizes = *from.sizes;
}
return *this;
}
int size() const
{
if (sizes == nullptr)
return 0;
return (int)sizes->size();
}
unsigned int frontSize() const
{
assert(sizes != nullptr && sizes->size() > 0);
return sizes->front().size;
}
TIntermTyped* frontNode() const
{
assert(sizes != nullptr && sizes->size() > 0);
return sizes->front().node;
}
void changeFront(unsigned int s)
{
assert(sizes != nullptr);
// this should only happen for implicitly sized arrays, not specialization constants
assert(sizes->front().node == nullptr);
sizes->front().size = s;
}
void push_back(unsigned int e, TIntermTyped* n)
{
alloc();
TArraySize pair = { e, n };
sizes->push_back(pair);
}
void push_front(const TSmallArrayVector& newDims)
{
alloc();
sizes->insert(sizes->begin(), newDims.sizes->begin(), newDims.sizes->end());
}
void pop_front()
{
assert(sizes != nullptr && sizes->size() > 0);
if (sizes->size() == 1)
dealloc();
else
sizes->erase(sizes->begin());
}
// 'this' should currently not be holding anything, and copyNonFront
// will make it hold a copy of all but the first element of rhs.
// (This would be useful for making a type that is dereferenced by
// one dimension.)
void copyNonFront(const TSmallArrayVector& rhs)
{
assert(sizes == nullptr);
if (rhs.size() > 1) {
alloc();
sizes->insert(sizes->begin(), rhs.sizes->begin() + 1, rhs.sizes->end());
}
}
unsigned int getDimSize(int i) const
{
assert(sizes != nullptr && (int)sizes->size() > i);
return (*sizes)[i].size;
}
void setDimSize(int i, unsigned int size) const
{
assert(sizes != nullptr && (int)sizes->size() > i);
assert((*sizes)[i].node == nullptr);
(*sizes)[i].size = size;
}
TIntermTyped* getDimNode(int i) const
{
assert(sizes != nullptr && (int)sizes->size() > i);
return (*sizes)[i].node;
}
bool operator==(const TSmallArrayVector& rhs) const
{
if (sizes == nullptr && rhs.sizes == nullptr)
return true;
if (sizes == nullptr || rhs.sizes == nullptr)
return false;
return *sizes == *rhs.sizes;
}
bool operator!=(const TSmallArrayVector& rhs) const { return ! operator==(rhs); }
protected:
TSmallArrayVector(const TSmallArrayVector&);
void alloc()
{
if (sizes == nullptr)
sizes = new TVector<TArraySize>;
}
void dealloc()
{
delete sizes;
sizes = nullptr;
}
TVector<TArraySize>* sizes; // will either hold such a pointer, or in the future, hold the two array sizes
};
//
// Represent an array, or array of arrays, to arbitrary depth. This is not
// done through a hierarchy of types in a type tree, rather all contiguous arrayness
// in the type hierarchy is localized into this single cumulative object.
//
// The arrayness in TType is a pointer, so that it can be non-allocated and zero
// for the vast majority of types that are non-array types.
//
// Order Policy: these are all identical:
// - left to right order within a contiguous set of ...[..][..][..]... in the source language
// - index order 0, 1, 2, ... within the 'sizes' member below
// - outer-most to inner-most
//
struct TArraySizes {
POOL_ALLOCATOR_NEW_DELETE(GetThreadPoolAllocator())
TArraySizes() : implicitArraySize(1) { }
// For breaking into two non-shared copies, independently modifiable.
TArraySizes& operator=(const TArraySizes& from)
{
implicitArraySize = from.implicitArraySize;
sizes = from.sizes;
return *this;
}
// translate from array-of-array semantics to container semantics
int getNumDims() const { return sizes.size(); }
int getDimSize(int dim) const { return sizes.getDimSize(dim); }
TIntermTyped* getDimNode(int dim) const { return sizes.getDimNode(dim); }
void setDimSize(int dim, int size) { sizes.setDimSize(dim, size); }
int getOuterSize() const { return sizes.frontSize(); }
TIntermTyped* getOuterNode() const { return sizes.frontNode(); }
int getCumulativeSize() const
{
int size = 1;
for (int d = 0; d < sizes.size(); ++d) {
// this only makes sense in paths that have a known array size
assert(sizes.getDimSize(d) != UnsizedArraySize);
size *= sizes.getDimSize(d);
}
return size;
}
void addInnerSize() { addInnerSize((unsigned)UnsizedArraySize); }
void addInnerSize(int s) { addInnerSize((unsigned)s, nullptr); }
void addInnerSize(int s, TIntermTyped* n) { sizes.push_back((unsigned)s, n); }
void addInnerSize(TArraySize pair) { sizes.push_back(pair.size, pair.node); }
void changeOuterSize(int s) { sizes.changeFront((unsigned)s); }
int getImplicitSize() const { return (int)implicitArraySize; }
void setImplicitSize(int s) { implicitArraySize = s; }
bool isInnerImplicit() const
{
for (int d = 1; d < sizes.size(); ++d) {
if (sizes.getDimSize(d) == (unsigned)UnsizedArraySize)
return true;
}
return false;
}
bool clearInnerImplicit()
{
for (int d = 1; d < sizes.size(); ++d) {
if (sizes.getDimSize(d) == (unsigned)UnsizedArraySize)
setDimSize(d, 1);
}
return false;
}
bool isInnerSpecialization() const
{
for (int d = 1; d < sizes.size(); ++d) {
if (sizes.getDimNode(d) != nullptr)
return true;
}
return false;
}
bool isOuterSpecialization()
{
return sizes.getDimNode(0) != nullptr;
}
bool isImplicit() const { return getOuterSize() == UnsizedArraySize || isInnerImplicit(); }
void addOuterSizes(const TArraySizes& s) { sizes.push_front(s.sizes); }
void dereference() { sizes.pop_front(); }
void copyDereferenced(const TArraySizes& rhs)
{
assert(sizes.size() == 0);
if (rhs.sizes.size() > 1)
sizes.copyNonFront(rhs.sizes);
}
bool sameInnerArrayness(const TArraySizes& rhs) const
{
if (sizes.size() != rhs.sizes.size())
return false;
for (int d = 1; d < sizes.size(); ++d) {
if (sizes.getDimSize(d) != rhs.sizes.getDimSize(d) ||
sizes.getDimNode(d) != rhs.sizes.getDimNode(d))
return false;
}
return true;
}
bool operator==(const TArraySizes& rhs) { return sizes == rhs.sizes; }
bool operator!=(const TArraySizes& rhs) { return sizes != rhs.sizes; }
protected:
TSmallArrayVector sizes;
TArraySizes(const TArraySizes&);
// for tracking maximum referenced index, before an explicit size is given
// applies only to the outer-most dimension
int implicitArraySize;
};
} // end namespace glslang
#endif // _ARRAYS_INCLUDED_
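A short sketch of the array-of-array bookkeeping above, building the sizes for a hypothetical int a[3][5] declaration. TArraySizes allocates through the pool allocator, so this assumes the thread pool allocator is already active; the include path is also an assumption.
#include "arrays.h"

int CumulativeSizeExample() {
    glslang::TArraySizes& sizes = *new glslang::TArraySizes;  // pool-allocated, never freed individually
    sizes.addInnerSize(3);  // outer-most dimension first, per the order policy above
    sizes.addInnerSize(5);
    // sizes.getNumDims() == 2 and sizes.getOuterSize() == 3 at this point.
    return sizes.getCumulativeSize();  // 3 * 5 == 15
}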

File diff suppressed because it is too large


@ -0,0 +1,6 @@
// This header is generated by the make-revision script.
// For the version, it uses the latest git tag followed by the number of commits.
// For the date, it uses the current date (when the script is run).
#define GLSLANG_REVISION "Overload400-PrecQual.2000"
#define GLSLANG_DATE "12-Apr-2017"


@ -0,0 +1,13 @@
// The file revision.h should be updated to the latest version, somehow, on
// check-in, if glslang has changed.
//
// revision.template is the source for revision.h when using SubWCRev as the
// method of updating revision.h. You don't have to do it this way, the
// requirement is only that revision.h gets updated.
//
// revision.h is under source control so that not all consumers of glslang
// source have to figure out how to create revision.h just to get a build
// going. However, if it is not updated, it can be a version behind.
#define GLSLANG_REVISION "$WCREV$"
#define GLSLANG_DATE "$WCDATE$"

68 third_party/glslang-spirv/Logger.cpp vendored Normal file

@ -0,0 +1,68 @@
//
// Copyright (C) 2016 Google, Inc.
//
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
//
// Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
//
// Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
//
// Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
// COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
// POSSIBILITY OF SUCH DAMAGE.
#include "Logger.h"
#include <algorithm>
#include <iterator>
#include <sstream>
namespace spv {
void SpvBuildLogger::tbdFunctionality(const std::string& f)
{
if (std::find(std::begin(tbdFeatures), std::end(tbdFeatures), f) == std::end(tbdFeatures))
tbdFeatures.push_back(f);
}
void SpvBuildLogger::missingFunctionality(const std::string& f)
{
if (std::find(std::begin(missingFeatures), std::end(missingFeatures), f) == std::end(missingFeatures))
missingFeatures.push_back(f);
}
std::string SpvBuildLogger::getAllMessages() const {
std::ostringstream messages;
for (auto it = tbdFeatures.cbegin(); it != tbdFeatures.cend(); ++it)
messages << "TBD functionality: " << *it << "\n";
for (auto it = missingFeatures.cbegin(); it != missingFeatures.cend(); ++it)
messages << "Missing functionality: " << *it << "\n";
for (auto it = warnings.cbegin(); it != warnings.cend(); ++it)
messages << "warning: " << *it << "\n";
for (auto it = errors.cbegin(); it != errors.cend(); ++it)
messages << "error: " << *it << "\n";
return messages.str();
}
} // end spv namespace

74 third_party/glslang-spirv/Logger.h vendored Normal file

@ -0,0 +1,74 @@
//
// Copyright (C) 2016 Google, Inc.
//
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
//
// Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
//
// Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
//
// Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
// COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
// POSSIBILITY OF SUCH DAMAGE.
#ifndef GLSLANG_SPIRV_LOGGER_H
#define GLSLANG_SPIRV_LOGGER_H
#include <string>
#include <vector>
namespace spv {
// A class for holding all SPIR-V build status messages, including
// missing/TBD functionalities, warnings, and errors.
class SpvBuildLogger {
public:
SpvBuildLogger() {}
// Registers a TBD functionality.
void tbdFunctionality(const std::string& f);
// Registers a missing functionality.
void missingFunctionality(const std::string& f);
// Logs a warning.
void warning(const std::string& w) { warnings.push_back(w); }
// Logs an error.
void error(const std::string& e) { errors.push_back(e); }
// Returns all messages accumulated in the order of:
// TBD functionalities, missing functionalities, warnings, errors.
std::string getAllMessages() const;
private:
SpvBuildLogger(const SpvBuildLogger&);
std::vector<std::string> tbdFeatures;
std::vector<std::string> missingFeatures;
std::vector<std::string> warnings;
std::vector<std::string> errors;
};
} // end spv namespace
#endif // GLSLANG_SPIRV_LOGGER_H
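A tiny usage sketch for the logger; it assumes Logger.cpp above is compiled and linked alongside it, and the include path is an assumption.
#include "Logger.h"
#include <iostream>

int main() {
    spv::SpvBuildLogger logger;
    logger.tbdFunctionality("multiple entry points");
    logger.warning("no binding specified for uniform");
    logger.error("unsupported storage class");
    // Output is grouped: TBD functionality, missing functionality, warnings, then errors.
    std::cout << logger.getAllMessages();
    return 0;
}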


@ -1,11 +1,11 @@
//
//Copyright (C) 2015 LunarG, Inc.
// Copyright (C) 2015 LunarG, Inc.
//
//All rights reserved.
// All rights reserved.
//
//Redistribution and use in source and binary forms, with or without
//modification, are permitted provided that the following conditions
//are met:
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
//
// Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
@ -19,18 +19,18 @@
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
//THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
//"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
//LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
//FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
//COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
//INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
//BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
//LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
//CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
//LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
//ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
//POSSIBILITY OF SUCH DAMAGE.
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
// COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
// POSSIBILITY OF SUCH DAMAGE.
//
#include "SPVRemapper.h"
@ -42,6 +42,7 @@
#include <algorithm>
#include <cassert>
#include "Include/Common.h"
namespace spv {
@ -102,10 +103,10 @@ namespace spv {
switch (opCode) {
case spv::OpTypeVector: // fall through
case spv::OpTypeMatrix: // ...
case spv::OpTypeSampler: // ...
case spv::OpTypeArray: // ...
case spv::OpTypeRuntimeArray: // ...
case spv::OpTypeMatrix: // ...
case spv::OpTypeSampler: // ...
case spv::OpTypeArray: // ...
case spv::OpTypeRuntimeArray: // ...
case spv::OpTypePipe: return range_t(2, 3);
case spv::OpTypeStruct: // fall through
case spv::OpTypeFunction: return range_t(2, maxCount);
@ -126,6 +127,38 @@ namespace spv {
}
}
// Return the size of a type in 32-bit words. This currently only
// handles ints and floats, and is only invoked by queries which must be
// integer types. If ever needed, it can be generalized.
unsigned spirvbin_t::typeSizeInWords(spv::Id id) const
{
const unsigned typeStart = idPos(id);
const spv::Op opCode = asOpCode(typeStart);
if (errorLatch)
return 0;
switch (opCode) {
case spv::OpTypeInt: // fall through...
case spv::OpTypeFloat: return (spv[typeStart+2]+31)/32;
default:
return 0;
}
}
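// Worked example (annotation, not part of the upstream source): for an
// "OpTypeFloat 64" instruction the width operand is spv[typeStart+2] == 64,
// so this returns (64 + 31) / 32 == 2 words; "OpTypeInt 32 1" has width 32
// and yields 1 word. Any other type opcode falls through to the default and
// returns 0.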
// Looks up the type of a given const or variable ID, and
// returns its size in 32-bit words.
unsigned spirvbin_t::idTypeSizeInWords(spv::Id id) const
{
const auto tid_it = idTypeSizeMap.find(id);
if (tid_it == idTypeSizeMap.end()) {
error("type size for ID not found");
return 0;
}
return tid_it->second;
}
// Is this an opcode we should remove when using --strip?
bool spirvbin_t::isStripOp(spv::Op opCode) const
{
@ -139,6 +172,7 @@ namespace spv {
}
}
// Return true if this opcode is flow control
bool spirvbin_t::isFlowCtrl(spv::Op opCode) const
{
switch (opCode) {
@ -154,6 +188,7 @@ namespace spv {
}
}
// Return true if this opcode defines a type
bool spirvbin_t::isTypeOp(spv::Op opCode) const
{
switch (opCode) {
@ -181,17 +216,23 @@ namespace spv {
}
}
// Return true if this opcode defines a constant
bool spirvbin_t::isConstOp(spv::Op opCode) const
{
switch (opCode) {
case spv::OpConstantNull: error("unimplemented constant type");
case spv::OpConstantSampler: error("unimplemented constant type");
case spv::OpConstantNull:
case spv::OpConstantSampler:
error("unimplemented constant type");
return true;
case spv::OpConstantTrue:
case spv::OpConstantFalse:
case spv::OpConstantComposite:
case spv::OpConstant: return true;
default: return false;
case spv::OpConstant:
return true;
default:
return false;
}
}
@ -217,19 +258,31 @@ namespace spv {
{
assert(id != spv::NoResult && newId != spv::NoResult);
if (id > bound()) {
error(std::string("ID out of range: ") + std::to_string(id));
return spirvbin_t::unused;
}
if (id >= idMapL.size())
idMapL.resize(id+1, unused);
if (newId != unmapped && newId != unused) {
if (isOldIdUnused(id))
if (isOldIdUnused(id)) {
error(std::string("ID unused in module: ") + std::to_string(id));
return spirvbin_t::unused;
}
if (!isOldIdUnmapped(id))
if (!isOldIdUnmapped(id)) {
error(std::string("ID already mapped: ") + std::to_string(id) + " -> "
+ std::to_string(localId(id)));
+ std::to_string(localId(id)));
if (isNewIdMapped(newId))
return spirvbin_t::unused;
}
if (isNewIdMapped(newId)) {
error(std::string("ID already used in module: ") + std::to_string(newId));
return spirvbin_t::unused;
}
msg(4, 4, std::string("map: ") + std::to_string(id) + " -> " + std::to_string(newId));
setMapped(newId);
@ -255,7 +308,6 @@ namespace spv {
return literal;
}
void spirvbin_t::applyMap()
{
msg(3, 2, std::string("Applying map: "));
@ -264,12 +316,15 @@ namespace spv {
process(inst_fn_nop, // ignore instructions
[this](spv::Id& id) {
id = localId(id);
if (errorLatch)
return;
assert(id != unused && id != unmapped);
}
);
}
// Find free IDs for anything we haven't mapped
void spirvbin_t::mapRemainder()
{
@ -283,25 +338,31 @@ namespace spv {
continue;
// Find a new mapping for any used but unmapped IDs
if (isOldIdUnmapped(id))
if (isOldIdUnmapped(id)) {
localId(id, unusedId = nextUnusedId(unusedId));
if (errorLatch)
return;
}
if (isOldIdUnmapped(id))
if (isOldIdUnmapped(id)) {
error(std::string("old ID not mapped: ") + std::to_string(id));
return;
}
// Track max bound
maxBound = std::max(maxBound, localId(id) + 1);
if (errorLatch)
return;
}
bound(maxBound); // reset header ID bound to as big as it now needs to be
}
// Mark debug instructions for stripping
void spirvbin_t::stripDebug()
{
if ((options & STRIP) == 0)
return;
// build local Id and name maps
// Strip instructions in the stripOp set: debug info.
process(
[&](spv::Op opCode, unsigned start) {
// remember opcodes we want to strip later
@ -312,6 +373,32 @@ namespace spv {
op_fn_nop);
}
// Mark instructions that refer to now-removed IDs for stripping
void spirvbin_t::stripDeadRefs()
{
process(
[&](spv::Op opCode, unsigned start) {
// strip opcodes pointing to removed data
switch (opCode) {
case spv::OpName:
case spv::OpMemberName:
case spv::OpDecorate:
case spv::OpMemberDecorate:
if (idPosR.find(asId(start+1)) == idPosR.end())
stripInst(start);
break;
default:
break; // leave it alone
}
return true;
},
op_fn_nop);
strip();
}
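stripDeadRefs() keeps the debug and decoration stream consistent after dead-code elimination: any OpName/OpMemberName/OpDecorate/OpMemberDecorate whose target no longer appears in idPosR is marked for removal. The same filter over a toy instruction list, with simplified stand-in types:

#include <cstdint>
#include <iostream>
#include <set>
#include <string>
#include <vector>

struct DebugInst {
    std::uint32_t targetId;  // the ID an OpName/OpDecorate refers to
    std::string   text;
};

int main() {
    // Stand-in for idPosR: IDs that still have a defining instruction.
    const std::set<std::uint32_t> liveIds = {3, 7};

    const std::vector<DebugInst> debug = {
        {3, "OpName %3 \"pos\""},
        {5, "OpName %5 \"dead_var\""},          // %5 was removed by DCE
        {7, "OpDecorate %7 RelaxedPrecision"},
    };

    for (const auto& inst : debug) {
        if (liveIds.count(inst.targetId) == 0)
            std::cout << "strip: " << inst.text << "\n";
        else
            std::cout << "keep:  " << inst.text << "\n";
    }
    return 0;
}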
// Update local maps of ID, type, etc positions
void spirvbin_t::buildLocalMaps()
{
msg(2, 2, std::string("build local maps: "));
@ -320,10 +407,9 @@ namespace spv {
idMapL.clear();
// preserve nameMap, so we don't clear that.
fnPos.clear();
fnPosDCE.clear();
fnCalls.clear();
typeConstPos.clear();
typeConstPosR.clear();
idPosR.clear();
entryPoint = spv::NoResult;
largestNewId = 0;
@ -335,9 +421,27 @@ namespace spv {
// build local Id and name maps
process(
[&](spv::Op opCode, unsigned start) {
// remember opcodes we want to strip later
if ((options & STRIP) && isStripOp(opCode))
stripInst(start);
unsigned word = start+1;
spv::Id typeId = spv::NoResult;
if (spv::InstructionDesc[opCode].hasType())
typeId = asId(word++);
// If there's a result ID, remember the size of its type
if (spv::InstructionDesc[opCode].hasResult()) {
const spv::Id resultId = asId(word++);
idPosR[resultId] = start;
if (typeId != spv::NoResult) {
const unsigned idTypeSize = typeSizeInWords(typeId);
if (errorLatch)
return false;
if (idTypeSize != 0)
idTypeSizeMap[resultId] = idTypeSize;
}
}
if (opCode == spv::Op::OpName) {
const spv::Id target = asId(start+1);
@ -349,24 +453,31 @@ namespace spv {
} else if (opCode == spv::Op::OpEntryPoint) {
entryPoint = asId(start + 2);
} else if (opCode == spv::Op::OpFunction) {
if (fnStart != 0)
if (fnStart != 0) {
error("nested function found");
return false;
}
fnStart = start;
fnRes = asId(start + 2);
} else if (opCode == spv::Op::OpFunctionEnd) {
assert(fnRes != spv::NoResult);
if (fnStart == 0)
if (fnStart == 0) {
error("function end without function start");
return false;
}
fnPos[fnRes] = range_t(fnStart, start + asWordCount(start));
fnStart = 0;
} else if (isConstOp(opCode)) {
if (errorLatch)
return false;
assert(asId(start + 2) != spv::NoResult);
typeConstPos.insert(start);
typeConstPosR[asId(start + 2)] = start;
} else if (isTypeOp(opCode)) {
assert(asId(start + 1) != spv::NoResult);
typeConstPos.insert(start);
typeConstPosR[asId(start + 1)] = start;
}
return false;
@ -381,30 +492,37 @@ namespace spv {
{
msg(2, 2, std::string("validating: "));
if (spv.size() < header_size)
if (spv.size() < header_size) {
error("file too short: ");
return;
}
if (magic() != spv::MagicNumber)
if (magic() != spv::MagicNumber) {
error("bad magic number");
return;
}
// field 1 = version
// field 2 = generator magic
// field 3 = result <id> bound
if (schemaNum() != 0)
if (schemaNum() != 0) {
error("bad schema, must be 0");
return;
}
}
int spirvbin_t::processInstruction(unsigned word, instfn_t instFn, idfn_t idFn)
{
const auto instructionStart = word;
const unsigned wordCount = asWordCount(instructionStart);
const spv::Op opCode = asOpCode(instructionStart);
const int nextInst = word++ + wordCount;
spv::Op opCode = asOpCode(instructionStart);
if (nextInst > int(spv.size()))
if (nextInst > int(spv.size())) {
error("spir instruction terminated too early");
return -1;
}
// Base for computing number of operands; will be updated as more is learned
unsigned numOperands = wordCount - 1;
@ -435,10 +553,31 @@ namespace spv {
return nextInst;
}
// Circular buffer so we can look back at previous unmapped values during the mapping pass.
static const unsigned idBufferSize = 4;
spv::Id idBuffer[idBufferSize];
unsigned idBufferPos = 0;
// Store IDs from instruction in our map
for (int op = 0; numOperands > 0; ++op, --numOperands) {
// SpecConstantOp is special: it includes the operands of another opcode which is
// given as a literal in the 3rd word. We will switch over to pretending that the
// opcode being processed is the literal opcode value of the SpecConstantOp. See the
// SPIRV spec for details. This way we will handle IDs and literals as appropriate for
// the embedded op.
if (opCode == spv::OpSpecConstantOp) {
if (op == 0) {
opCode = asOpCode(word++); // this is the opcode embedded in the SpecConstantOp.
--numOperands;
}
}
switch (spv::InstructionDesc[opCode].operands.getClass(op)) {
case spv::OperandId:
case spv::OperandScope:
case spv::OperandMemorySemantics:
idBuffer[idBufferPos] = asId(word);
idBufferPos = (idBufferPos + 1) % idBufferSize;
idFn(asId(word++));
break;
@ -456,13 +595,28 @@ namespace spv {
// word += numOperands;
return nextInst;
case spv::OperandVariableLiteralId:
while (numOperands > 0) {
++word; // immediate
idFn(asId(word++)); // ID
numOperands -= 2;
case spv::OperandVariableLiteralId: {
if (opCode == OpSwitch) {
// word-2 is the position of the selector ID. OpSwitch Literals match its type.
// In case the IDs are currently being remapped, we get the word[-2] ID from
// the circular idBuffer.
const unsigned literalSizePos = (idBufferPos+idBufferSize-2) % idBufferSize;
const unsigned literalSize = idTypeSizeInWords(idBuffer[literalSizePos]);
const unsigned numLiteralIdPairs = (nextInst-word) / (1+literalSize);
if (errorLatch)
return -1;
for (unsigned arg=0; arg<numLiteralIdPairs; ++arg) {
word += literalSize; // literal
idFn(asId(word++)); // label
}
} else {
                        assert(0); // currently, only OpSwitch uses OperandVariableLiteralId
}
return nextInst;
}
case spv::OperandLiteralString: {
const int stringWordCount = literalStringWords(literalString(word));
@ -499,9 +653,7 @@ namespace spv {
case spv::OperandSelect:
case spv::OperandLoop:
case spv::OperandFunction:
case spv::OperandMemorySemantics:
case spv::OperandMemoryAccess:
case spv::OperandScope:
case spv::OperandGroupOperation:
case spv::OperandKernelEnqueueFlags:
case spv::OperandKernelProfilingInfo:
@ -531,9 +683,13 @@ namespace spv {
// basic parsing and InstructionDesc table borrowed from SpvDisassemble.cpp...
unsigned nextInst = unsigned(spv.size());
for (unsigned word = begin; word < end; word = nextInst)
for (unsigned word = begin; word < end; word = nextInst) {
nextInst = processInstruction(word, instFn, idFn);
if (errorLatch)
return *this;
}
return *this;
}
@ -548,8 +704,11 @@ namespace spv {
for (const char c : name.first)
hashval = hashval * 1009 + c;
if (isOldIdUnmapped(name.second))
if (isOldIdUnmapped(name.second)) {
localId(name.second, nextUnusedId(hashval % softTypeIdLimit + firstMappedID));
if (errorLatch)
return;
}
}
}
@ -571,8 +730,11 @@ namespace spv {
[&](spv::Op, unsigned start) { instPos.push_back(start); return true; },
op_fn_nop);
if (errorLatch)
return;
// Window size for context-sensitive canonicalization values
// Emperical best size from a single data set. TODO: Would be a good tunable.
// Empirical best size from a single data set. TODO: Would be a good tunable.
// We essentially perform a little convolution around each instruction,
// to capture the flavor of nearby code, to hopefully match to similar
// code in other modules.
@ -606,8 +768,12 @@ namespace spv {
hashval = hashval * 30103 + asOpCodeHash(instPos[i]); // 30103 = semiarbitrary prime
}
if (isOldIdUnmapped(resId))
if (isOldIdUnmapped(resId)) {
localId(resId, nextUnusedId(hashval % softTypeIdLimit + firstMappedID));
if (errorLatch)
return;
}
}
}
}
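The canonicalization pass hashes a window of opcodes around each defining instruction (the "little convolution" described above) so that similar code in different modules tends to receive the same IDs. A toy version of that windowed hash, with arbitrary constants:

#include <algorithm>
#include <cstddef>
#include <cstdint>
#include <iostream>
#include <vector>

// Hash +/- windowSize opcodes around 'center', clamped to the stream ends.
// Nearby code dominates the hash, so similar neighborhoods hash alike.
std::uint32_t windowHash(const std::vector<std::uint32_t>& opcodes,
                         std::size_t center, std::size_t windowSize) {
    const std::size_t lo = center > windowSize ? center - windowSize : 0;
    const std::size_t hi = std::min(opcodes.size(), center + windowSize + 1);

    std::uint32_t hashval = 17;
    for (std::size_t i = lo; i < hi; ++i)
        hashval = hashval * 30103 + opcodes[i];  // 30103: a semi-arbitrary prime
    return hashval;
}

int main() {
    const std::vector<std::uint32_t> opcodes = {43, 61, 129, 62, 253};  // fake opcode stream
    std::cout << windowHash(opcodes, 2, 2) << "\n";
    return 0;
}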
@ -700,6 +866,9 @@ namespace spv {
[&](spv::Id& id) { if (idMap.find(id) != idMap.end()) id = idMap[id]; }
);
if (errorLatch)
return;
// EXPERIMENTAL: Implicit output stores
fnLocalVars.clear();
idMap.clear();
@ -720,11 +889,17 @@ namespace spv {
},
op_fn_nop);
if (errorLatch)
return;
process(
inst_fn_nop,
[&](spv::Id& id) { if (idMap.find(id) != idMap.end()) id = idMap[id]; }
);
if (errorLatch)
return;
strip(); // strip out data we decided to eliminate
}
@ -816,7 +991,7 @@ namespace spv {
},
// If local var id used anywhere else, don't eliminate
[&](spv::Id& id) {
[&](spv::Id& id) {
if (fnLocalVars.count(id) > 0) {
fnLocalVars.erase(id);
idMap.erase(id);
@ -824,6 +999,9 @@ namespace spv {
}
);
if (errorLatch)
return;
process(
[&](spv::Op opCode, unsigned start) {
if (opCode == spv::OpLoad && fnLocalVars.count(asId(start+3)) > 0)
@ -832,6 +1010,9 @@ namespace spv {
},
op_fn_nop);
if (errorLatch)
return;
// Chase replacements to their origins, in case there is a chain such as:
// 2 = store 1
// 3 = load 2
@ -865,6 +1046,9 @@ namespace spv {
}
);
if (errorLatch)
return;
strip(); // strip out data we decided to eliminate
}
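The load/store optimization records, for each eliminated local variable, which value a later load should really refer to, then chases chains ("2 = store 1; 3 = load 2" means uses of 3 become 1) before rewriting operands. The chain chasing on its own, with illustrative types:

#include <cstdint>
#include <iostream>
#include <unordered_map>

using Id = std::uint32_t;

// Follow a replacement map to its origin so chained load/store pairs
// collapse to the original value in a single rewrite.
Id chase(const std::unordered_map<Id, Id>& idMap, Id id) {
    auto it = idMap.find(id);
    while (it != idMap.end()) {   // assumes the map is acyclic, as built here
        id = it->second;
        it = idMap.find(id);
    }
    return id;
}

int main() {
    // 3 was loaded from 2, and 2 was a store of 1: uses of 3 should become 1.
    const std::unordered_map<Id, Id> idMap = {{3, 2}, {2, 1}};
    std::cout << chase(idMap, 3) << "\n";  // prints 1
    return 0;
}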
@ -890,7 +1074,6 @@ namespace spv {
if (call_it == fnCalls.end() || call_it->second == 0) {
changed = true;
stripRange.push_back(fn->second);
fnPosDCE.insert(*fn);
// decrease counts of called functions
process(
@ -909,6 +1092,9 @@ namespace spv {
fn->second.first,
fn->second.second);
if (errorLatch)
return;
fn = fnPos.erase(fn);
} else ++fn;
}
@ -925,21 +1111,37 @@ namespace spv {
// Count function variable use
process(
[&](spv::Op opCode, unsigned start) {
if (opCode == spv::OpVariable) { ++varUseCount[asId(start+2)]; return true; }
return false;
if (opCode == spv::OpVariable) {
++varUseCount[asId(start+2)];
return true;
} else if (opCode == spv::OpEntryPoint) {
const int wordCount = asWordCount(start);
for (int i = 4; i < wordCount; i++) {
++varUseCount[asId(start+i)];
}
return true;
} else
return false;
},
[&](spv::Id& id) { if (varUseCount[id]) ++varUseCount[id]; }
);
if (errorLatch)
return;
// Remove single-use function variables + associated decorations and names
process(
[&](spv::Op opCode, unsigned start) {
if ((opCode == spv::OpVariable && varUseCount[asId(start+2)] == 1) ||
(opCode == spv::OpDecorate && varUseCount[asId(start+1)] == 1) ||
(opCode == spv::OpName && varUseCount[asId(start+1)] == 1)) {
stripInst(start);
}
spv::Id id = spv::NoResult;
if (opCode == spv::OpVariable)
id = asId(start+2);
if (opCode == spv::OpDecorate || opCode == spv::OpName)
id = asId(start+1);
if (id != spv::NoResult && varUseCount[id] == 1)
stripInst(start);
return true;
},
op_fn_nop);
@ -956,28 +1158,37 @@ namespace spv {
std::unordered_map<spv::Id, int> typeUseCount;
// Count total type usage
process(inst_fn_nop,
[&](spv::Id& id) { if (isType[id]) ++typeUseCount[id]; }
);
// This is not the most efficient algorithm, but this is an offline tool, and
// it's easy to write this way. Can be improved opportunistically if needed.
bool changed = true;
while (changed) {
changed = false;
strip();
typeUseCount.clear();
// Remove types from deleted code
for (const auto& fn : fnPosDCE)
// Count total type usage
process(inst_fn_nop,
[&](spv::Id& id) { if (isType[id]) --typeUseCount[id]; },
fn.second.first, fn.second.second);
[&](spv::Id& id) { if (isType[id]) ++typeUseCount[id]; }
);
// Remove single reference types
for (const auto typeStart : typeConstPos) {
const spv::Id typeId = asTypeConstId(typeStart);
if (typeUseCount[typeId] == 1) {
--typeUseCount[typeId];
stripInst(typeStart);
if (errorLatch)
return;
// Remove single reference types
for (const auto typeStart : typeConstPos) {
const spv::Id typeId = asTypeConstId(typeStart);
if (typeUseCount[typeId] == 1) {
changed = true;
--typeUseCount[typeId];
stripInst(typeStart);
}
}
if (errorLatch)
return;
}
}
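dceTypes() now iterates to a fixed point: strip, recount uses, drop any type or constant that nothing else references, and repeat, so a chain such as pointer -> array -> int unravels completely. The fixed-point loop on a toy dependency graph, using "no external references and not a root" as the stand-in criterion:

#include <iostream>
#include <map>
#include <set>
#include <vector>

int main() {
    // Each entry lists the types its definition references.
    std::map<int, std::vector<int>> refs = {
        {1, {}},   // int          (referenced by 2)
        {2, {1}},  // array of int (referenced by 3)
        {3, {2}},  // pointer      (referenced by nothing: dead)
        {4, {}},   // bool         (kept as a root)
    };
    const std::set<int> roots = {4};

    bool changed = true;
    while (changed) {
        changed = false;
        std::map<int, int> useCount;
        for (const auto& t : refs)
            for (int used : t.second)
                ++useCount[used];
        for (auto it = refs.begin(); it != refs.end();) {
            if (roots.count(it->first) == 0 && useCount[it->first] == 0) {
                std::cout << "removing type " << it->first << "\n";  // 3, then 2, then 1
                it = refs.erase(it);
                changed = true;
            } else {
                ++it;
            }
        }
    }
    return 0;
}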
#ifdef NOTDEF
bool spirvbin_t::matchType(const spirvbin_t::globaltypes_t& globalTypes, spv::Id lt, spv::Id gt) const
{
@ -1037,7 +1248,6 @@ namespace spv {
}
}
// Look for an equivalent type in the globalTypes map
spv::Id spirvbin_t::findType(const spirvbin_t::globaltypes_t& globalTypes, spv::Id lt) const
{
@ -1050,12 +1260,14 @@ namespace spv {
}
#endif // NOTDEF
// Return start position in SPV of given type. error if not found.
unsigned spirvbin_t::typePos(spv::Id id) const
// Return start position in SPV of given Id. error if not found.
unsigned spirvbin_t::idPos(spv::Id id) const
{
const auto tid_it = typeConstPosR.find(id);
if (tid_it == typeConstPosR.end())
error("type ID not found");
const auto tid_it = idPosR.find(id);
if (tid_it == idPosR.end()) {
error("ID not found");
return 0;
}
return tid_it->second;
}
@ -1073,11 +1285,11 @@ namespace spv {
case spv::OpTypeInt: return 3 + (spv[typeStart+3]);
case spv::OpTypeFloat: return 5;
case spv::OpTypeVector:
return 6 + hashType(typePos(spv[typeStart+2])) * (spv[typeStart+3] - 1);
return 6 + hashType(idPos(spv[typeStart+2])) * (spv[typeStart+3] - 1);
case spv::OpTypeMatrix:
return 30 + hashType(typePos(spv[typeStart+2])) * (spv[typeStart+3] - 1);
return 30 + hashType(idPos(spv[typeStart+2])) * (spv[typeStart+3] - 1);
case spv::OpTypeImage:
return 120 + hashType(typePos(spv[typeStart+2])) +
return 120 + hashType(idPos(spv[typeStart+2])) +
spv[typeStart+3] + // dimensionality
spv[typeStart+4] * 8 * 16 + // depth
spv[typeStart+5] * 4 * 16 + // arrayed
@ -1088,24 +1300,24 @@ namespace spv {
case spv::OpTypeSampledImage:
return 502;
case spv::OpTypeArray:
return 501 + hashType(typePos(spv[typeStart+2])) * spv[typeStart+3];
return 501 + hashType(idPos(spv[typeStart+2])) * spv[typeStart+3];
case spv::OpTypeRuntimeArray:
return 5000 + hashType(typePos(spv[typeStart+2]));
return 5000 + hashType(idPos(spv[typeStart+2]));
case spv::OpTypeStruct:
{
std::uint32_t hash = 10000;
for (unsigned w=2; w < wordCount; ++w)
hash += w * hashType(typePos(spv[typeStart+w]));
hash += w * hashType(idPos(spv[typeStart+w]));
return hash;
}
case spv::OpTypeOpaque: return 6000 + spv[typeStart+2];
case spv::OpTypePointer: return 100000 + hashType(typePos(spv[typeStart+3]));
case spv::OpTypePointer: return 100000 + hashType(idPos(spv[typeStart+3]));
case spv::OpTypeFunction:
{
std::uint32_t hash = 200000;
for (unsigned w=2; w < wordCount; ++w)
hash += w * hashType(typePos(spv[typeStart+w]));
hash += w * hashType(idPos(spv[typeStart+w]));
return hash;
}
@ -1122,14 +1334,14 @@ namespace spv {
case spv::OpConstantFalse: return 300008;
case spv::OpConstantComposite:
{
std::uint32_t hash = 300011 + hashType(typePos(spv[typeStart+1]));
std::uint32_t hash = 300011 + hashType(idPos(spv[typeStart+1]));
for (unsigned w=3; w < wordCount; ++w)
hash += w * hashType(typePos(spv[typeStart+w]));
hash += w * hashType(idPos(spv[typeStart+w]));
return hash;
}
case spv::OpConstant:
{
std::uint32_t hash = 400011 + hashType(typePos(spv[typeStart+1]));
std::uint32_t hash = 400011 + hashType(idPos(spv[typeStart+1]));
for (unsigned w=3; w < wordCount; ++w)
hash += w * spv[typeStart+w];
return hash;
@ -1154,12 +1366,17 @@ namespace spv {
const spv::Id resId = asTypeConstId(typeStart);
const std::uint32_t hashval = hashType(typeStart);
if (isOldIdUnmapped(resId))
if (errorLatch)
return;
if (isOldIdUnmapped(resId)) {
localId(resId, nextUnusedId(hashval % softTypeIdLimit + firstMappedID));
if (errorLatch)
return;
}
}
}
// Strip a single binary by removing ranges given in stripRange
void spirvbin_t::strip()
{
@ -1175,7 +1392,7 @@ namespace spv {
int strippedPos = 0;
for (unsigned word = 0; word < unsigned(spv.size()); ++word) {
if (strip_it != stripRange.end() && word >= strip_it->second)
while (strip_it != stripRange.end() && word >= strip_it->second)
++strip_it;
if (strip_it == stripRange.end() || word < strip_it->first || word >= strip_it->second)
@ -1196,25 +1413,56 @@ namespace spv {
// Set up opcode tables from SpvDoc
spv::Parameterize();
validate(); // validate header
buildLocalMaps();
validate(); // validate header
buildLocalMaps(); // build ID maps
msg(3, 4, std::string("ID bound: ") + std::to_string(bound()));
if (options & STRIP) stripDebug();
if (errorLatch) return;
strip(); // strip out data we decided to eliminate
if (errorLatch) return;
if (options & OPT_LOADSTORE) optLoadStore();
if (options & OPT_FWD_LS) forwardLoadStores();
if (options & DCE_FUNCS) dceFuncs();
if (options & DCE_VARS) dceVars();
if (options & DCE_TYPES) dceTypes();
if (options & MAP_TYPES) mapTypeConst();
if (options & MAP_NAMES) mapNames();
if (options & MAP_FUNCS) mapFnBodies();
if (errorLatch) return;
mapRemainder(); // map any unmapped IDs
applyMap(); // Now remap each shader to the new IDs we've come up with
strip(); // strip out data we decided to eliminate
if (options & OPT_FWD_LS) forwardLoadStores();
if (errorLatch) return;
if (options & DCE_FUNCS) dceFuncs();
if (errorLatch) return;
if (options & DCE_VARS) dceVars();
if (errorLatch) return;
if (options & DCE_TYPES) dceTypes();
if (errorLatch) return;
strip(); // strip out data we decided to eliminate
if (errorLatch) return;
stripDeadRefs(); // remove references to things we DCEed
if (errorLatch) return;
// after the last strip, we must clean any debug info referring to now-deleted data
if (options & MAP_TYPES) mapTypeConst();
if (errorLatch) return;
if (options & MAP_NAMES) mapNames();
if (errorLatch) return;
if (options & MAP_FUNCS) mapFnBodies();
if (errorLatch) return;
if (options & MAP_ALL) {
mapRemainder(); // map any unmapped IDs
if (errorLatch) return;
applyMap(); // Now remap each shader to the new IDs we've come up with
if (errorLatch) return;
}
}
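For callers, the pipeline above is driven entirely through the public remap() declared in SPVRemapper.h. A minimal usage sketch; the include path and the DO_EVERYTHING constant are taken from the declarations in this diff, not verified against a build:

// Hypothetical driver; the include path depends on how glslang-spirv is vendored.
#include "SPVRemapper.h"

#include <cstdint>
#include <iostream>
#include <vector>

void RemapModule(std::vector<std::uint32_t>& spirvWords) {
    spv::spirvbin_t remapper(/*verbose=*/0);
    // DO_EVERYTHING: strip debug info, run the DCE passes, and canonicalize IDs.
    remapper.remap(spirvWords, spv::spirvbin_t::DO_EVERYTHING);
    std::cout << "remapped module is " << spirvWords.size() << " words\n";
}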
// remap from a memory image

View File

@ -1,11 +1,11 @@
//
//Copyright (C) 2015 LunarG, Inc.
// Copyright (C) 2015 LunarG, Inc.
//
//All rights reserved.
// All rights reserved.
//
//Redistribution and use in source and binary forms, with or without
//modification, are permitted provided that the following conditions
//are met:
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
//
// Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
@ -19,18 +19,18 @@
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
//THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
//"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
//LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
//FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
//COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
//INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
//BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
//LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
//CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
//LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
//ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
//POSSIBILITY OF SUCH DAMAGE.
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
// COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
// POSSIBILITY OF SUCH DAMAGE.
//
#ifndef SPIRVREMAPPER_H
@ -38,7 +38,8 @@
#include <string>
#include <vector>
#include <stdlib.h>
#include <cstdlib>
#include <exception>
namespace spv {
@ -74,7 +75,8 @@ public:
} // namespace SPV
#if !defined (use_cpp11)
#include <stdio.h>
#include <cstdio>
#include <cstdint>
namespace spv {
class spirvbin_t : public spirvbin_base_t
@ -82,7 +84,7 @@ class spirvbin_t : public spirvbin_base_t
public:
spirvbin_t(int /*verbose = 0*/) { }
void remap(std::vector<unsigned int>& /*spv*/, unsigned int /*opts = 0*/)
void remap(std::vector<std::uint32_t>& /*spv*/, unsigned int /*opts = 0*/)
{
printf("Tool not compiled for C++11, which is required for SPIR-V remapping.\n");
exit(5);
@ -110,8 +112,11 @@ namespace spv {
class spirvbin_t : public spirvbin_base_t
{
public:
spirvbin_t(int verbose = 0) : entryPoint(spv::NoResult), largestNewId(0), verbose(verbose) { }
spirvbin_t(int verbose = 0) : entryPoint(spv::NoResult), largestNewId(0), verbose(verbose), errorLatch(false)
{ }
virtual ~spirvbin_t() { }
// remap on an existing binary in memory
void remap(std::vector<std::uint32_t>& spv, std::uint32_t opts = DO_EVERYTHING);
@ -159,17 +164,22 @@ private:
typedef std::set<int> posmap_t;
typedef std::unordered_map<spv::Id, int> posmap_rev_t;
// handle error
void error(const std::string& txt) const { errorHandler(txt); }
    // Maps an ID to the size of its base type, if known.
typedef std::unordered_map<spv::Id, unsigned> typesize_map_t;
// handle error
void error(const std::string& txt) const { errorLatch = true; errorHandler(txt); }
bool isConstOp(spv::Op opCode) const;
bool isTypeOp(spv::Op opCode) const;
bool isStripOp(spv::Op opCode) const;
bool isFlowCtrl(spv::Op opCode) const;
range_t literalRange(spv::Op opCode) const;
range_t typeRange(spv::Op opCode) const;
range_t constRange(spv::Op opCode) const;
unsigned typeSizeInWords(spv::Id id) const;
unsigned idTypeSizeInWords(spv::Id id) const;
bool isConstOp(spv::Op opCode) const;
bool isTypeOp(spv::Op opCode) const;
bool isStripOp(spv::Op opCode) const;
bool isFlowCtrl(spv::Op opCode) const;
range_t literalRange(spv::Op opCode) const;
range_t typeRange(spv::Op opCode) const;
range_t constRange(spv::Op opCode) const;
spv::Id& asId(unsigned word) { return spv[word]; }
const spv::Id& asId(unsigned word) const { return spv[word]; }
spv::Op asOpCode(unsigned word) const { return opOpCode(spv[word]); }
@ -177,10 +187,10 @@ private:
spv::Decoration asDecoration(unsigned word) const { return spv::Decoration(spv[word]); }
unsigned asWordCount(unsigned word) const { return opWordCount(spv[word]); }
spv::Id asTypeConstId(unsigned word) const { return asId(word + (isTypeOp(asOpCode(word)) ? 1 : 2)); }
unsigned typePos(spv::Id id) const;
unsigned idPos(spv::Id id) const;
static unsigned opWordCount(spirword_t data) { return data >> spv::WordCountShift; }
static spv::Op opOpCode(spirword_t data) { return spv::Op(data & spv::OpCodeMask); }
static unsigned opWordCount(spirword_t data) { return data >> spv::WordCountShift; }
static spv::Op opOpCode(spirword_t data) { return spv::Op(data & spv::OpCodeMask); }
// Header access & set methods
spirword_t magic() const { return spv[0]; } // return magic number
@ -233,9 +243,10 @@ private:
void applyMap(); // remap per local name map
void mapRemainder(); // map any IDs we haven't touched yet
void stripDebug(); // strip debug info
void stripDebug(); // strip all debug info
void stripDeadRefs(); // strips debug info for now-dead references after DCE
void strip(); // remove debug symbols
std::vector<spirword_t> spv; // SPIR words
namemap_t nameMap; // ID names from OpName
@ -258,14 +269,14 @@ private:
// Function start and end. use unordered_map because we'll have
// many fewer functions than IDs.
std::unordered_map<spv::Id, range_t> fnPos;
std::unordered_map<spv::Id, range_t> fnPosDCE; // deleted functions
// Which functions are called, anywhere in the module, with a call count
std::unordered_map<spv::Id, int> fnCalls;
posmap_t typeConstPos; // word positions that define types & consts (ordered)
posmap_rev_t typeConstPosR; // reverse map from IDs to positions
posmap_t typeConstPos; // word positions that define types & consts (ordered)
posmap_rev_t idPosR; // reverse map from IDs to positions
typesize_map_t idTypeSizeMap; // maps each ID to its type size, if known.
std::vector<spv::Id> idMapL; // ID {M}ap from {L}ocal to {G}lobal IDs
spv::Id entryPoint; // module entry point
@ -278,6 +289,11 @@ private:
std::uint32_t options;
int verbose; // verbosity level
// Error latch: this is set if the error handler is ever executed. It would be better to
// use a try/catch block and throw, but that's not desired for certain environments, so
// this is the alternative.
mutable bool errorLatch;
static errorfn_t errorHandler;
static logfn_t logHandler;
};

File diff suppressed because it is too large

View File

@ -1,12 +1,12 @@
//
//Copyright (C) 2014-2015 LunarG, Inc.
//Copyright (C) 2015-2016 Google, Inc.
// Copyright (C) 2014-2015 LunarG, Inc.
// Copyright (C) 2015-2016 Google, Inc.
//
//All rights reserved.
// All rights reserved.
//
//Redistribution and use in source and binary forms, with or without
//modification, are permitted provided that the following conditions
//are met:
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
//
// Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
@ -20,22 +20,18 @@
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
//THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
//"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
//LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
//FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
//COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
//INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
//BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
//LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
//CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
//LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
//ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
//POSSIBILITY OF SUCH DAMAGE.
//
// Author: John Kessenich, LunarG
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
// COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
// POSSIBILITY OF SUCH DAMAGE.
//
// "Builder" is an interface to fully build SPIR-V IR. Allocate one of
@ -49,20 +45,22 @@
#ifndef SpvBuilder_H
#define SpvBuilder_H
#include "Logger.h"
#include "spirv.hpp"
#include "spvIR.h"
#include <algorithm>
#include <memory>
#include <stack>
#include <map>
#include <memory>
#include <set>
#include <sstream>
#include <stack>
namespace spv {
class Builder {
public:
Builder(unsigned int userNumber);
Builder(unsigned int spvVersion, unsigned int userNumber, SpvBuildLogger* logger);
virtual ~Builder();
static const int maxMatrixSize = 4;
@ -72,7 +70,18 @@ public:
source = lang;
sourceVersion = version;
}
void addSourceExtension(const char* ext) { extensions.push_back(ext); }
void setSourceFile(const std::string& file)
{
Instruction* fileString = new Instruction(getUniqueId(), NoType, OpString);
fileString->addStringOperand(file.c_str());
sourceFileStringId = fileString->getResultId();
strings.push_back(std::unique_ptr<Instruction>(fileString));
}
void setSourceText(const std::string& text) { sourceText = text; }
void addSourceExtension(const char* ext) { sourceExtensions.push_back(ext); }
void addModuleProcessed(const std::string& p) { moduleProcesses.push_back(p.c_str()); }
void setEmitOpLines() { emitOpLines = true; }
void addExtension(const char* ext) { extensions.insert(ext); }
Id import(const char*);
void setMemoryModel(spv::AddressingModel addr, spv::MemoryModel mem)
{
@ -93,7 +102,11 @@ public:
return id;
}
Module* getModule() { return &module; }
// Log the current line, and if different than the last one,
// issue a new OpLine, using the current file name.
void setLine(int line);
// Low-level OpLine. See setLine() for a layered helper.
void addLine(Id fileName, int line, int column);
// For creating new types (will return old type if the requested one was already made).
Id makeVoidType();
@ -137,6 +150,9 @@ public:
bool isSampledImage(Id resultId) const { return isSampledImageType(getTypeId(resultId)); }
bool isBoolType(Id typeId) const { return groupedTypes[OpTypeBool].size() > 0 && typeId == groupedTypes[OpTypeBool].back()->getResultId(); }
bool isIntType(Id typeId) const { return getTypeClass(typeId) == OpTypeInt && module.getInstruction(typeId)->getImmediateOperand(1) != 0; }
bool isUintType(Id typeId) const { return getTypeClass(typeId) == OpTypeInt && module.getInstruction(typeId)->getImmediateOperand(1) == 0; }
bool isFloatType(Id typeId) const { return getTypeClass(typeId) == OpTypeFloat; }
bool isPointerType(Id typeId) const { return getTypeClass(typeId) == OpTypePointer; }
bool isScalarType(Id typeId) const { return getTypeClass(typeId) == OpTypeFloat || getTypeClass(typeId) == OpTypeInt || getTypeClass(typeId) == OpTypeBool; }
bool isVectorType(Id typeId) const { return getTypeClass(typeId) == OpTypeVector; }
@ -149,11 +165,20 @@ public:
bool isSampledImageType(Id typeId) const { return getTypeClass(typeId) == OpTypeSampledImage; }
bool isConstantOpCode(Op opcode) const;
bool isSpecConstantOpCode(Op opcode) const;
bool isConstant(Id resultId) const { return isConstantOpCode(getOpCode(resultId)); }
bool isConstantScalar(Id resultId) const { return getOpCode(resultId) == OpConstant; }
bool isSpecConstant(Id resultId) const { return isSpecConstantOpCode(getOpCode(resultId)); }
unsigned int getConstantScalar(Id resultId) const { return module.getInstruction(resultId)->getImmediateOperand(0); }
StorageClass getStorageClass(Id resultId) const { return getTypeStorageClass(getTypeId(resultId)); }
int getScalarTypeWidth(Id typeId) const
{
Id scalarTypeId = getScalarTypeId(typeId);
assert(getTypeClass(scalarTypeId) == OpTypeInt || getTypeClass(scalarTypeId) == OpTypeFloat);
return module.getInstruction(scalarTypeId)->getImmediateOperand(0);
}
int getTypeNumColumns(Id typeId) const
{
assert(isMatrixType(typeId));
@ -188,8 +213,17 @@ public:
Id makeBoolConstant(bool b, bool specConstant = false);
Id makeIntConstant(int i, bool specConstant = false) { return makeIntConstant(makeIntType(32), (unsigned)i, specConstant); }
Id makeUintConstant(unsigned u, bool specConstant = false) { return makeIntConstant(makeUintType(32), u, specConstant); }
Id makeInt64Constant(long long i, bool specConstant = false) { return makeInt64Constant(makeIntType(64), (unsigned long long)i, specConstant); }
Id makeUint64Constant(unsigned long long u, bool specConstant = false) { return makeInt64Constant(makeUintType(64), u, specConstant); }
#ifdef AMD_EXTENSIONS
Id makeInt16Constant(short i, bool specConstant = false) { return makeIntConstant(makeIntType(16), (unsigned)((unsigned short)i), specConstant); }
Id makeUint16Constant(unsigned short u, bool specConstant = false) { return makeIntConstant(makeUintType(16), (unsigned)u, specConstant); }
#endif
Id makeFloatConstant(float f, bool specConstant = false);
Id makeDoubleConstant(double d, bool specConstant = false);
#ifdef AMD_EXTENSIONS
Id makeFloat16Constant(float f16, bool specConstant = false);
#endif
// Turn the array of constants into a proper spv constant of the requested type.
Id makeCompositeConstant(Id type, const std::vector<Id>& comps, bool specConst = false);
@ -199,7 +233,6 @@ public:
void addExecutionMode(Function*, ExecutionMode mode, int value1 = -1, int value2 = -1, int value3 = -1);
void addName(Id, const char* name);
void addMemberName(Id, int member, const char* name);
void addLine(Id target, Id fileName, int line, int column);
void addDecoration(Id, Decoration, int num = -1);
void addMemberDecoration(Id, unsigned int member, Decoration, int num = -1);
@ -207,15 +240,15 @@ public:
void setBuildPoint(Block* bp) { buildPoint = bp; }
Block* getBuildPoint() const { return buildPoint; }
// Make the main function. The returned pointer is only valid
// Make the entry-point function. The returned pointer is only valid
// for the lifetime of this builder.
Function* makeMain();
Function* makeEntryPoint(const char*);
// Make a shader-style function, and create its entry block if entry is non-zero.
// Return the function, pass back the entry.
// The returned pointer is only valid for the lifetime of this builder.
Function* makeFunctionEntry(Decoration precision, Id returnType, const char* name, const std::vector<Id>& paramTypes,
const std::vector<Decoration>& precisions, Block **entry = 0);
const std::vector<std::vector<Decoration>>& precisions, Block **entry = 0);
// Create a return. An 'implicit' return is one not appearing in the source
// code. In the case of an implicit return, no post-return block is inserted.
@ -247,7 +280,7 @@ public:
// Create an OpCompositeExtract instruction
Id createCompositeExtract(Id composite, Id typeId, unsigned index);
Id createCompositeExtract(Id composite, Id typeId, std::vector<unsigned>& indexes);
Id createCompositeExtract(Id composite, Id typeId, const std::vector<unsigned>& indexes);
Id createCompositeInsert(Id object, Id composite, Id typeId, unsigned index);
Id createCompositeInsert(Id object, Id composite, Id typeId, const std::vector<unsigned>& indexes);
@ -264,6 +297,7 @@ public:
Id createTriOp(Op, Id typeId, Id operand1, Id operand2, Id operand3);
Id createOp(Op, Id typeId, const std::vector<Id>& operands);
Id createFunctionCall(spv::Function*, const std::vector<spv::Id>&);
Id createSpecConstantOp(Op, Id typeId, const std::vector<spv::Id>& operands, const std::vector<unsigned>& literals);
// Take an rvalue (source) and a set of channels to extract from it to
// make a new rvalue, which is returned.
@ -296,7 +330,7 @@ public:
// Generally, the type of 'scalar' does not need to be the same type as the components in 'vector'.
// The type of the created vector is a vector of components of the same type as the scalar.
//
// Note: One of the arguments will change, with the result coming back that way rather than
// Note: One of the arguments will change, with the result coming back that way rather than
// through the return value.
void promoteScalar(Decoration precision, Id& left, Id& right);
@ -306,7 +340,7 @@ public:
Id smearScalar(Decoration precision, Id scalarVal, Id vectorType);
// Create a call to a built-in function.
Id createBuiltinCall(Id resultType, Id builtins, int entryPoint, std::vector<Id>& args);
Id createBuiltinCall(Id resultType, Id builtins, int entryPoint, const std::vector<Id>& args);
// List of parameters used to create a texture operation
struct TextureParameters {
@ -320,7 +354,7 @@ public:
Id gradX;
Id gradY;
Id sample;
Id comp;
Id component;
Id texelOut;
Id lodClamp;
};
@ -330,7 +364,7 @@ public:
// Emit the OpTextureQuery* instruction that was passed in.
// Figure out the right return value and type, and return it.
Id createTextureQueryCall(Op, const TextureParameters&);
Id createTextureQueryCall(Op, const TextureParameters&, bool isUnsignedResult);
Id createSamplePositionCall(Decoration precision, Id, Id);
@ -352,7 +386,7 @@ public:
// Helper to use for building nested control flow with if-then-else.
class If {
public:
If(Id condition, Builder& builder);
If(Id condition, unsigned int ctrl, Builder& builder);
~If() {}
void makeBeginElse();
@ -364,6 +398,7 @@ public:
Builder& builder;
Id condition;
unsigned int control;
Function* function;
Block* headerBlock;
Block* thenBlock;
@ -383,8 +418,8 @@ public:
// Returns the right set of basic blocks to start each code segment with, so that the caller's
// recursion stack can hold the memory for it.
//
void makeSwitch(Id condition, int numSegments, const std::vector<int>& caseValues, const std::vector<int>& valueToSegment, int defaultSegment,
std::vector<Block*>& segmentBB); // return argument
void makeSwitch(Id condition, unsigned int control, int numSegments, const std::vector<int>& caseValues,
const std::vector<int>& valueToSegment, int defaultSegment, std::vector<Block*>& segmentBB); // return argument
// Add a branch to the innermost switch's merge block.
void addSwitchBreak();
@ -396,7 +431,12 @@ public:
void endSwitch(std::vector<Block*>& segmentBB);
struct LoopBlocks {
LoopBlocks(Block& head, Block& body, Block& merge, Block& continue_target) :
head(head), body(body), merge(merge), continue_target(continue_target) { }
Block &head, &body, &merge, &continue_target;
private:
LoopBlocks();
LoopBlocks& operator=(const LoopBlocks&);
};
// Start a new loop and prepare the builder to generate code for it. Until
@ -460,7 +500,7 @@ public:
//
// the SPIR-V builder maintains a single active chain that
// the following methods operated on
// the following methods operate on
//
// for external save and restore
@ -493,12 +533,15 @@ public:
// push new swizzle onto the end of any existing swizzle, merging into a single swizzle
void accessChainPushSwizzle(std::vector<unsigned>& swizzle, Id preSwizzleBaseType);
// push a variable component selection onto the access chain; supporting only one, so unsided
// push a dynamic component selection onto the access chain, only applicable with a
// non-trivial swizzle or no swizzle
void accessChainPushComponent(Id component, Id preSwizzleBaseType)
{
accessChain.component = component;
if (accessChain.preSwizzleBaseType == NoType)
accessChain.preSwizzleBaseType = preSwizzleBaseType;
if (accessChain.swizzle.size() != 1) {
accessChain.component = component;
if (accessChain.preSwizzleBaseType == NoType)
accessChain.preSwizzleBaseType = preSwizzleBaseType;
}
}
// use accessChain and swizzle to store value
@ -514,27 +557,48 @@ public:
// based on the type of the base and the chain of dereferences.
Id accessChainGetInferredType();
// Remove OpDecorate instructions whose operands are defined in unreachable
// blocks.
void eliminateDeadDecorations();
void dump(std::vector<unsigned int>&) const;
void createBranch(Block* block);
void createConditionalBranch(Id condition, Block* thenBlock, Block* elseBlock);
void createLoopMerge(Block* mergeBlock, Block* continueBlock, unsigned int control);
void createLoopMerge(Block* mergeBlock, Block* continueBlock, unsigned int control, unsigned int dependencyLength);
void createSelectionMerge(Block* mergeBlock, unsigned int control);
// Sets to generate opcode for specialization constants.
void setToSpecConstCodeGenMode() { generatingOpCodeForSpecConst = true; }
// Sets to generate opcode for non-specialization constants (normal mode).
void setToNormalCodeGenMode() { generatingOpCodeForSpecConst = false; }
// Check if the builder is generating code for spec constants.
bool isInSpecConstCodeGenMode() { return generatingOpCodeForSpecConst; }
protected:
Id makeIntConstant(Id typeId, unsigned value, bool specConstant);
Id makeInt64Constant(Id typeId, unsigned long long value, bool specConstant);
Id findScalarConstant(Op typeClass, Op opcode, Id typeId, unsigned value) const;
Id findScalarConstant(Op typeClass, Op opcode, Id typeId, unsigned v1, unsigned v2) const;
Id findCompositeConstant(Op typeClass, const std::vector<Id>& comps) const;
Id collapseAccessChain();
void remapDynamicSwizzle();
void transferAccessChainSwizzle(bool dynamic);
void simplifyAccessChainSwizzle();
void createAndSetNoPredecessorBlock(const char*);
void dumpSourceInstructions(std::vector<unsigned int>&) const;
void dumpInstructions(std::vector<unsigned int>&, const std::vector<std::unique_ptr<Instruction> >&) const;
void dumpModuleProcesses(std::vector<unsigned int>&) const;
unsigned int spvVersion; // the version of SPIR-V to emit in the header
SourceLanguage source;
int sourceVersion;
std::vector<const char*> extensions;
spv::Id sourceFileStringId;
std::string sourceText;
int currentLine;
bool emitOpLines;
std::set<std::string> extensions;
std::vector<const char*> sourceExtensions;
std::vector<const char*> moduleProcesses;
AddressingModel addressModel;
MemoryModel memoryModel;
std::set<spv::Capability> capabilities;
@ -542,10 +606,12 @@ public:
Module module;
Block* buildPoint;
Id uniqueId;
Function* mainFunction;
Function* entryPointFunction;
bool generatingOpCodeForSpecConst;
AccessChain accessChain;
// special blocks of instructions for output
std::vector<std::unique_ptr<Instruction> > strings;
std::vector<std::unique_ptr<Instruction> > imports;
std::vector<std::unique_ptr<Instruction> > entryPoints;
std::vector<std::unique_ptr<Instruction> > executionModes;
@ -565,14 +631,11 @@ public:
// Our loop stack.
std::stack<LoopBlocks> loops;
// The stream for outputting warnings and errors.
SpvBuildLogger* logger;
}; // end Builder class
// Use for non-fatal notes about what's not complete
void TbdFunctionality(const char*);
// Use for fatal missing functionality
void MissingFunctionality(const char*);
}; // end spv namespace
#endif // SpvBuilder_H
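One behavioral change in this header worth calling out: accessChainPushComponent() now records a dynamic component only when the pending swizzle has something other than exactly one entry, presumably because a one-entry swizzle can be folded into the chain as a static index. A stand-alone illustration of that branch, with simplified stand-in types rather than the real Builder state:

#include <iostream>
#include <vector>

using Id = unsigned int;
const Id NoType = 0;

// Simplified stand-ins for the Builder's access-chain bookkeeping.
struct AccessChain {
    std::vector<unsigned> swizzle;   // pending static swizzle
    Id component = 0;                // pending dynamic component selection
    Id preSwizzleBaseType = NoType;  // type before the swizzle was applied
};

void pushComponent(AccessChain& chain, Id component, Id preSwizzleBaseType) {
    if (chain.swizzle.size() != 1) {         // the new guard from this diff
        chain.component = component;
        if (chain.preSwizzleBaseType == NoType)
            chain.preSwizzleBaseType = preSwizzleBaseType;
    }
}

int main() {
    AccessChain a;                        // no swizzle pending
    pushComponent(a, 42, 7);
    std::cout << a.component << "\n";     // 42: dynamic component recorded

    AccessChain b;
    b.swizzle = {2};                      // single-entry swizzle pending
    pushComponent(b, 42, 7);
    std::cout << b.component << "\n";     // 0: left for the swizzle itself
    return 0;
}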

81
third_party/glslang-spirv/bitutils.h vendored Normal file
View File

@ -0,0 +1,81 @@
// Copyright (c) 2015-2016 The Khronos Group Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#ifndef LIBSPIRV_UTIL_BITUTILS_H_
#define LIBSPIRV_UTIL_BITUTILS_H_
#include <cstdint>
#include <cstring>
namespace spvutils {
// Performs a bitwise copy of source to the destination type Dest.
template <typename Dest, typename Src>
Dest BitwiseCast(Src source) {
Dest dest;
static_assert(sizeof(source) == sizeof(dest),
"BitwiseCast: Source and destination must have the same size");
std::memcpy(&dest, &source, sizeof(dest));
return dest;
}
// SetBits<T, First, Num> returns an integer of type <T> with bits set
// for position <First> through <First + Num - 1>, counting from the least
// significant bit. In particular when Num == 0, no positions are set to 1.
// A static assert will be triggered if First + Num > sizeof(T) * 8, that is,
// a bit that will not fit in the underlying type is set.
template <typename T, size_t First = 0, size_t Num = 0>
struct SetBits {
static_assert(First < sizeof(T) * 8,
"Tried to set a bit that is shifted too far.");
const static T get = (T(1) << First) | SetBits<T, First + 1, Num - 1>::get;
};
template <typename T, size_t Last>
struct SetBits<T, Last, 0> {
const static T get = T(0);
};
// This is all compile-time so we can put our tests right here.
static_assert(SetBits<uint32_t, 0, 0>::get == uint32_t(0x00000000),
"SetBits failed");
static_assert(SetBits<uint32_t, 0, 1>::get == uint32_t(0x00000001),
"SetBits failed");
static_assert(SetBits<uint32_t, 31, 1>::get == uint32_t(0x80000000),
"SetBits failed");
static_assert(SetBits<uint32_t, 1, 2>::get == uint32_t(0x00000006),
"SetBits failed");
static_assert(SetBits<uint32_t, 30, 2>::get == uint32_t(0xc0000000),
"SetBits failed");
static_assert(SetBits<uint32_t, 0, 31>::get == uint32_t(0x7FFFFFFF),
"SetBits failed");
static_assert(SetBits<uint32_t, 0, 32>::get == uint32_t(0xFFFFFFFF),
"SetBits failed");
static_assert(SetBits<uint32_t, 16, 16>::get == uint32_t(0xFFFF0000),
"SetBits failed");
static_assert(SetBits<uint64_t, 0, 1>::get == uint64_t(0x0000000000000001LL),
"SetBits failed");
static_assert(SetBits<uint64_t, 63, 1>::get == uint64_t(0x8000000000000000LL),
"SetBits failed");
static_assert(SetBits<uint64_t, 62, 2>::get == uint64_t(0xc000000000000000LL),
"SetBits failed");
static_assert(SetBits<uint64_t, 31, 1>::get == uint64_t(0x0000000080000000LL),
"SetBits failed");
static_assert(SetBits<uint64_t, 16, 16>::get == uint64_t(0x00000000FFFF0000LL),
"SetBits failed");
} // namespace spvutils
#endif // LIBSPIRV_UTIL_BITUTILS_H_
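The static_asserts above already exercise SetBits at compile time; BitwiseCast is the runtime half, a memcpy-based reinterpretation that avoids the undefined behavior of pointer punning. A small usage sketch, assuming the header is on the include path as vendored here:

#include "bitutils.h"

#include <cstdint>
#include <iostream>

int main() {
    // Reinterpret the bits of a float as a 32-bit integer without UB.
    const std::uint32_t bits = spvutils::BitwiseCast<std::uint32_t>(1.0f);
    std::cout << std::hex << bits << "\n";  // 3f800000

    // Compile-time mask covering bits 16..31.
    constexpr std::uint32_t highHalf = spvutils::SetBits<std::uint32_t, 16, 16>::get;
    static_assert(highHalf == 0xFFFF0000u, "mask mismatch");
    return 0;
}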

View File

@ -1,11 +1,11 @@
//
//Copyright (C) 2014-2015 LunarG, Inc.
// Copyright (C) 2014-2015 LunarG, Inc.
//
//All rights reserved.
// All rights reserved.
//
//Redistribution and use in source and binary forms, with or without
//modification, are permitted provided that the following conditions
//are met:
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
//
// Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
@ -19,47 +19,59 @@
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
//THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
//"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
//LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
//FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
//COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
//INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
//BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
//LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
//CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
//LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
//ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
//POSSIBILITY OF SUCH DAMAGE.
//
// Author: John Kessenich, LunarG
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
// COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
// POSSIBILITY OF SUCH DAMAGE.
//
// Disassembler for SPIR-V.
//
#include <stdlib.h>
#include <string.h>
#include <assert.h>
#include <cstdlib>
#include <cstring>
#include <cassert>
#include <iomanip>
#include <stack>
#include <sstream>
#include <cstring>
namespace spv {
// Include C-based headers that don't have a namespace
#include "GLSL.std.450.h"
}
const char* GlslStd450DebugNames[spv::GLSLstd450Count];
#include "disassemble.h"
#include "doc.h"
namespace spv {
extern "C" {
// Include C-based headers that don't have a namespace
#include "GLSL.std.450.h"
#ifdef AMD_EXTENSIONS
#include "GLSL.ext.AMD.h"
#endif
#ifdef NV_EXTENSIONS
#include "GLSL.ext.NV.h"
#endif
}
}
const char* GlslStd450DebugNames[spv::GLSLstd450Count];
void Kill(std::ostream& out, const char* message)
namespace spv {
#ifdef AMD_EXTENSIONS
static const char* GLSLextAMDGetDebugNames(const char*, unsigned);
#endif
#ifdef NV_EXTENSIONS
static const char* GLSLextNVGetDebugNames(const char*, unsigned);
#endif
static void Kill(std::ostream& out, const char* message)
{
out << std::endl << "Disassembly failed: " << message << std::endl;
exit(1);
@ -68,6 +80,12 @@ void Kill(std::ostream& out, const char* message)
// used to identify the extended instruction library imported when printing
enum ExtInstSet {
GLSL450Inst,
#ifdef AMD_EXTENSIONS
GLSLextAMDInst,
#endif
#ifdef NV_EXTENSIONS
GLSLextNVInst,
#endif
OpenCLExtInst,
};
@ -209,10 +227,12 @@ void SpirvStream::outputIndent()
void SpirvStream::formatId(Id id, std::stringstream& idStream)
{
if (id >= bound)
Kill(out, "Bad <id>");
if (id != 0) {
// On instructions with no IDs, this is called with "0", which does not
// have to be within ID bounds on null shaders.
if (id >= bound)
Kill(out, "Bad <id>");
idStream << id;
if (idDescriptor[id].size() > 0)
idStream << "(" << idDescriptor[id] << ")";
@ -326,7 +346,7 @@ void SpirvStream::disassembleInstruction(Id resultId, Id /*typeId*/, Op opCode,
idDescriptor[resultId] = (const char*)(&stream[word]);
}
else {
if (idDescriptor[resultId].size() == 0) {
if (resultId != 0 && idDescriptor[resultId].size() == 0) {
switch (opCode) {
case OpTypeInt:
idDescriptor[resultId] = "int";
@ -450,14 +470,38 @@ void SpirvStream::disassembleInstruction(Id resultId, Id /*typeId*/, Op opCode,
--numOperands;
if (opCode == OpExtInst) {
ExtInstSet extInstSet = GLSL450Inst;
if (0 == memcmp("OpenCL", (const char*)(idDescriptor[stream[word-2]].c_str()), 6)) {
const char* name = idDescriptor[stream[word - 2]].c_str();
if (0 == memcmp("OpenCL", name, 6)) {
extInstSet = OpenCLExtInst;
#ifdef AMD_EXTENSIONS
} else if (strcmp(spv::E_SPV_AMD_shader_ballot, name) == 0 ||
strcmp(spv::E_SPV_AMD_shader_trinary_minmax, name) == 0 ||
strcmp(spv::E_SPV_AMD_shader_explicit_vertex_parameter, name) == 0 ||
strcmp(spv::E_SPV_AMD_gcn_shader, name) == 0) {
extInstSet = GLSLextAMDInst;
#endif
#ifdef NV_EXTENSIONS
            } else if (strcmp(spv::E_SPV_NV_sample_mask_override_coverage, name) == 0 ||
strcmp(spv::E_SPV_NV_geometry_shader_passthrough, name) == 0 ||
strcmp(spv::E_SPV_NV_viewport_array2, name) == 0 ||
strcmp(spv::E_SPV_NVX_multiview_per_view_attributes, name) == 0) {
extInstSet = GLSLextNVInst;
#endif
}
unsigned entrypoint = stream[word - 1];
if (extInstSet == GLSL450Inst) {
if (entrypoint < GLSLstd450Count) {
out << "(" << GlslStd450DebugNames[entrypoint] << ")";
}
#ifdef AMD_EXTENSIONS
} else if (extInstSet == GLSLextAMDInst) {
out << "(" << GLSLextAMDGetDebugNames(name, entrypoint) << ")";
#endif
#ifdef NV_EXTENSIONS
}
else if (extInstSet == GLSLextNVInst) {
out << "(" << GLSLextNVGetDebugNames(name, entrypoint) << ")";
#endif
}
}
break;
@ -481,7 +525,7 @@ void SpirvStream::disassembleInstruction(Id resultId, Id /*typeId*/, Op opCode,
return;
}
void GLSLstd450GetDebugNames(const char** names)
static void GLSLstd450GetDebugNames(const char** names)
{
for (int i = 0; i < GLSLstd450Count; ++i)
names[i] = "Unknown";
@ -565,9 +609,84 @@ void GLSLstd450GetDebugNames(const char** names)
names[GLSLstd450InterpolateAtOffset] = "InterpolateAtOffset";
}
#ifdef AMD_EXTENSIONS
static const char* GLSLextAMDGetDebugNames(const char* name, unsigned entrypoint)
{
if (strcmp(name, spv::E_SPV_AMD_shader_ballot) == 0) {
switch (entrypoint) {
case SwizzleInvocationsAMD: return "SwizzleInvocationsAMD";
case SwizzleInvocationsMaskedAMD: return "SwizzleInvocationsMaskedAMD";
case WriteInvocationAMD: return "WriteInvocationAMD";
case MbcntAMD: return "MbcntAMD";
default: return "Bad";
}
} else if (strcmp(name, spv::E_SPV_AMD_shader_trinary_minmax) == 0) {
switch (entrypoint) {
case FMin3AMD: return "FMin3AMD";
case UMin3AMD: return "UMin3AMD";
case SMin3AMD: return "SMin3AMD";
case FMax3AMD: return "FMax3AMD";
case UMax3AMD: return "UMax3AMD";
case SMax3AMD: return "SMax3AMD";
case FMid3AMD: return "FMid3AMD";
case UMid3AMD: return "UMid3AMD";
case SMid3AMD: return "SMid3AMD";
default: return "Bad";
}
} else if (strcmp(name, spv::E_SPV_AMD_shader_explicit_vertex_parameter) == 0) {
switch (entrypoint) {
case InterpolateAtVertexAMD: return "InterpolateAtVertexAMD";
default: return "Bad";
}
}
else if (strcmp(name, spv::E_SPV_AMD_gcn_shader) == 0) {
switch (entrypoint) {
case CubeFaceIndexAMD: return "CubeFaceIndexAMD";
case CubeFaceCoordAMD: return "CubeFaceCoordAMD";
case TimeAMD: return "TimeAMD";
default:
break;
}
}
return "Bad";
}
#endif
#ifdef NV_EXTENSIONS
static const char* GLSLextNVGetDebugNames(const char* name, unsigned entrypoint)
{
if (strcmp(name, spv::E_SPV_NV_sample_mask_override_coverage) == 0 ||
strcmp(name, spv::E_SPV_NV_geometry_shader_passthrough) == 0 ||
strcmp(name, spv::E_ARB_shader_viewport_layer_array) == 0 ||
strcmp(name, spv::E_SPV_NV_viewport_array2) == 0 ||
strcmp(spv::E_SPV_NVX_multiview_per_view_attributes, name) == 0) {
switch (entrypoint) {
case DecorationOverrideCoverageNV: return "OverrideCoverageNV";
case DecorationPassthroughNV: return "PassthroughNV";
case CapabilityGeometryShaderPassthroughNV: return "GeometryShaderPassthroughNV";
case DecorationViewportRelativeNV: return "ViewportRelativeNV";
case BuiltInViewportMaskNV: return "ViewportMaskNV";
case CapabilityShaderViewportMaskNV: return "ShaderViewportMaskNV";
case DecorationSecondaryViewportRelativeNV: return "SecondaryViewportRelativeNV";
case BuiltInSecondaryPositionNV: return "SecondaryPositionNV";
case BuiltInSecondaryViewportMaskNV: return "SecondaryViewportMaskNV";
case CapabilityShaderStereoViewNV: return "ShaderStereoViewNV";
case BuiltInPositionPerViewNV: return "PositionPerViewNV";
case BuiltInViewportMaskPerViewNV: return "ViewportMaskPerViewNV";
case CapabilityPerViewAttributesNV: return "PerViewAttributesNV";
default: return "Bad";
}
}
return "Bad";
}
#endif
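The Disassemble() entry point below is the only piece most callers touch; SpirvStream and the extended-instruction name tables are internal helpers. A usage sketch, with the include path assumed from this vendored layout:

#include "disassemble.h"

#include <iostream>
#include <vector>

void PrintShader(const std::vector<unsigned int>& spirvWords) {
    // Writes a human-readable listing of the module to the given stream.
    spv::Disassemble(std::cout, spirvWords);
}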
void Disassemble(std::ostream& out, const std::vector<unsigned int>& stream)
{
SpirvStream SpirvStream(out, stream);
spv::Parameterize();
GLSLstd450GetDebugNames(GlslStd450DebugNames);
SpirvStream.validate();
SpirvStream.processInstructions();

View File

@ -1,11 +1,11 @@
//
//Copyright (C) 2014-2015 LunarG, Inc.
// Copyright (C) 2014-2015 LunarG, Inc.
//
//All rights reserved.
// All rights reserved.
//
//Redistribution and use in source and binary forms, with or without
//modification, are permitted provided that the following conditions
//are met:
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
//
// Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
@ -19,22 +19,18 @@
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
//THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
//"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
//LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
//FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
//COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
//INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
//BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
//LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
//CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
//LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
//ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
//POSSIBILITY OF SUCH DAMAGE.
//
// Author: John Kessenich, LunarG
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
// COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
// POSSIBILITY OF SUCH DAMAGE.
//
// Disassembler for SPIR-V.

View File

@ -1,11 +1,11 @@
//
//Copyright (C) 2014-2015 LunarG, Inc.
// Copyright (C) 2014-2015 LunarG, Inc.
//
//All rights reserved.
// All rights reserved.
//
//Redistribution and use in source and binary forms, with or without
//modification, are permitted provided that the following conditions
//are met:
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
//
// Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
@ -19,25 +19,21 @@
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
//THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
//"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
//LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
//FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
//COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
//INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
//BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
//LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
//CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
//LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
//ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
//POSSIBILITY OF SUCH DAMAGE.
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
// COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
// POSSIBILITY OF SUCH DAMAGE.
//
// Author: John Kessenich, LunarG
//
//
// 1) Programatically fill in instruction/operand information.
// 1) Programmatically fill in instruction/operand information.
// This can be used for disassembly, printing documentation, etc.
//
// 2) Print documentation from this parameterization.
@ -45,10 +41,24 @@
#include "doc.h"
#include <stdio.h>
#include <string.h>
#include <cstdio>
#include <cstring>
#include <algorithm>
namespace spv {
extern "C" {
// Include C-based headers that don't have a namespace
#include "GLSL.ext.KHR.h"
#include "GLSL.ext.EXT.h"
#ifdef AMD_EXTENSIONS
#include "GLSL.ext.AMD.h"
#endif
#ifdef NV_EXTENSIONS
#include "GLSL.ext.NV.h"
#endif
}
}
namespace spv {
//
@ -59,12 +69,12 @@ namespace spv {
// Also, the ceilings are declared next to these, to help keep them in sync.
// Ceilings should be
// - one more than the maximum value an enumerant takes on, for non-mask enumerants
// (for non-sparse enums, this is the number of enumurants)
// (for non-sparse enums, this is the number of enumerants)
// - the number of bits consumed by the set of masks
// (for non-sparse mask enums, this is the number of enumurants)
// (for non-sparse mask enums, this is the number of enumerants)
//
const int SourceLanguageCeiling = 5;
const int SourceLanguageCeiling = 6; // HLSL todo: need official enumerant
const char* SourceString(int source)
{
@ -74,6 +84,7 @@ const char* SourceString(int source)
case 2: return "GLSL";
case 3: return "OpenCL_C";
case 4: return "OpenCL_CPP";
case 5: return "HLSL";
case SourceLanguageCeiling:
default: return "Bad";
@ -165,12 +176,13 @@ const char* ExecutionModeString(int mode)
case 31: return "ContractionOff";
case 32: return "Bad";
case 4446: return "PostDepthCoverage";
case ExecutionModeCeiling:
default: return "Bad";
}
}
const int StorageClassCeiling = 12;
const int StorageClassCeiling = 13;
const char* StorageClassString(int StorageClass)
{
@ -187,6 +199,7 @@ const char* StorageClassString(int StorageClass)
case 9: return "PushConstant";
case 10: return "AtomicCounter";
case 11: return "Image";
case 12: return "StorageBuffer";
case StorageClassCeiling:
default: return "Bad";
@ -246,6 +259,16 @@ const char* DecorationString(int decoration)
case DecorationCeiling:
default: return "Bad";
#ifdef AMD_EXTENSIONS
case 4999: return "ExplicitInterpAMD";
#endif
#ifdef NV_EXTENSIONS
case 5248: return "OverrideCoverageNV";
case 5250: return "PassthroughNV";
case 5252: return "ViewportRelativeNV";
case 5256: return "SecondaryViewportRelativeNV";
#endif
}
}
@ -299,6 +322,38 @@ const char* BuiltInString(int builtIn)
case 42: return "VertexIndex"; // TBD: put next to VertexId?
case 43: return "InstanceIndex"; // TBD: put next to InstanceId?
case 4416: return "SubgroupEqMaskKHR";
case 4417: return "SubgroupGeMaskKHR";
case 4418: return "SubgroupGtMaskKHR";
case 4419: return "SubgroupLeMaskKHR";
case 4420: return "SubgroupLtMaskKHR";
case 4438: return "DeviceIndex";
case 4440: return "ViewIndex";
case 4424: return "BaseVertex";
case 4425: return "BaseInstance";
case 4426: return "DrawIndex";
case 5014: return "FragStencilRefEXT";
#ifdef AMD_EXTENSIONS
case 4992: return "BaryCoordNoPerspAMD";
case 4993: return "BaryCoordNoPerspCentroidAMD";
case 4994: return "BaryCoordNoPerspSampleAMD";
case 4995: return "BaryCoordSmoothAMD";
case 4996: return "BaryCoordSmoothCentroidAMD";
case 4997: return "BaryCoordSmoothSampleAMD";
case 4998: return "BaryCoordPullModelAMD";
#endif
#ifdef NV_EXTENSIONS
case 5253: return "ViewportMaskNV";
case 5257: return "SecondaryPositionNV";
case 5258: return "SecondaryViewportMaskNV";
case 5261: return "PositionPerViewNV";
case 5262: return "ViewportMaskPerViewNV";
#endif
case 5264: return "FullyCoveredEXT";
case BuiltInCeiling:
default: return "Bad";
}
@ -586,13 +641,15 @@ const char* SelectControlString(int cont)
}
}
const int LoopControlCeiling = 2;
const int LoopControlCeiling = 4;
const char* LoopControlString(int cont)
{
switch (cont) {
case 0: return "Unroll";
case 1: return "DontUnroll";
case 2: return "DependencyInfinite";
case 3: return "DependencyLength";
case LoopControlCeiling:
default: return "Bad";
@ -777,6 +834,39 @@ const char* CapabilityString(int info)
case 56: return "StorageImageWriteWithoutFormat";
case 57: return "MultiViewport";
case 4423: return "SubgroupBallotKHR";
case 4427: return "DrawParameters";
case 4431: return "SubgroupVoteKHR";
case 4433: return "StorageUniformBufferBlock16";
case 4434: return "StorageUniform16";
case 4435: return "StoragePushConstant16";
case 4436: return "StorageInputOutput16";
case 4437: return "DeviceGroup";
case 4439: return "MultiView";
case 5013: return "StencilExportEXT";
#ifdef AMD_EXTENSIONS
case 5009: return "ImageGatherBiasLodAMD";
case 5010: return "FragmentMaskAMD";
case 5015: return "ImageReadWriteLodAMD";
#endif
case 4445: return "AtomicStorageOps";
case 4447: return "SampleMaskPostDepthCoverage";
#ifdef NV_EXTENSIONS
case 5251: return "GeometryShaderPassthroughNV";
case 5254: return "ShaderViewportIndexLayerNV";
case 5255: return "ShaderViewportMaskNV";
case 5259: return "ShaderStereoViewNV";
case 5260: return "PerViewAttributesNV";
#endif
case 5265: return "FragmentFullyCoveredEXT";
case CapabilityCeiling:
default: return "Bad";
}
@ -1107,6 +1197,27 @@ const char* OpcodeString(int op)
case 319: return "OpAtomicFlagClear";
case 320: return "OpImageSparseRead";
case 4421: return "OpSubgroupBallotKHR";
case 4422: return "OpSubgroupFirstInvocationKHR";
case 4428: return "OpSubgroupAllKHR";
case 4429: return "OpSubgroupAnyKHR";
case 4430: return "OpSubgroupAllEqualKHR";
case 4432: return "OpSubgroupReadInvocationKHR";
#ifdef AMD_EXTENSIONS
case 5000: return "OpGroupIAddNonUniformAMD";
case 5001: return "OpGroupFAddNonUniformAMD";
case 5002: return "OpGroupFMinNonUniformAMD";
case 5003: return "OpGroupUMinNonUniformAMD";
case 5004: return "OpGroupSMinNonUniformAMD";
case 5005: return "OpGroupFMaxNonUniformAMD";
case 5006: return "OpGroupUMaxNonUniformAMD";
case 5007: return "OpGroupSMaxNonUniformAMD";
case 5011: return "OpFragmentMaskFetchAMD";
case 5012: return "OpFragmentFetchAMD";
#endif
case OpcodeCeiling:
default:
return "Bad";
@ -1115,7 +1226,7 @@ const char* OpcodeString(int op)
// The set of objects that hold all the instruction/operand
// parameterization information.
InstructionParameters InstructionDesc[OpcodeCeiling];
InstructionParameters InstructionDesc[OpCodeMask + 1];
OperandParameters ExecutionModeOperands[ExecutionModeCeiling];
OperandParameters DecorationOperands[DecorationCeiling];
@ -2447,6 +2558,7 @@ void Parameterize()
InstructionDesc[OpLoopMerge].operands.push(OperandId, "'Merge Block'");
InstructionDesc[OpLoopMerge].operands.push(OperandId, "'Continue Target'");
InstructionDesc[OpLoopMerge].operands.push(OperandLoop, "");
InstructionDesc[OpLoopMerge].operands.push(OperandOptionalLiteral, "");
InstructionDesc[OpSelectionMerge].operands.push(OperandId, "'Merge Block'");
InstructionDesc[OpSelectionMerge].operands.push(OperandSelect, "");
@ -2706,6 +2818,77 @@ void Parameterize()
InstructionDesc[OpEnqueueMarker].operands.push(OperandId, "'Num Events'");
InstructionDesc[OpEnqueueMarker].operands.push(OperandId, "'Wait Events'");
InstructionDesc[OpEnqueueMarker].operands.push(OperandId, "'Ret Event'");
InstructionDesc[OpSubgroupBallotKHR].operands.push(OperandId, "'Predicate'");
InstructionDesc[OpSubgroupFirstInvocationKHR].operands.push(OperandId, "'Value'");
InstructionDesc[OpSubgroupAnyKHR].capabilities.push_back(CapabilitySubgroupVoteKHR);
InstructionDesc[OpSubgroupAnyKHR].operands.push(OperandScope, "'Execution'");
InstructionDesc[OpSubgroupAnyKHR].operands.push(OperandId, "'Predicate'");
InstructionDesc[OpSubgroupAllKHR].capabilities.push_back(CapabilitySubgroupVoteKHR);
InstructionDesc[OpSubgroupAllKHR].operands.push(OperandScope, "'Execution'");
InstructionDesc[OpSubgroupAllKHR].operands.push(OperandId, "'Predicate'");
InstructionDesc[OpSubgroupAllEqualKHR].capabilities.push_back(CapabilitySubgroupVoteKHR);
InstructionDesc[OpSubgroupAllEqualKHR].operands.push(OperandScope, "'Execution'");
InstructionDesc[OpSubgroupAllEqualKHR].operands.push(OperandId, "'Predicate'");
InstructionDesc[OpSubgroupReadInvocationKHR].capabilities.push_back(CapabilityGroups);
InstructionDesc[OpSubgroupReadInvocationKHR].operands.push(OperandId, "'Value'");
InstructionDesc[OpSubgroupReadInvocationKHR].operands.push(OperandId, "'Index'");
#ifdef AMD_EXTENSIONS
InstructionDesc[OpGroupIAddNonUniformAMD].capabilities.push_back(CapabilityGroups);
InstructionDesc[OpGroupIAddNonUniformAMD].operands.push(OperandScope, "'Execution'");
InstructionDesc[OpGroupIAddNonUniformAMD].operands.push(OperandGroupOperation, "'Operation'");
InstructionDesc[OpGroupIAddNonUniformAMD].operands.push(OperandId, "'X'");
InstructionDesc[OpGroupFAddNonUniformAMD].capabilities.push_back(CapabilityGroups);
InstructionDesc[OpGroupFAddNonUniformAMD].operands.push(OperandScope, "'Execution'");
InstructionDesc[OpGroupFAddNonUniformAMD].operands.push(OperandGroupOperation, "'Operation'");
InstructionDesc[OpGroupFAddNonUniformAMD].operands.push(OperandId, "'X'");
InstructionDesc[OpGroupUMinNonUniformAMD].capabilities.push_back(CapabilityGroups);
InstructionDesc[OpGroupUMinNonUniformAMD].operands.push(OperandScope, "'Execution'");
InstructionDesc[OpGroupUMinNonUniformAMD].operands.push(OperandGroupOperation, "'Operation'");
InstructionDesc[OpGroupUMinNonUniformAMD].operands.push(OperandId, "'X'");
InstructionDesc[OpGroupSMinNonUniformAMD].capabilities.push_back(CapabilityGroups);
InstructionDesc[OpGroupSMinNonUniformAMD].operands.push(OperandScope, "'Execution'");
InstructionDesc[OpGroupSMinNonUniformAMD].operands.push(OperandGroupOperation, "'Operation'");
InstructionDesc[OpGroupSMinNonUniformAMD].operands.push(OperandId, "X");
InstructionDesc[OpGroupFMinNonUniformAMD].capabilities.push_back(CapabilityGroups);
InstructionDesc[OpGroupFMinNonUniformAMD].operands.push(OperandScope, "'Execution'");
InstructionDesc[OpGroupFMinNonUniformAMD].operands.push(OperandGroupOperation, "'Operation'");
InstructionDesc[OpGroupFMinNonUniformAMD].operands.push(OperandId, "X");
InstructionDesc[OpGroupUMaxNonUniformAMD].capabilities.push_back(CapabilityGroups);
InstructionDesc[OpGroupUMaxNonUniformAMD].operands.push(OperandScope, "'Execution'");
InstructionDesc[OpGroupUMaxNonUniformAMD].operands.push(OperandGroupOperation, "'Operation'");
InstructionDesc[OpGroupUMaxNonUniformAMD].operands.push(OperandId, "X");
InstructionDesc[OpGroupSMaxNonUniformAMD].capabilities.push_back(CapabilityGroups);
InstructionDesc[OpGroupSMaxNonUniformAMD].operands.push(OperandScope, "'Execution'");
InstructionDesc[OpGroupSMaxNonUniformAMD].operands.push(OperandGroupOperation, "'Operation'");
InstructionDesc[OpGroupSMaxNonUniformAMD].operands.push(OperandId, "X");
InstructionDesc[OpGroupFMaxNonUniformAMD].capabilities.push_back(CapabilityGroups);
InstructionDesc[OpGroupFMaxNonUniformAMD].operands.push(OperandScope, "'Execution'");
InstructionDesc[OpGroupFMaxNonUniformAMD].operands.push(OperandGroupOperation, "'Operation'");
InstructionDesc[OpGroupFMaxNonUniformAMD].operands.push(OperandId, "X");
InstructionDesc[OpFragmentMaskFetchAMD].capabilities.push_back(CapabilityFragmentMaskAMD);
InstructionDesc[OpFragmentMaskFetchAMD].operands.push(OperandId, "'Image'");
InstructionDesc[OpFragmentMaskFetchAMD].operands.push(OperandId, "'Coordinate'");
InstructionDesc[OpFragmentFetchAMD].capabilities.push_back(CapabilityFragmentMaskAMD);
InstructionDesc[OpFragmentFetchAMD].operands.push(OperandId, "'Image'");
InstructionDesc[OpFragmentFetchAMD].operands.push(OperandId, "'Coordinate'");
InstructionDesc[OpFragmentFetchAMD].operands.push(OperandId, "'Fragment Index'");
#endif
}
}; // end spv namespace
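The *String helpers extended in this hunk are plain switch lookups, while Parameterize() separately fills InstructionDesc with per-opcode operand descriptions for the disassembler. A minimal usage sketch, assuming the translation unit is compiled and linked together with glslang's doc.cpp; OpcodeString and CapabilityString are declared in the doc.h hunk below, StorageClassString's definition is visible in the hunk above, and the expected strings come from the cases added in this diff (OpLoopMerge = 246 is taken from the SPIR-V spec, not from a line shown here).

```cpp
// Sketch only: name lookups against the glslang SPIRV "doc" tables.
#include <cstdio>
#include "doc.h"  // assumed include path for third_party/glslang-spirv

int main() {
    // The *String helpers are plain switches and need no setup; Parameterize()
    // would additionally populate InstructionDesc for operand-level disassembly.
    std::printf("opcode 246        -> %s\n", spv::OpcodeString(246));         // "OpLoopMerge"
    std::printf("capability 4427   -> %s\n", spv::CapabilityString(4427));    // "DrawParameters"
    std::printf("storage class 12  -> %s\n", spv::StorageClassString(12));    // "StorageBuffer"
    return 0;
}
```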

View File

@ -1,11 +1,11 @@
//
//Copyright (C) 2014-2015 LunarG, Inc.
// Copyright (C) 2014-2015 LunarG, Inc.
//
//All rights reserved.
// All rights reserved.
//
//Redistribution and use in source and binary forms, with or without
//modification, are permitted provided that the following conditions
//are met:
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
//
// Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
@ -19,27 +19,25 @@
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
//THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
//"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
//LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
//FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
//COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
//INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
//BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
//LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
//CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
//LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
//ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
//POSSIBILITY OF SUCH DAMAGE.
//
// Author: John Kessenich, LunarG
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
// COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
// POSSIBILITY OF SUCH DAMAGE.
//
// Parameterize the SPIR-V enumerants.
//
#pragma once
#include "spirv.hpp"
#include <vector>
@ -67,6 +65,8 @@ const char* SamplerFilterModeString(int);
const char* ImageFormatString(int);
const char* ImageChannelOrderString(int);
const char* ImageChannelTypeString(int);
const char* ImageChannelDataTypeString(int type);
const char* ImageOperandsString(int format);
const char* ImageOperands(int);
const char* FPFastMathString(int);
const char* FPRoundingModeString(int);
@ -81,6 +81,7 @@ const char* KernelEnqueueFlagsString(int);
const char* KernelProfilingInfoString(int);
const char* CapabilityString(int);
const char* OpcodeString(int);
const char* ScopeString(int mem);
// For grouping opcodes into subsections
enum OpcodeClass {
@ -150,7 +151,7 @@ enum OperandClass {
OperandMemorySemantics,
OperandMemoryAccess,
OperandScope,
OperandGroupOperation,
OperandGroupOperation,
OperandKernelEnqueueFlags,
OperandKernelProfilingInfo,
OperandCapability,

1078 third_party/glslang-spirv/hex_float.h (vendored, new file)

File diff suppressed because it is too large.

View File

@ -1,4 +1,4 @@
// Copyright (c) 2014-2016 The Khronos Group Inc.
// Copyright (c) 2014-2018 The Khronos Group Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and/or associated documentation files (the "Materials"),
@ -46,11 +46,11 @@ namespace spv {
typedef unsigned int Id;
#define SPV_VERSION 0x10000
#define SPV_VERSION 0x10200
#define SPV_REVISION 3
static const unsigned int MagicNumber = 0x07230203;
static const unsigned int Version = 0x00010000;
static const unsigned int Version = 0x00010200;
static const unsigned int Revision = 3;
static const unsigned int OpCodeMask = 0xffff;
static const unsigned int WordCountShift = 16;
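A short sketch of how the constants in this hunk are read: the version word packs the major version in bits 16..23 and the minor version in bits 8..15, so the new 0x00010200 is SPIR-V 1.2, and an instruction's first word packs its total word count above WordCountShift with its opcode under OpCodeMask. The values below are copied from the hunk; OpLoopMerge = 246 is from the SPIR-V spec, not shown in this diff.

```cpp
// Sketch: decoding the version word and encoding an instruction's first word.
#include <cstdint>
#include <cstdio>

int main() {
    const uint32_t version = 0x00010200;         // spv::Version after this update
    unsigned major = (version >> 16) & 0xff;     // bits 16..23
    unsigned minor = (version >> 8) & 0xff;      // bits 8..15
    std::printf("SPIR-V %u.%u\n", major, minor); // prints "SPIR-V 1.2"

    const uint32_t OpCodeMask = 0xffff, WordCountShift = 16, OpLoopMerge = 246;
    // A plain OpLoopMerge (merge block, continue target, loop control) is 4 words.
    uint32_t firstWord = (4u << WordCountShift) | (OpLoopMerge & OpCodeMask);
    std::printf("0x%08x\n", firstWord);          // prints "0x000400f6"
    return 0;
}
```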
@ -61,6 +61,8 @@ enum SourceLanguage {
SourceLanguageGLSL = 2,
SourceLanguageOpenCL_C = 3,
SourceLanguageOpenCL_CPP = 4,
SourceLanguageHLSL = 5,
SourceLanguageMax = 0x7fffffff,
};
enum ExecutionModel {
@ -71,18 +73,21 @@ enum ExecutionModel {
ExecutionModelFragment = 4,
ExecutionModelGLCompute = 5,
ExecutionModelKernel = 6,
ExecutionModelMax = 0x7fffffff,
};
enum AddressingModel {
AddressingModelLogical = 0,
AddressingModelPhysical32 = 1,
AddressingModelPhysical64 = 2,
AddressingModelMax = 0x7fffffff,
};
enum MemoryModel {
MemoryModelSimple = 0,
MemoryModelGLSL450 = 1,
MemoryModelOpenCL = 2,
MemoryModelMax = 0x7fffffff,
};
enum ExecutionMode {
@ -117,6 +122,16 @@ enum ExecutionMode {
ExecutionModeOutputTriangleStrip = 29,
ExecutionModeVecTypeHint = 30,
ExecutionModeContractionOff = 31,
ExecutionModeInitializer = 33,
ExecutionModeFinalizer = 34,
ExecutionModeSubgroupSize = 35,
ExecutionModeSubgroupsPerWorkgroup = 36,
ExecutionModeSubgroupsPerWorkgroupId = 37,
ExecutionModeLocalSizeId = 38,
ExecutionModeLocalSizeHintId = 39,
ExecutionModePostDepthCoverage = 4446,
ExecutionModeStencilRefReplacingEXT = 5027,
ExecutionModeMax = 0x7fffffff,
};
enum StorageClass {
@ -132,6 +147,8 @@ enum StorageClass {
StorageClassPushConstant = 9,
StorageClassAtomicCounter = 10,
StorageClassImage = 11,
StorageClassStorageBuffer = 12,
StorageClassMax = 0x7fffffff,
};
enum Dim {
@ -142,6 +159,7 @@ enum Dim {
DimRect = 4,
DimBuffer = 5,
DimSubpassData = 6,
DimMax = 0x7fffffff,
};
enum SamplerAddressingMode {
@ -150,11 +168,13 @@ enum SamplerAddressingMode {
SamplerAddressingModeClamp = 2,
SamplerAddressingModeRepeat = 3,
SamplerAddressingModeRepeatMirrored = 4,
SamplerAddressingModeMax = 0x7fffffff,
};
enum SamplerFilterMode {
SamplerFilterModeNearest = 0,
SamplerFilterModeLinear = 1,
SamplerFilterModeMax = 0x7fffffff,
};
enum ImageFormat {
@ -198,6 +218,7 @@ enum ImageFormat {
ImageFormatRg8ui = 37,
ImageFormatR16ui = 38,
ImageFormatR8ui = 39,
ImageFormatMax = 0x7fffffff,
};
enum ImageChannelOrder {
@ -220,6 +241,8 @@ enum ImageChannelOrder {
ImageChannelOrdersRGBx = 16,
ImageChannelOrdersRGBA = 17,
ImageChannelOrdersBGRA = 18,
ImageChannelOrderABGR = 19,
ImageChannelOrderMax = 0x7fffffff,
};
enum ImageChannelDataType {
@ -240,6 +263,7 @@ enum ImageChannelDataType {
ImageChannelDataTypeFloat = 14,
ImageChannelDataTypeUnormInt24 = 15,
ImageChannelDataTypeUnormInt101010_2 = 16,
ImageChannelDataTypeMax = 0x7fffffff,
};
enum ImageOperandsShift {
@ -251,6 +275,7 @@ enum ImageOperandsShift {
ImageOperandsConstOffsetsShift = 5,
ImageOperandsSampleShift = 6,
ImageOperandsMinLodShift = 7,
ImageOperandsMax = 0x7fffffff,
};
enum ImageOperandsMask {
@ -271,6 +296,7 @@ enum FPFastMathModeShift {
FPFastMathModeNSZShift = 2,
FPFastMathModeAllowRecipShift = 3,
FPFastMathModeFastShift = 4,
FPFastMathModeMax = 0x7fffffff,
};
enum FPFastMathModeMask {
@ -287,17 +313,20 @@ enum FPRoundingMode {
FPRoundingModeRTZ = 1,
FPRoundingModeRTP = 2,
FPRoundingModeRTN = 3,
FPRoundingModeMax = 0x7fffffff,
};
enum LinkageType {
LinkageTypeExport = 0,
LinkageTypeImport = 1,
LinkageTypeMax = 0x7fffffff,
};
enum AccessQualifier {
AccessQualifierReadOnly = 0,
AccessQualifierWriteOnly = 1,
AccessQualifierReadWrite = 2,
AccessQualifierMax = 0x7fffffff,
};
enum FunctionParameterAttribute {
@ -309,6 +338,7 @@ enum FunctionParameterAttribute {
FunctionParameterAttributeNoCapture = 5,
FunctionParameterAttributeNoWrite = 6,
FunctionParameterAttributeNoReadWrite = 7,
FunctionParameterAttributeMax = 0x7fffffff,
};
enum Decoration {
@ -355,6 +385,15 @@ enum Decoration {
DecorationNoContraction = 42,
DecorationInputAttachmentIndex = 43,
DecorationAlignment = 44,
DecorationMaxByteOffset = 45,
DecorationAlignmentId = 46,
DecorationMaxByteOffsetId = 47,
DecorationExplicitInterpAMD = 4999,
DecorationOverrideCoverageNV = 5248,
DecorationPassthroughNV = 5250,
DecorationViewportRelativeNV = 5252,
DecorationSecondaryViewportRelativeNV = 5256,
DecorationMax = 0x7fffffff,
};
enum BuiltIn {
@ -399,11 +438,37 @@ enum BuiltIn {
BuiltInSubgroupLocalInvocationId = 41,
BuiltInVertexIndex = 42,
BuiltInInstanceIndex = 43,
BuiltInSubgroupEqMaskKHR = 4416,
BuiltInSubgroupGeMaskKHR = 4417,
BuiltInSubgroupGtMaskKHR = 4418,
BuiltInSubgroupLeMaskKHR = 4419,
BuiltInSubgroupLtMaskKHR = 4420,
BuiltInBaseVertex = 4424,
BuiltInBaseInstance = 4425,
BuiltInDrawIndex = 4426,
BuiltInDeviceIndex = 4438,
BuiltInViewIndex = 4440,
BuiltInBaryCoordNoPerspAMD = 4992,
BuiltInBaryCoordNoPerspCentroidAMD = 4993,
BuiltInBaryCoordNoPerspSampleAMD = 4994,
BuiltInBaryCoordSmoothAMD = 4995,
BuiltInBaryCoordSmoothCentroidAMD = 4996,
BuiltInBaryCoordSmoothSampleAMD = 4997,
BuiltInBaryCoordPullModelAMD = 4998,
BuiltInFragStencilRefEXT = 5014,
BuiltInViewportMaskNV = 5253,
BuiltInSecondaryPositionNV = 5257,
BuiltInSecondaryViewportMaskNV = 5258,
BuiltInPositionPerViewNV = 5261,
BuiltInViewportMaskPerViewNV = 5262,
BuiltInFullyCoveredEXT = 5264,
BuiltInMax = 0x7fffffff,
};
enum SelectionControlShift {
SelectionControlFlattenShift = 0,
SelectionControlDontFlattenShift = 1,
SelectionControlMax = 0x7fffffff,
};
enum SelectionControlMask {
@ -415,12 +480,17 @@ enum SelectionControlMask {
enum LoopControlShift {
LoopControlUnrollShift = 0,
LoopControlDontUnrollShift = 1,
LoopControlDependencyInfiniteShift = 2,
LoopControlDependencyLengthShift = 3,
LoopControlMax = 0x7fffffff,
};
enum LoopControlMask {
LoopControlMaskNone = 0,
LoopControlUnrollMask = 0x00000001,
LoopControlDontUnrollMask = 0x00000002,
LoopControlDependencyInfiniteMask = 0x00000004,
LoopControlDependencyLengthMask = 0x00000008,
};
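The two new loop-control bits pair with the OperandOptionalLiteral pushed onto OpLoopMerge in the doc.cpp hunk earlier: per the SPIR-V spec, a DependencyLength control is followed by one extra literal word giving the maximum dependency distance. A minimal sketch of combining the bits; the enum is copied from the hunk above so the snippet stands alone, and plain integer arithmetic is used so the operator| overloads at the end of this header are not required.

```cpp
// Sketch: combining loop-control bits for an OpLoopMerge.
#include <cstdint>
#include <cstdio>

enum LoopControlMask {  // values copied from the hunk above
    LoopControlMaskNone = 0,
    LoopControlUnrollMask = 0x00000001,
    LoopControlDontUnrollMask = 0x00000002,
    LoopControlDependencyInfiniteMask = 0x00000004,
    LoopControlDependencyLengthMask = 0x00000008,
};

int main() {
    uint32_t control = LoopControlUnrollMask | LoopControlDependencyLengthMask;
    uint32_t dependencyLength = 4;  // literal operand that follows the mask word
    std::printf("loop control 0x%x, extra literal %u\n", control, dependencyLength);
    return 0;  // prints "loop control 0x9, extra literal 4"
}
```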
enum FunctionControlShift {
@ -428,6 +498,7 @@ enum FunctionControlShift {
FunctionControlDontInlineShift = 1,
FunctionControlPureShift = 2,
FunctionControlConstShift = 3,
FunctionControlMax = 0x7fffffff,
};
enum FunctionControlMask {
@ -449,6 +520,7 @@ enum MemorySemanticsShift {
MemorySemanticsCrossWorkgroupMemoryShift = 9,
MemorySemanticsAtomicCounterMemoryShift = 10,
MemorySemanticsImageMemoryShift = 11,
MemorySemanticsMax = 0x7fffffff,
};
enum MemorySemanticsMask {
@ -469,6 +541,7 @@ enum MemoryAccessShift {
MemoryAccessVolatileShift = 0,
MemoryAccessAlignedShift = 1,
MemoryAccessNontemporalShift = 2,
MemoryAccessMax = 0x7fffffff,
};
enum MemoryAccessMask {
@ -484,22 +557,26 @@ enum Scope {
ScopeWorkgroup = 2,
ScopeSubgroup = 3,
ScopeInvocation = 4,
ScopeMax = 0x7fffffff,
};
enum GroupOperation {
GroupOperationReduce = 0,
GroupOperationInclusiveScan = 1,
GroupOperationExclusiveScan = 2,
GroupOperationMax = 0x7fffffff,
};
enum KernelEnqueueFlags {
KernelEnqueueFlagsNoWait = 0,
KernelEnqueueFlagsWaitKernel = 1,
KernelEnqueueFlagsWaitWorkGroup = 2,
KernelEnqueueFlagsMax = 0x7fffffff,
};
enum KernelProfilingInfoShift {
KernelProfilingInfoCmdExecTimeShift = 0,
KernelProfilingInfoMax = 0x7fffffff,
};
enum KernelProfilingInfoMask {
@ -564,6 +641,40 @@ enum Capability {
CapabilityStorageImageReadWithoutFormat = 55,
CapabilityStorageImageWriteWithoutFormat = 56,
CapabilityMultiViewport = 57,
CapabilitySubgroupDispatch = 58,
CapabilityNamedBarrier = 59,
CapabilityPipeStorage = 60,
CapabilitySubgroupBallotKHR = 4423,
CapabilityDrawParameters = 4427,
CapabilitySubgroupVoteKHR = 4431,
CapabilityStorageBuffer16BitAccess = 4433,
CapabilityStorageUniformBufferBlock16 = 4433,
CapabilityStorageUniform16 = 4434,
CapabilityUniformAndStorageBuffer16BitAccess = 4434,
CapabilityStoragePushConstant16 = 4435,
CapabilityStorageInputOutput16 = 4436,
CapabilityDeviceGroup = 4437,
CapabilityMultiView = 4439,
CapabilityVariablePointersStorageBuffer = 4441,
CapabilityVariablePointers = 4442,
CapabilityAtomicStorageOps = 4445,
CapabilitySampleMaskPostDepthCoverage = 4447,
CapabilityImageGatherBiasLodAMD = 5009,
CapabilityFragmentMaskAMD = 5010,
CapabilityStencilExportEXT = 5013,
CapabilityImageReadWriteLodAMD = 5015,
CapabilitySampleMaskOverrideCoverageNV = 5249,
CapabilityGeometryShaderPassthroughNV = 5251,
CapabilityShaderViewportIndexLayerEXT = 5254,
CapabilityShaderViewportIndexLayerNV = 5254,
CapabilityShaderViewportMaskNV = 5255,
CapabilityShaderStereoViewNV = 5259,
CapabilityPerViewAttributesNV = 5260,
CapabilityFragmentFullyCoveredEXT = 5265,
CapabilitySubgroupShuffleINTEL = 5568,
CapabilitySubgroupBufferBlockIOINTEL = 5569,
CapabilitySubgroupImageBlockIOINTEL = 5570,
CapabilityMax = 0x7fffffff,
};
enum Op {
@ -861,6 +972,43 @@ enum Op {
OpAtomicFlagTestAndSet = 318,
OpAtomicFlagClear = 319,
OpImageSparseRead = 320,
OpSizeOf = 321,
OpTypePipeStorage = 322,
OpConstantPipeStorage = 323,
OpCreatePipeFromPipeStorage = 324,
OpGetKernelLocalSizeForSubgroupCount = 325,
OpGetKernelMaxNumSubgroups = 326,
OpTypeNamedBarrier = 327,
OpNamedBarrierInitialize = 328,
OpMemoryNamedBarrier = 329,
OpModuleProcessed = 330,
OpExecutionModeId = 331,
OpDecorateId = 332,
OpSubgroupBallotKHR = 4421,
OpSubgroupFirstInvocationKHR = 4422,
OpSubgroupAllKHR = 4428,
OpSubgroupAnyKHR = 4429,
OpSubgroupAllEqualKHR = 4430,
OpSubgroupReadInvocationKHR = 4432,
OpGroupIAddNonUniformAMD = 5000,
OpGroupFAddNonUniformAMD = 5001,
OpGroupFMinNonUniformAMD = 5002,
OpGroupUMinNonUniformAMD = 5003,
OpGroupSMinNonUniformAMD = 5004,
OpGroupFMaxNonUniformAMD = 5005,
OpGroupUMaxNonUniformAMD = 5006,
OpGroupSMaxNonUniformAMD = 5007,
OpFragmentMaskFetchAMD = 5011,
OpFragmentFetchAMD = 5012,
OpSubgroupShuffleINTEL = 5571,
OpSubgroupShuffleDownINTEL = 5572,
OpSubgroupShuffleUpINTEL = 5573,
OpSubgroupShuffleXorINTEL = 5574,
OpSubgroupBlockReadINTEL = 5575,
OpSubgroupBlockWriteINTEL = 5576,
OpSubgroupImageBlockReadINTEL = 5577,
OpSubgroupImageBlockWriteINTEL = 5578,
OpMax = 0x7fffffff,
};
// Overload operator| for mask bit combining
@ -877,3 +1025,4 @@ inline KernelProfilingInfoMask operator|(KernelProfilingInfoMask a, KernelProfil
} // end namespace spv
#endif // #ifndef spirv_HPP
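The header ends with operator| overloads so that or-ing two mask enumerants keeps the enum type instead of decaying to int. Only the KernelProfilingInfoMask overload is visible in this hunk, so the sketch below re-creates the same pattern on a local copy of MemoryAccessMask (values derived from the MemoryAccessShift entries above as 1 << shift) rather than assuming which other overloads the header provides.

```cpp
// Sketch of the operator| pattern used for the *Mask enums in this header.
#include <cstdio>

enum MemoryAccessMask {          // derived from MemoryAccessShift: 1 << shift
    MemoryAccessMaskNone = 0,
    MemoryAccessVolatileMask = 0x1,
    MemoryAccessAlignedMask = 0x2,
    MemoryAccessNontemporalMask = 0x4,
};

// Without this overload, VolatileMask | AlignedMask yields an int and would
// need a cast back to MemoryAccessMask.
inline MemoryAccessMask operator|(MemoryAccessMask a, MemoryAccessMask b) {
    return MemoryAccessMask(unsigned(a) | unsigned(b));
}

int main() {
    MemoryAccessMask access = MemoryAccessVolatileMask | MemoryAccessAlignedMask;
    std::printf("memory access mask: 0x%x\n", unsigned(access));  // 0x3
    return 0;
}
```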

View File

@ -1,11 +1,11 @@
//
//Copyright (C) 2014 LunarG, Inc.
// Copyright (C) 2014 LunarG, Inc.
//
//All rights reserved.
// All rights reserved.
//
//Redistribution and use in source and binary forms, with or without
//modification, are permitted provided that the following conditions
//are met:
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
//
// Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
@ -19,30 +19,26 @@
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
//THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
//"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
//LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
//FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
//COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
//INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
//BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
//LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
//CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
//LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
//ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
//POSSIBILITY OF SUCH DAMAGE.
//
// Author: John Kessenich, LunarG
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
// COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
// POSSIBILITY OF SUCH DAMAGE.
// SPIRV-IR
//
// Simple in-memory representation (IR) of SPIRV. Just for holding
// Each function's CFG of blocks. Has this hierarchy:
// - Module, which is a list of
// - Function, which is a list of
// - Block, which is a list of
// - Module, which is a list of
// - Function, which is a list of
// - Block, which is a list of
// - Instruction
//
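A self-contained illustration of the containment the comment above describes, using stand-in structs rather than the real spv classes (which, as the hunks below show, hold instructions through std::unique_ptr and keep parent back-pointers): a Module owns Functions, a Function owns Blocks, and a Block owns Instructions.

```cpp
// Illustrative only: the Module -> Function -> Block -> Instruction nesting.
#include <cstdio>
#include <vector>

struct Instruction { unsigned opcode; };
struct Block       { std::vector<Instruction> instructions; };
struct Function    { std::vector<Block> blocks; };
struct Module      { std::vector<Function> functions; };

int main() {
    Block b;
    b.instructions.push_back({248});  // OpLabel
    b.instructions.push_back({253});  // OpReturn
    Function f;
    f.blocks.push_back(b);
    Module m;
    m.functions.push_back(f);
    std::printf("functions=%zu blocks=%zu instructions=%zu\n",
                m.functions.size(),
                m.functions[0].blocks.size(),
                m.functions[0].blocks[0].instructions.size());
    return 0;  // prints "functions=1 blocks=1 instructions=2"
}
```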
@ -68,17 +64,18 @@ class Module;
const Id NoResult = 0;
const Id NoType = 0;
const unsigned int BadValue = 0xFFFFFFFF;
const Decoration NoPrecision = (Decoration)BadValue;
const MemorySemanticsMask MemorySemanticsAllMemory =
(MemorySemanticsMask)(MemorySemanticsAcquireMask |
MemorySemanticsReleaseMask |
MemorySemanticsAcquireReleaseMask |
MemorySemanticsSequentiallyConsistentMask |
MemorySemanticsUniformMemoryMask |
MemorySemanticsSubgroupMemoryMask |
const Decoration NoPrecision = DecorationMax;
#ifdef __GNUC__
# define POTENTIALLY_UNUSED __attribute__((unused))
#else
# define POTENTIALLY_UNUSED
#endif
POTENTIALLY_UNUSED
const MemorySemanticsMask MemorySemanticsAllMemory =
(MemorySemanticsMask)(MemorySemanticsUniformMemoryMask |
MemorySemanticsWorkgroupMemoryMask |
MemorySemanticsCrossWorkgroupMemoryMask |
MemorySemanticsAtomicCounterMemoryMask |
MemorySemanticsImageMemoryMask);
@ -95,7 +92,6 @@ public:
void addImmediateOperand(unsigned int immediate) { operands.push_back(immediate); }
void addStringOperand(const char* str)
{
originalString = str;
unsigned int word;
char* wordString = (char*)&word;
char* wordPtr = wordString;
@ -128,7 +124,6 @@ public:
Id getTypeId() const { return typeId; }
Id getIdOperand(int op) const { return operands[op]; }
unsigned int getImmediateOperand(int op) const { return operands[op]; }
const char* getStringOperand() const { return originalString.c_str(); }
// Write out the binary form.
void dump(std::vector<unsigned int>& out) const
@ -159,7 +154,6 @@ protected:
Id typeId;
Op opCode;
std::vector<Id> operands;
std::string originalString; // could be optimized away; convenience for getting string operand
Block* block;
};
@ -180,13 +174,11 @@ public:
void addInstruction(std::unique_ptr<Instruction> inst);
void addPredecessor(Block* pred) { predecessors.push_back(pred); pred->successors.push_back(this);}
void addLocalVariable(std::unique_ptr<Instruction> inst) { localVariables.push_back(std::move(inst)); }
void insertInstruction(size_t pos, std::unique_ptr<Instruction> inst);
size_t getInstructionCount() { return instructions.size(); }
Instruction* getInstruction(size_t i) { return instructions[i].get(); }
void removeInstruction(size_t i) { instructions.erase(instructions.begin() + i); }
const std::vector<Block*>& getPredecessors() const { return predecessors; }
const std::vector<Block*>& getSuccessors() const { return successors; }
const std::vector<std::unique_ptr<Instruction> >& getInstructions() const {
return instructions;
}
void setUnreachable() { unreachable = true; }
bool isUnreachable() const { return unreachable; }
// Returns the block's merge instruction, if one exists (otherwise null).
@ -205,10 +197,6 @@ public:
bool isTerminated() const
{
if (instructions.size() == 0) {
return false;
}
switch (instructions.back()->getOpCode()) {
case OpBranch:
case OpBranchConditional:
@ -224,7 +212,6 @@ public:
void dump(std::vector<unsigned int>& out) const
{
// OpLabel
instructions[0]->dump(out);
for (int i = 0; i < (int)localVariables.size(); ++i)
localVariables[i]->dump(out);
@ -232,51 +219,7 @@ public:
instructions[i]->dump(out);
}
// Moves all instructions from a target block into this block, and removes
// the target block from our list of successors.
// This function assumes this block unconditionally branches to the target
// block directly.
void merge(Block* target_block) {
if (isTerminated()) {
instructions.erase(instructions.end() - 1);
}
// Find the target block in our successors first.
for (auto it = successors.begin(); it != successors.end(); ++it) {
if (*it == target_block) {
it = successors.erase(it);
break;
}
}
// Add target block's successors to our successors.
successors.insert(successors.end(), target_block->successors.begin(),
target_block->successors.end());
// For each successor, replace the target block in their predecessors with
// us.
for (auto block : successors) {
std::replace(block->predecessors.begin(), block->predecessors.end(),
target_block, this);
}
// Move instructions from target block into this block.
for (auto it = target_block->instructions.begin();
it != target_block->instructions.end();) {
if ((*it)->getOpCode() == spv::Op::OpLabel) {
++it;
continue;
}
instructions.push_back(std::move(*it));
it = target_block->instructions.erase(it);
}
target_block->predecessors.clear();
target_block->successors.clear();
}
protected:
protected:
Block(const Block&);
Block& operator=(Block&);
@ -288,7 +231,7 @@ public:
std::vector<std::unique_ptr<Instruction> > localVariables;
Function& parent;
// track whether this block is known to be uncreachable (not necessarily
// track whether this block is known to be uncreachable (not necessarily
// true for all unreachable blocks, but should be set at least
// for the extraneous ones introduced by the builder).
bool unreachable;
@ -329,19 +272,13 @@ public:
Module& getParent() const { return parent; }
Block* getEntryBlock() const { return blocks.front(); }
Block* getLastBlock() const { return blocks.back(); }
Block* findBlockById(Id id)
{
for (auto block : blocks) {
if (block->getId() == id) {
return block;
}
}
return nullptr;
}
std::vector<Block*>& getBlocks() { return blocks; }
const std::vector<Block*>& getBlocks() const { return blocks; }
void addLocalVariable(std::unique_ptr<Instruction> inst);
Id getReturnType() const { return functionInstruction.getTypeId(); }
void setImplicitThis() { implicitThis = true; }
bool hasImplicitThis() const { return implicitThis; }
void dump(std::vector<unsigned int>& out) const
{
// OpFunction
@ -365,6 +302,7 @@ protected:
Instruction functionInstruction;
std::vector<Instruction*> parameterInstructions;
std::vector<Block*> blocks;
bool implicitThis; // true if this is a member function expecting to be passed a 'this' as the first argument
};
//
@ -380,8 +318,6 @@ public:
}
void addFunction(Function *fun) { functions.push_back(fun); }
const std::vector<Function*>& getFunctions() const { return functions; }
std::vector<Function*>& getFunctions() { return functions; }
void mapInstruction(Instruction *instruction)
{
@ -393,6 +329,7 @@ public:
}
Instruction* getInstruction(Id id) const { return idToInstruction[id]; }
const std::vector<Function*>& getFunctions() const { return functions; }
spv::Id getTypeId(Id resultId) const { return idToInstruction[resultId]->getTypeId(); }
StorageClass getStorageClass(Id typeId) const
{
@ -424,7 +361,7 @@ protected:
// - the OpFunction instruction
// - all the OpFunctionParameter instructions
__inline Function::Function(Id id, Id resultType, Id functionType, Id firstParamId, Module& parent)
: parent(parent), functionInstruction(id, resultType, OpFunction)
: parent(parent), functionInstruction(id, resultType, OpFunction), implicitThis(false)
{
// OpFunction
functionInstruction.addImmediateOperand(FunctionControlMaskNone);
@ -465,14 +402,6 @@ __inline void Block::addInstruction(std::unique_ptr<Instruction> inst)
parent.getParent().mapInstruction(raw_instruction);
}
__inline void Block::insertInstruction(size_t pos, std::unique_ptr<Instruction> inst) {
Instruction* raw_instruction = inst.get();
instructions.insert(instructions.begin() + pos, std::move(inst));
raw_instruction->setBlock(this);
if (raw_instruction->getResultId())
parent.getParent().mapInstruction(raw_instruction);
}
}; // end spv namespace
#endif // spvIR_H