Externals: Update glslang.

This updates glslang to commit 4fc7a33910fb8e40b970d160e1b38ab3f67fe0f3,
which is the current version listed in the known_good.json file for
version 1.2.131.2 of the Vulkan-ValidationLayers repo.
orbea 2020-03-02 07:42:00 -08:00
parent b3c705fa96
commit 690dee3533
126 changed files with 29560 additions and 13594 deletions

(changed file, name not shown)

@ -6,10 +6,17 @@ if (POLICY CMP0048)
endif()
set_property(GLOBAL PROPERTY USE_FOLDERS ON)
# Enable compile commands database
set(CMAKE_EXPORT_COMPILE_COMMANDS ON)
# Adhere to GNU filesystem layout conventions
include(GNUInstallDirs)
# Needed for CMAKE_DEPENDENT_OPTION macro
include(CMakeDependentOption)
option(BUILD_SHARED_LIBS "Build Shared Libraries" OFF)
option(BUILD_EXTERNAL "Build external dependencies in /External" ON)
set(LIB_TYPE STATIC)
@ -21,36 +28,62 @@ option(SKIP_GLSLANG_INSTALL "Skip installation" ${SKIP_GLSLANG_INSTALL})
if(NOT ${SKIP_GLSLANG_INSTALL})
set(ENABLE_GLSLANG_INSTALL ON)
endif()
option(ENABLE_SPVREMAPPER "Enables building of SPVRemapper" ON)
option(ENABLE_AMD_EXTENSIONS "Enables support of AMD-specific extensions" ON)
option(ENABLE_GLSLANG_BINARIES "Builds glslangValidator and spirv-remap" ON)
option(ENABLE_NV_EXTENSIONS "Enables support of Nvidia-specific extensions" ON)
option(ENABLE_GLSLANG_WEB "Reduces glslang to minimum needed for web use" OFF)
option(ENABLE_GLSLANG_WEB_DEVEL "For ENABLE_GLSLANG_WEB builds, enables compilation error messages" OFF)
option(ENABLE_EMSCRIPTEN_SINGLE_FILE "If using Emscripten, enables SINGLE_FILE build" OFF)
option(ENABLE_EMSCRIPTEN_ENVIRONMENT_NODE "If using Emscripten, builds to run on Node instead of Web" OFF)
option(ENABLE_HLSL "Enables HLSL input support" ON)
CMAKE_DEPENDENT_OPTION(ENABLE_HLSL "Enables HLSL input support" ON "NOT ENABLE_GLSLANG_WEB" OFF)
option(ENABLE_OPT "Enables spirv-opt capability if present" ON)
option(ENABLE_PCH "Enables Precompiled header" ON)
option(ENABLE_CTEST "Enables testing" ON)
if(CMAKE_INSTALL_PREFIX_INITIALIZED_TO_DEFAULT AND WIN32)
set(CMAKE_INSTALL_PREFIX "install" CACHE STRING "..." FORCE)
endif()
option(USE_CCACHE "Use ccache" OFF)
if(USE_CCACHE)
find_program(CCACHE_FOUND ccache)
if(CCACHE_FOUND)
set_property(GLOBAL PROPERTY RULE_LAUNCH_COMPILE ccache)
endif(CCACHE_FOUND)
endif()
# Precompiled header macro. Parameters are source file list and filename for pch cpp file.
macro(glslang_pch SRCS PCHCPP)
if(MSVC AND CMAKE_GENERATOR MATCHES "^Visual Studio" AND ENABLE_PCH)
set(PCH_NAME "$(IntDir)\\pch.pch")
# make source files use/depend on PCH_NAME
set_source_files_properties(${${SRCS}} PROPERTIES COMPILE_FLAGS "/Yupch.h /FIpch.h /Fp${PCH_NAME} /Zm300" OBJECT_DEPENDS "${PCH_NAME}")
# make PCHCPP file compile and generate PCH_NAME
set_source_files_properties(${PCHCPP} PROPERTIES COMPILE_FLAGS "/Ycpch.h /Fp${PCH_NAME} /Zm300" OBJECT_OUTPUTS "${PCH_NAME}")
list(APPEND ${SRCS} "${PCHCPP}")
endif()
endmacro(glslang_pch)
project(glslang)
# make testing optional
include(CTest)
if(ENABLE_AMD_EXTENSIONS)
add_definitions(-DAMD_EXTENSIONS)
endif(ENABLE_AMD_EXTENSIONS)
if(ENABLE_NV_EXTENSIONS)
add_definitions(-DNV_EXTENSIONS)
endif(ENABLE_NV_EXTENSIONS)
if(ENABLE_CTEST)
include(CTest)
endif()
if(ENABLE_HLSL)
add_definitions(-DENABLE_HLSL)
endif(ENABLE_HLSL)
if(ENABLE_GLSLANG_WEB)
add_definitions(-DGLSLANG_WEB)
if(ENABLE_GLSLANG_WEB_DEVEL)
add_definitions(-DGLSLANG_WEB_DEVEL)
endif(ENABLE_GLSLANG_WEB_DEVEL)
endif(ENABLE_GLSLANG_WEB)
if(WIN32)
set(CMAKE_DEBUG_POSTFIX "d")
if(MSVC)
@ -67,12 +100,40 @@ if(${CMAKE_CXX_COMPILER_ID} MATCHES "GNU")
add_compile_options(-Wall -Wmaybe-uninitialized -Wuninitialized -Wunused -Wunused-local-typedefs
-Wunused-parameter -Wunused-value -Wunused-variable -Wunused-but-set-parameter -Wunused-but-set-variable -fno-exceptions)
add_compile_options(-Wno-reorder) # disable this from -Wall, since it happens all over.
add_compile_options(-fno-rtti)
elseif(${CMAKE_CXX_COMPILER_ID} MATCHES "Clang")
add_compile_options(-Wall -Wuninitialized -Wunused -Wunused-local-typedefs
-Wunused-parameter -Wunused-value -Wunused-variable)
add_compile_options(-Wno-reorder) # disable this from -Wall, since it happens all over.
add_compile_options(-fno-rtti)
elseif(${CMAKE_CXX_COMPILER_ID} MATCHES "MSVC")
add_compile_options(/GR-) # Disable RTTI
endif()
if(EMSCRIPTEN)
add_compile_options(-Os -fno-exceptions)
add_compile_options("SHELL: -s WASM=1")
add_compile_options("SHELL: -s WASM_OBJECT_FILES=0")
add_link_options(-Os)
add_link_options("SHELL: -s FILESYSTEM=0")
add_link_options("SHELL: --llvm-lto 1")
add_link_options("SHELL: --closure 1")
add_link_options("SHELL: -s ALLOW_MEMORY_GROWTH=1")
if(ENABLE_EMSCRIPTEN_SINGLE_FILE)
add_link_options("SHELL: -s SINGLE_FILE=1")
endif(ENABLE_EMSCRIPTEN_SINGLE_FILE)
else()
if(ENABLE_GLSLANG_WEB)
if(MSVC)
add_compile_options(/Os /GR-)
else()
add_compile_options(-Os -fno-exceptions)
add_link_options(-Os)
endif()
endif(ENABLE_GLSLANG_WEB)
endif(EMSCRIPTEN)
# Request C++11
if(${CMAKE_VERSION} VERSION_LESS 3.1)
# CMake versions before 3.1 do not understand CMAKE_CXX_STANDARD
@ -93,8 +154,14 @@ function(glslang_set_link_args TARGET)
endif()
endfunction(glslang_set_link_args)
# We depend on these for later projects, so they should come first.
add_subdirectory(External)
# CMake needs to find the right version of python, right from the beginning,
# otherwise, it will find the wrong version and fail later
if(BUILD_EXTERNAL AND IS_DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR}/External)
find_package(PythonInterp 3 REQUIRED)
# We depend on these for later projects, so they should come first.
add_subdirectory(External)
endif()
if(NOT TARGET SPIRV-Tools-opt)
set(ENABLE_OPT OFF)
@ -119,4 +186,29 @@ add_subdirectory(SPIRV)
if(ENABLE_HLSL)
add_subdirectory(hlsl)
endif(ENABLE_HLSL)
add_subdirectory(gtests)
if(ENABLE_CTEST)
add_subdirectory(gtests)
endif()
if(BUILD_TESTING)
# glslang-testsuite runs a bash script on Windows.
# Make sure to use '-o igncr' flag to ignore carriage returns (\r).
set(IGNORE_CR_FLAG "")
if(WIN32)
set(IGNORE_CR_FLAG -o igncr)
endif()
if (CMAKE_CONFIGURATION_TYPES)
set(RESULTS_PATH ${CMAKE_CURRENT_BINARY_DIR}/$<CONFIGURATION>/localResults)
set(VALIDATOR_PATH ${CMAKE_CURRENT_BINARY_DIR}/StandAlone/$<CONFIGURATION>/glslangValidator)
set(REMAP_PATH ${CMAKE_CURRENT_BINARY_DIR}/StandAlone/$<CONFIGURATION>/spirv-remap)
else(CMAKE_CONFIGURATION_TYPES)
set(RESULTS_PATH ${CMAKE_CURRENT_BINARY_DIR}/localResults)
set(VALIDATOR_PATH ${CMAKE_CURRENT_BINARY_DIR}/StandAlone/glslangValidator)
set(REMAP_PATH ${CMAKE_CURRENT_BINARY_DIR}/StandAlone/spirv-remap)
endif(CMAKE_CONFIGURATION_TYPES)
add_test(NAME glslang-testsuite
COMMAND bash ${IGNORE_CR_FLAG} runtests ${RESULTS_PATH} ${VALIDATOR_PATH} ${REMAP_PATH}
WORKING_DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR}/Test/)
endif(BUILD_TESTING)

(changed file, name not shown)

@ -10,7 +10,8 @@ if(BUILD_TESTING)
if(WIN32)
set(gtest_force_shared_crt ON CACHE BOOL "" FORCE)
endif(WIN32)
add_subdirectory(googletest)
# EXCLUDE_FROM_ALL keeps the install target from installing GTEST files.
add_subdirectory(googletest EXCLUDE_FROM_ALL)
set(GTEST_TARGETS
gtest
gtest_main

Externals/glslang/LICENSE.txt (vendored, new file, 108 lines)

@ -0,0 +1,108 @@
Here, glslang proper means core GLSL parsing, HLSL parsing, and SPIR-V code
generation. Glslang proper requires use of two licenses, one that covers
non-preprocessing and an additional one that covers preprocessing.
Bison was removed long ago. You can build glslang from the source grammar,
using tools of your choice, without using bison or any bison files.
Other parts, outside of glslang proper, include:
- gl_types.h, only needed for OpenGL-like reflection, and can be left out of
a parse and codegen project. See it for its license.
- update_glslang_sources.py, which is not part of the project proper and does
not need to be used.
- the SPIR-V "remapper", which is optional, but has the same license as
glslang proper
- Google tests and SPIR-V tools, and anything in the external subdirectory
are external and optional; see them for their respective licenses.
--------------------------------------------------------------------------------
The core of glslang-proper, minus the preprocessor is licenced as follows:
//
// Copyright (C) 2015-2018 Google, Inc.
// Copyright (C) <various other dates and companies>
//
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
//
// Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
//
// Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
//
// Neither the name of 3Dlabs Inc. Ltd. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
// COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
// POSSIBILITY OF SUCH DAMAGE.
//
--------------------------------------------------------------------------------
The preprocessor has the core license stated above, plus an additional licence:
/****************************************************************************\
Copyright (c) 2002, NVIDIA Corporation.
NVIDIA Corporation("NVIDIA") supplies this software to you in
consideration of your agreement to the following terms, and your use,
installation, modification or redistribution of this NVIDIA software
constitutes acceptance of these terms. If you do not agree with these
terms, please do not use, install, modify or redistribute this NVIDIA
software.
In consideration of your agreement to abide by the following terms, and
subject to these terms, NVIDIA grants you a personal, non-exclusive
license, under NVIDIA's copyrights in this original NVIDIA software (the
"NVIDIA Software"), to use, reproduce, modify and redistribute the
NVIDIA Software, with or without modifications, in source and/or binary
forms; provided that if you redistribute the NVIDIA Software, you must
retain the copyright notice of NVIDIA, this notice and the following
text and disclaimers in all such redistributions of the NVIDIA Software.
Neither the name, trademarks, service marks nor logos of NVIDIA
Corporation may be used to endorse or promote products derived from the
NVIDIA Software without specific prior written permission from NVIDIA.
Except as expressly stated in this notice, no other rights or licenses
express or implied, are granted by NVIDIA herein, including but not
limited to any patent rights that may be infringed by your derivative
works or by other works in which the NVIDIA Software may be
incorporated. No hardware is licensed hereunder.
THE NVIDIA SOFTWARE IS BEING PROVIDED ON AN "AS IS" BASIS, WITHOUT
WARRANTIES OR CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED,
INCLUDING WITHOUT LIMITATION, WARRANTIES OR CONDITIONS OF TITLE,
NON-INFRINGEMENT, MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, OR
ITS USE AND OPERATION EITHER ALONE OR IN COMBINATION WITH OTHER
PRODUCTS.
IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY SPECIAL, INDIRECT,
INCIDENTAL, EXEMPLARY, CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
TO, LOST PROFITS; PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) OR ARISING IN ANY WAY
OUT OF THE USE, REPRODUCTION, MODIFICATION AND/OR DISTRIBUTION OF THE
NVIDIA SOFTWARE, HOWEVER CAUSED AND WHETHER UNDER THEORY OF CONTRACT,
TORT (INCLUDING NEGLIGENCE), STRICT LIABILITY OR OTHERWISE, EVEN IF
NVIDIA HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
\****************************************************************************/

(changed file, name not shown)

@ -9,6 +9,7 @@ if(WIN32)
endif(WIN32)
if(ENABLE_GLSLANG_INSTALL)
install(TARGETS OGLCompiler
install(TARGETS OGLCompiler EXPORT OGLCompilerTargets
ARCHIVE DESTINATION ${CMAKE_INSTALL_LIBDIR})
install(EXPORT OGLCompilerTargets DESTINATION ${CMAKE_INSTALL_LIBDIR}/cmake)
endif(ENABLE_GLSLANG_INSTALL)

(changed file, name not shown)

@ -28,6 +28,15 @@ comment in `glslang/MachineIndependent/Versions.cpp`.
Tasks waiting to be done are documented as GitHub issues.
Deprecations
------------
1. GLSLang, when installed through CMake, will install a `SPIRV` folder into
`${CMAKE_INSTALL_INCLUDEDIR}`. This `SPIRV` folder is being moved to
`glslang/SPIRV`. During the transition the `SPIRV` folder will be installed into
both locations. The old install of `SPIRV/` will be removed as a CMake install
target no sooner than May 1, 2020. See issue #1964.
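In practice this is an include-path change for projects that consume installed
glslang headers. A minimal sketch, assuming the program is compiled against an
installed tree and linked with the `SPIRV` and `glslang` libraries;
`GlslangToSpv.h` and `GetSpirvGeneratorVersion()` appear elsewhere in this
change, while the rest of the snippet is illustrative:
```cpp
// Old installed layout (still provided during the transition period):
//   #include <SPIRV/GlslangToSpv.h>
// New installed layout (preferred going forward):
#include <glslang/SPIRV/GlslangToSpv.h>

int main()
{
    // GetSpirvGeneratorVersion() needs no setup, so it doubles as a smoke
    // test that the header resolves from the new location and the SPIRV
    // library links correctly.
    return glslang::GetSpirvGeneratorVersion() > 0 ? 0 : 1;
}
```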
Execution of Standalone Wrapper
-------------------------------
@ -61,7 +70,7 @@ branch.
(For MSVS: 2015 is recommended, 2013 is fully supported/tested, and 2010 support is attempted, but not tested.)
* [CMake][cmake]: for generating compilation targets.
* make: _Linux_, ninja is an alternative, if configured.
* [Python 2.7][python]: for executing SPIRV-Tools scripts. (Optional if not using SPIRV-Tools.)
* [Python 3.x][python]: for executing SPIRV-Tools scripts. (Optional if not using SPIRV-Tools and the 'External' subdirectory does not exist.)
* [bison][bison]: _optional_, but needed when changing the grammar (glslang.y).
* [googletest][googletest]: _optional_, but should be used if making any changes to glslang.
@ -84,9 +93,18 @@ cd <the directory glslang was cloned to, "External" will be a subdirectory>
git clone https://github.com/google/googletest.git External/googletest
```
If you want to use googletest with Visual Studio 2013, you also need to check out an older version:
```bash
# to use googletest with Visual Studio 2013
cd External/googletest
git checkout 440527a61e1c91188195f7de212c63c77e8f0a45
cd ../..
```
If you wish to assure that SPIR-V generated from HLSL is legal for Vulkan,
or wish to invoke -Os to reduce SPIR-V size from HLSL or GLSL, install
spirv-tools with this:
wish to invoke -Os to reduce SPIR-V size from HLSL or GLSL, or wish to run the
integrated test suite, install spirv-tools with this:
```bash
./update_glslang_sources.py
@ -105,8 +123,8 @@ cd $BUILD_DIR
For building on Linux:
```bash
cmake -DCMAKE_BUILD_TYPE={Debug|Release|RelWithDebInfo} \
-DCMAKE_INSTALL_PREFIX="$(pwd)/install" $SOURCE_DIR
cmake -DCMAKE_BUILD_TYPE=Release -DCMAKE_INSTALL_PREFIX="$(pwd)/install" $SOURCE_DIR
# "Release" (for CMAKE_BUILD_TYPE) could also be "Debug" or "RelWithDebInfo"
```
For building on Windows:
@ -118,6 +136,9 @@ cmake $SOURCE_DIR -DCMAKE_INSTALL_PREFIX="$(pwd)/install"
The CMake GUI also works for Windows (version 3.4.1 tested).
Also, consider using `git config --global core.fileMode false` (or with `--local`) on Windows
to prevent the addition of execution permission on files.
#### 4) Build and Install
```bash
@ -125,8 +146,8 @@ The CMake GUI also works for Windows (version 3.4.1 tested).
make -j4 install
# for Windows:
cmake --build . --config {Release|Debug|MinSizeRel|RelWithDebInfo} \
--target install
cmake --build . --config Release --target install
# "Release" (for --config) could also be "Debug", "MinSizeRel", or "RelWithDebInfo"
```
If using MSVC, after running CMake to configure, use the
@ -143,13 +164,40 @@ changes are quite infrequent. For windows you can get binaries from
The command to rebuild is:
```bash
m4 -P MachineIndependent/glslang.m4 > MachineIndependent/glslang.y
bison --defines=MachineIndependent/glslang_tab.cpp.h
-t MachineIndependent/glslang.y
-o MachineIndependent/glslang_tab.cpp
```
The above command is also available in the bash script at
`glslang/updateGrammar`.
The above commands are also available in the bash script in `updateGrammar`,
when executed from the glslang subdirectory of the glslang repository.
With no arguments it builds the full grammar, and with a "web" argument,
the web grammar subset (see more about the web subset in the next section).
### Building to WASM for the Web and Node
Use the steps in [Build Steps](#build-steps), with the following notes/exceptions:
* For building the web subset of core glslang:
+ execute `updateGrammar web` from the glslang subdirectory
(or if using your own scripts, `m4` needs a `-DGLSLANG_WEB` argument)
+ set `-DENABLE_HLSL=OFF -DBUILD_TESTING=OFF -DENABLE_OPT=OFF -DINSTALL_GTEST=OFF`
+ turn on `-DENABLE_GLSLANG_WEB=ON`
+ optionally, for GLSL compilation error messages, turn on `-DENABLE_GLSLANG_WEB_DEVEL=ON`
* `emsdk` needs to be present in your executable search path, *PATH* for
Bash-like environments
+ [Instructions located
here](https://emscripten.org/docs/getting_started/downloads.html#sdk-download-and-install)
* Wrap cmake call: `emcmake cmake`
* To get a fully minimized build, make sure to use `brotli` to compress the .js
and .wasm files
Example:
```sh
emcmake cmake -DCMAKE_BUILD_TYPE=Release -DENABLE_GLSLANG_WEB=ON \
-DENABLE_HLSL=OFF -DBUILD_TESTING=OFF -DENABLE_OPT=OFF -DINSTALL_GTEST=OFF ..
```
Testing
-------
@ -188,6 +236,11 @@ Running `runtests` script-backed tests:
cd $SOURCE_DIR/Test && ./runtests
```
If some tests fail with validation errors, there may be a mismatch between the
version of `spirv-val` on the system and the version of glslang. In this
case, it is necessary to run `update_glslang_sources.py`. See "Check-Out
External Projects" above for more details.
### Contributing tests
Test results should always be included with a pull request that modifies
@ -232,7 +285,8 @@ The `main()` in `StandAlone/StandAlone.cpp` shows examples using both styles.
### C++ Class Interface (new, preferred)
This interface is in roughly the last 1/3 of `ShaderLang.h`. It is in the
glslang namespace and contains the following.
glslang namespace and contains the following, here with suggested calls
for generating SPIR-V:
```cxx
const char* GetEsslVersionString();
@ -255,10 +309,19 @@ class TProgram
Reflection queries
```
See `ShaderLang.h` and the usage of it in `StandAlone/StandAlone.cpp` for more
details.
For just validating (not generating code), substitute these calls:
### C Functional Interface (orignal)
```cxx
setEnvInput(EShSourceHlsl or EShSourceGlsl, stage, EShClientNone, 0);
setEnvClient(EShClientNone, 0);
setEnvTarget(EShTargetNone, 0);
```
See `ShaderLang.h` and the usage of it in `StandAlone/StandAlone.cpp` for more
details. There is a block comment giving more detail above the calls for
`setEnvInput`, `setEnvClient`, and `setEnvTarget`.
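A minimal, self-contained sketch of the validation-only setup above, using the
C++ class interface; the include path, the zero-initialized resource limits,
and the test shader are illustrative assumptions rather than part of this
change (real applications typically pass the default limits from
`StandAlone/ResourceLimits.h` or their own values):
```cpp
#include "glslang/Public/ShaderLang.h"

#include <cstdio>

int main()
{
    glslang::InitializeProcess();

    const char* source = "#version 450\nvoid main() {}\n";
    glslang::TShader shader(EShLangVertex);
    shader.setStrings(&source, 1);

    // Validation-only environment, matching the calls listed above:
    // no client and no code-generation target.
    shader.setEnvInput(glslang::EShSourceGlsl, EShLangVertex, glslang::EShClientNone, 0);
    shader.setEnvClient(glslang::EShClientNone, static_cast<glslang::EShTargetClientVersion>(0));
    shader.setEnvTarget(glslang::EShTargetNone, static_cast<glslang::EShTargetLanguageVersion>(0));

    TBuiltInResource resources = {};  // placeholder limits for this tiny shader
    const bool ok = shader.parse(&resources, 100, false, EShMsgDefault);
    std::printf("%s\n%s", ok ? "validation succeeded" : "validation failed", shader.getInfoLog());

    glslang::FinalizeProcess();
    return ok ? 0 : 1;
}
```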
### C Functional Interface (original)
This interface is in roughly the first 2/3 of `ShaderLang.h`, and referred to
as the `Sh*()` interface, as all the entry points start `Sh`.

Externals/glslang/SPIRV/CMakeLists.txt (vendored, changed from executable to normal file, 61 lines changed)

@ -3,7 +3,9 @@ set(SOURCES
InReadableOrder.cpp
Logger.cpp
SpvBuilder.cpp
SpvPostProcess.cpp
doc.cpp
SpvTools.cpp
disassemble.cpp)
set(SPVREMAP_SOURCES
@ -22,36 +24,33 @@ set(HEADERS
SpvBuilder.h
spvIR.h
doc.h
disassemble.h)
SpvTools.h
disassemble.h
GLSL.ext.AMD.h
GLSL.ext.NV.h)
set(SPVREMAP_HEADERS
SPVRemapper.h
doc.h)
if(ENABLE_AMD_EXTENSIONS)
list(APPEND
HEADERS
GLSL.ext.AMD.h)
endif(ENABLE_AMD_EXTENSIONS)
if(ENABLE_NV_EXTENSIONS)
list(APPEND
HEADERS
GLSL.ext.NV.h)
endif(ENABLE_NV_EXTENSIONS)
add_library(SPIRV ${LIB_TYPE} ${SOURCES} ${HEADERS})
set_property(TARGET SPIRV PROPERTY FOLDER glslang)
set_property(TARGET SPIRV PROPERTY POSITION_INDEPENDENT_CODE ON)
target_include_directories(SPIRV PUBLIC ..)
target_include_directories(SPIRV PUBLIC
$<BUILD_INTERFACE:${CMAKE_CURRENT_SOURCE_DIR}/..>
$<INSTALL_INTERFACE:${CMAKE_INSTALL_INCLUDEDIR}>)
add_library(SPVRemapper ${LIB_TYPE} ${SPVREMAP_SOURCES} ${SPVREMAP_HEADERS})
set_property(TARGET SPVRemapper PROPERTY FOLDER glslang)
set_property(TARGET SPVRemapper PROPERTY POSITION_INDEPENDENT_CODE ON)
if (ENABLE_SPVREMAPPER)
add_library(SPVRemapper ${LIB_TYPE} ${SPVREMAP_SOURCES} ${SPVREMAP_HEADERS})
set_property(TARGET SPVRemapper PROPERTY FOLDER glslang)
set_property(TARGET SPVRemapper PROPERTY POSITION_INDEPENDENT_CODE ON)
endif()
if(WIN32 AND BUILD_SHARED_LIBS)
set_target_properties(SPIRV PROPERTIES PREFIX "")
set_target_properties(SPVRemapper PROPERTIES PREFIX "")
if (ENABLE_SPVREMAPPER)
set_target_properties(SPVRemapper PROPERTIES PREFIX "")
endif()
endif()
if(ENABLE_OPT)
@ -60,6 +59,9 @@ if(ENABLE_OPT)
PRIVATE ${spirv-tools_SOURCE_DIR}/source
)
target_link_libraries(SPIRV glslang SPIRV-Tools-opt)
target_include_directories(SPIRV PUBLIC
$<BUILD_INTERFACE:${CMAKE_CURRENT_SOURCE_DIR}/../External>
$<INSTALL_INTERFACE:${CMAKE_INSTALL_INCLUDEDIR}/External>)
else()
target_link_libraries(SPIRV glslang)
endif(ENABLE_OPT)
@ -71,13 +73,30 @@ endif(WIN32)
if(ENABLE_GLSLANG_INSTALL)
if(BUILD_SHARED_LIBS)
install(TARGETS SPIRV SPVRemapper
if (ENABLE_SPVREMAPPER)
install(TARGETS SPVRemapper EXPORT SPVRemapperTargets
ARCHIVE DESTINATION ${CMAKE_INSTALL_LIBDIR}
LIBRARY DESTINATION ${CMAKE_INSTALL_LIBDIR})
endif()
install(TARGETS SPIRV EXPORT SPIRVTargets
ARCHIVE DESTINATION ${CMAKE_INSTALL_LIBDIR}
LIBRARY DESTINATION ${CMAKE_INSTALL_LIBDIR})
LIBRARY DESTINATION ${CMAKE_INSTALL_LIBDIR}
RUNTIME DESTINATION ${CMAKE_INSTALL_BINDIR})
else()
install(TARGETS SPIRV SPVRemapper
if (ENABLE_SPVREMAPPER)
install(TARGETS SPVRemapper EXPORT SPVRemapperTargets
ARCHIVE DESTINATION ${CMAKE_INSTALL_LIBDIR})
endif()
install(TARGETS SPIRV EXPORT SPIRVTargets
ARCHIVE DESTINATION ${CMAKE_INSTALL_LIBDIR})
endif()
if (ENABLE_SPVREMAPPER)
install(EXPORT SPVRemapperTargets DESTINATION ${CMAKE_INSTALL_LIBDIR}/cmake)
endif()
install(EXPORT SPIRVTargets DESTINATION ${CMAKE_INSTALL_LIBDIR}/cmake)
install(FILES ${HEADERS} ${SPVREMAP_HEADERS} DESTINATION ${CMAKE_INSTALL_INCLUDEDIR}/SPIRV/)
install(FILES ${HEADERS} ${SPVREMAP_HEADERS} DESTINATION ${CMAKE_INSTALL_INCLUDEDIR}/glslang/SPIRV/)
endif(ENABLE_GLSLANG_INSTALL)

(changed file, name not shown)

@ -28,10 +28,12 @@
#define GLSLextEXT_H
static const int GLSLextEXTVersion = 100;
static const int GLSLextEXTRevision = 1;
static const int GLSLextEXTRevision = 2;
static const char* const E_SPV_EXT_shader_stencil_export = "SPV_EXT_shader_stencil_export";
static const char* const E_SPV_EXT_shader_viewport_index_layer = "SPV_EXT_shader_viewport_index_layer";
static const char* const E_SPV_EXT_fragment_fully_covered = "SPV_EXT_fragment_fully_covered";
static const char* const E_SPV_EXT_fragment_invocation_density = "SPV_EXT_fragment_invocation_density";
static const char* const E_SPV_EXT_demote_to_helper_invocation = "SPV_EXT_demote_to_helper_invocation";
#endif // #ifndef GLSLextEXT_H

(changed file, name not shown)

@ -36,7 +36,13 @@ static const char* const E_SPV_KHR_device_group = "SPV_KHR_devic
static const char* const E_SPV_KHR_multiview = "SPV_KHR_multiview";
static const char* const E_SPV_KHR_shader_draw_parameters = "SPV_KHR_shader_draw_parameters";
static const char* const E_SPV_KHR_16bit_storage = "SPV_KHR_16bit_storage";
static const char* const E_SPV_KHR_8bit_storage = "SPV_KHR_8bit_storage";
static const char* const E_SPV_KHR_storage_buffer_storage_class = "SPV_KHR_storage_buffer_storage_class";
static const char* const E_SPV_KHR_post_depth_coverage = "SPV_KHR_post_depth_coverage";
static const char* const E_SPV_KHR_vulkan_memory_model = "SPV_KHR_vulkan_memory_model";
static const char* const E_SPV_EXT_physical_storage_buffer = "SPV_EXT_physical_storage_buffer";
static const char* const E_SPV_KHR_physical_storage_buffer = "SPV_KHR_physical_storage_buffer";
static const char* const E_SPV_EXT_fragment_shader_interlock = "SPV_EXT_fragment_shader_interlock";
static const char* const E_SPV_KHR_shader_clock = "SPV_KHR_shader_clock";
#endif // #ifndef GLSLextKHR_H

(changed file, name not shown)

@ -33,7 +33,7 @@ enum Op;
enum Capability;
static const int GLSLextNVVersion = 100;
static const int GLSLextNVRevision = 5;
static const int GLSLextNVRevision = 11;
//SPV_NV_sample_mask_override_coverage
const char* const E_SPV_NV_sample_mask_override_coverage = "SPV_NV_sample_mask_override_coverage";
@ -54,4 +54,28 @@ const char* const E_SPV_NVX_multiview_per_view_attributes = "SPV_NVX_multiview_p
//SPV_NV_shader_subgroup_partitioned
const char* const E_SPV_NV_shader_subgroup_partitioned = "SPV_NV_shader_subgroup_partitioned";
#endif // #ifndef GLSLextNV_H
//SPV_NV_fragment_shader_barycentric
const char* const E_SPV_NV_fragment_shader_barycentric = "SPV_NV_fragment_shader_barycentric";
//SPV_NV_compute_shader_derivatives
const char* const E_SPV_NV_compute_shader_derivatives = "SPV_NV_compute_shader_derivatives";
//SPV_NV_shader_image_footprint
const char* const E_SPV_NV_shader_image_footprint = "SPV_NV_shader_image_footprint";
//SPV_NV_mesh_shader
const char* const E_SPV_NV_mesh_shader = "SPV_NV_mesh_shader";
//SPV_NV_raytracing
const char* const E_SPV_NV_ray_tracing = "SPV_NV_ray_tracing";
//SPV_NV_shading_rate
const char* const E_SPV_NV_shading_rate = "SPV_NV_shading_rate";
//SPV_NV_cooperative_matrix
const char* const E_SPV_NV_cooperative_matrix = "SPV_NV_cooperative_matrix";
//SPV_NV_shader_sm_builtins
const char* const E_SPV_NV_shader_sm_builtins = "SPV_NV_shader_sm_builtins";
#endif // #ifndef GLSLextNV_H

Externals/glslang/SPIRV/GLSL.std.450.h (vendored, changed from executable to normal file; no content changes)

Externals/glslang/SPIRV/GlslangToSpv.cpp (vendored, changed from executable to normal file, 3135 lines changed; diff too large to display)

(changed file, name not shown)

@ -1,5 +1,6 @@
//
// Copyright (C) 2014 LunarG, Inc.
// Copyright (C) 2015-2018 Google, Inc.
//
// All rights reserved.
//
@ -38,7 +39,8 @@
#pragma warning(disable : 4464) // relative include path contains '..'
#endif
#include "../glslang/Include/intermediate.h"
#include "SpvTools.h"
#include "glslang/Include/intermediate.h"
#include <string>
#include <vector>
@ -47,14 +49,6 @@
namespace glslang {
struct SpvOptions {
SpvOptions() : generateDebugInfo(false), disableOptimizer(true),
optimizeSize(false) { }
bool generateDebugInfo;
bool disableOptimizer;
bool optimizeSize;
};
void GetSpirvVersion(std::string&);
int GetSpirvGeneratorVersion();
void GlslangToSpv(const glslang::TIntermediate& intermediate, std::vector<unsigned int>& spirv,

(changed file, name not shown)

@ -61,17 +61,22 @@ namespace {
// Use by calling visit() on the root block.
class ReadableOrderTraverser {
public:
explicit ReadableOrderTraverser(std::function<void(Block*)> callback) : callback_(callback) {}
ReadableOrderTraverser(std::function<void(Block*, spv::ReachReason, Block*)> callback)
: callback_(callback) {}
// Visits the block if it hasn't been visited already and isn't currently
// being delayed. Invokes callback(block), then descends into its
// being delayed. Invokes callback(block, why, header), then descends into its
// successors. Delays merge-block and continue-block processing until all
// the branches have been completed.
void visit(Block* block)
// the branches have been completed. If |block| is an unreachable merge block or
// an unreachable continue target, then |header| is the corresponding header block.
void visit(Block* block, spv::ReachReason why, Block* header)
{
assert(block);
if (why == spv::ReachViaControlFlow) {
reachableViaControlFlow_.insert(block);
}
if (visited_.count(block) || delayed_.count(block))
return;
callback_(block);
callback_(block, why, header);
visited_.insert(block);
Block* mergeBlock = nullptr;
Block* continueBlock = nullptr;
@ -87,27 +92,40 @@ public:
delayed_.insert(continueBlock);
}
}
const auto successors = block->getSuccessors();
for (auto it = successors.cbegin(); it != successors.cend(); ++it)
visit(*it);
if (why == spv::ReachViaControlFlow) {
const auto& successors = block->getSuccessors();
for (auto it = successors.cbegin(); it != successors.cend(); ++it)
visit(*it, why, nullptr);
}
if (continueBlock) {
const spv::ReachReason continueWhy =
(reachableViaControlFlow_.count(continueBlock) > 0)
? spv::ReachViaControlFlow
: spv::ReachDeadContinue;
delayed_.erase(continueBlock);
visit(continueBlock);
visit(continueBlock, continueWhy, block);
}
if (mergeBlock) {
const spv::ReachReason mergeWhy =
(reachableViaControlFlow_.count(mergeBlock) > 0)
? spv::ReachViaControlFlow
: spv::ReachDeadMerge;
delayed_.erase(mergeBlock);
visit(mergeBlock);
visit(mergeBlock, mergeWhy, block);
}
}
private:
std::function<void(Block*)> callback_;
std::function<void(Block*, spv::ReachReason, Block*)> callback_;
// Whether a block has already been visited or is being delayed.
std::unordered_set<Block *> visited_, delayed_;
// The set of blocks that actually are reached via control flow.
std::unordered_set<Block *> reachableViaControlFlow_;
};
}
void spv::inReadableOrder(Block* root, std::function<void(Block*)> callback)
void spv::inReadableOrder(Block* root, std::function<void(Block*, spv::ReachReason, Block*)> callback)
{
ReadableOrderTraverser(callback).visit(root);
ReadableOrderTraverser(callback).visit(root, spv::ReachViaControlFlow, nullptr);
}

(changed file, name not shown)

@ -32,6 +32,8 @@
// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
// POSSIBILITY OF SUCH DAMAGE.
#ifndef GLSLANG_WEB
#include "Logger.h"
#include <algorithm>
@ -66,3 +68,5 @@ std::string SpvBuildLogger::getAllMessages() const {
}
} // end spv namespace
#endif

(changed file, name not shown)

@ -46,6 +46,14 @@ class SpvBuildLogger {
public:
SpvBuildLogger() {}
#ifdef GLSLANG_WEB
void tbdFunctionality(const std::string& f) { }
void missingFunctionality(const std::string& f) { }
void warning(const std::string& w) { }
void error(const std::string& e) { errors.push_back(e); }
std::string getAllMessages() { return ""; }
#else
// Registers a TBD functionality.
void tbdFunctionality(const std::string& f);
// Registers a missing functionality.
@ -59,6 +67,7 @@ public:
// Returns all messages accumulated in the order of:
// TBD functionalities, missing functionalities, warnings, errors.
std::string getAllMessages() const;
#endif
private:
SpvBuildLogger(const SpvBuildLogger&);

Externals/glslang/SPIRV/SPVRemapper.cpp (vendored, changed from executable to normal file, 18 lines changed)

@ -220,11 +220,11 @@ namespace spv {
bool spirvbin_t::isConstOp(spv::Op opCode) const
{
switch (opCode) {
case spv::OpConstantNull:
case spv::OpConstantSampler:
error("unimplemented constant type");
return true;
case spv::OpConstantNull:
case spv::OpConstantTrue:
case spv::OpConstantFalse:
case spv::OpConstantComposite:
@ -1326,10 +1326,6 @@ namespace spv {
case spv::OpTypeReserveId: return 300002;
case spv::OpTypeQueue: return 300003;
case spv::OpTypePipe: return 300004;
case spv::OpConstantNull: return 300005;
case spv::OpConstantSampler: return 300006;
case spv::OpConstantTrue: return 300007;
case spv::OpConstantFalse: return 300008;
case spv::OpConstantComposite:
@ -1346,6 +1342,18 @@ namespace spv {
hash += w * spv[typeStart+w];
return hash;
}
case spv::OpConstantNull:
{
std::uint32_t hash = 500009 + hashType(idPos(spv[typeStart+1]));
return hash;
}
case spv::OpConstantSampler:
{
std::uint32_t hash = 600011 + hashType(idPos(spv[typeStart+1]));
for (unsigned w=3; w < wordCount; ++w)
hash += w * spv[typeStart+w];
return hash;
}
default:
error("unknown type opcode");

Externals/glslang/SPIRV/SPVRemapper.h (vendored, changed from executable to normal file, 4 lines changed)

@ -45,7 +45,7 @@ namespace spv {
// MSVC defines __cplusplus as an older value, even when it supports almost all of 11.
// We handle that here by making our own symbol.
#if __cplusplus >= 201103L || _MSC_VER >= 1700
#if __cplusplus >= 201103L || (defined(_MSC_VER) && _MSC_VER >= 1700)
# define use_cpp11 1
#endif
@ -195,7 +195,7 @@ private:
// Header access & set methods
spirword_t magic() const { return spv[0]; } // return magic number
spirword_t bound() const { return spv[3]; } // return Id bound from header
spirword_t bound(spirword_t b) { return spv[3] = b; };
spirword_t bound(spirword_t b) { return spv[3] = b; }
spirword_t genmagic() const { return spv[2]; } // generator magic
spirword_t genmagic(spirword_t m) { return spv[2] = m; }
spirword_t schemaNum() const { return spv[4]; } // schema number from header

(changed file, name not shown)

@ -1,6 +1,6 @@
//
// Copyright (C) 2014-2015 LunarG, Inc.
// Copyright (C) 2015-2016 Google, Inc.
// Copyright (C) 2015-2018 Google, Inc.
//
// All rights reserved.
//
@ -46,7 +46,9 @@
#include "SpvBuilder.h"
#ifndef GLSLANG_WEB
#include "hex_float.h"
#endif
#ifndef _WIN32
#include <cstdio>
@ -60,6 +62,7 @@ Builder::Builder(unsigned int spvVersion, unsigned int magicNumber, SpvBuildLogg
sourceVersion(0),
sourceFileStringId(NoResult),
currentLine(0),
currentFile(nullptr),
emitOpLines(false),
addressModel(AddressingModelLogical),
memoryModel(MemoryModelGLSL450),
@ -81,13 +84,15 @@ Id Builder::import(const char* name)
{
Instruction* import = new Instruction(getUniqueId(), NoType, OpExtInstImport);
import->addStringOperand(name);
module.mapInstruction(import);
imports.push_back(std::unique_ptr<Instruction>(import));
return import->getResultId();
}
// Emit an OpLine if we've been asked to emit OpLines and the line number
// has changed since the last time, and is a valid line number.
// Emit instruction for non-filename-based #line directives (ie. no filename
// seen yet): emit an OpLine if we've been asked to emit OpLines and the line
// number has changed since the last time, and is a valid line number.
void Builder::setLine(int lineNum)
{
if (lineNum != 0 && lineNum != currentLine) {
@ -97,6 +102,26 @@ void Builder::setLine(int lineNum)
}
}
// If no filename, do non-filename-based #line emit. Else do filename-based emit.
// Emit OpLine if we've been asked to emit OpLines and the line number or filename
// has changed since the last time, and line number is valid.
void Builder::setLine(int lineNum, const char* filename)
{
if (filename == nullptr) {
setLine(lineNum);
return;
}
if ((lineNum != 0 && lineNum != currentLine) || currentFile == nullptr ||
strncmp(filename, currentFile, strlen(currentFile) + 1) != 0) {
currentLine = lineNum;
currentFile = filename;
if (emitOpLines) {
spv::Id strId = getStringId(filename);
addLine(strId, currentLine, 0);
}
}
}
void Builder::addLine(Id fileName, int lineNum, int column)
{
Instruction* line = new Instruction(OpLine);
@ -171,8 +196,47 @@ Id Builder::makePointer(StorageClass storageClass, Id pointee)
return type->getResultId();
}
Id Builder::makeForwardPointer(StorageClass storageClass)
{
// Caching/uniquifying doesn't work here, because we don't know the
// pointee type and there can be multiple forward pointers of the same
// storage type. Somebody higher up in the stack must keep track.
Instruction* type = new Instruction(getUniqueId(), NoType, OpTypeForwardPointer);
type->addImmediateOperand(storageClass);
constantsTypesGlobals.push_back(std::unique_ptr<Instruction>(type));
module.mapInstruction(type);
return type->getResultId();
}
Id Builder::makePointerFromForwardPointer(StorageClass storageClass, Id forwardPointerType, Id pointee)
{
// try to find it
Instruction* type;
for (int t = 0; t < (int)groupedTypes[OpTypePointer].size(); ++t) {
type = groupedTypes[OpTypePointer][t];
if (type->getImmediateOperand(0) == (unsigned)storageClass &&
type->getIdOperand(1) == pointee)
return type->getResultId();
}
type = new Instruction(forwardPointerType, NoType, OpTypePointer);
type->addImmediateOperand(storageClass);
type->addIdOperand(pointee);
groupedTypes[OpTypePointer].push_back(type);
constantsTypesGlobals.push_back(std::unique_ptr<Instruction>(type));
module.mapInstruction(type);
return type->getResultId();
}
Id Builder::makeIntegerType(int width, bool hasSign)
{
#ifdef GLSLANG_WEB
assert(width == 32);
width = 32;
#endif
// try to find it
Instruction* type;
for (int t = 0; t < (int)groupedTypes[OpTypeInt].size(); ++t) {
@ -193,10 +257,8 @@ Id Builder::makeIntegerType(int width, bool hasSign)
// deal with capabilities
switch (width) {
case 8:
addCapability(CapabilityInt8);
break;
case 16:
addCapability(CapabilityInt16);
// these are currently handled by storage-type declarations and post processing
break;
case 64:
addCapability(CapabilityInt64);
@ -210,6 +272,11 @@ Id Builder::makeIntegerType(int width, bool hasSign)
Id Builder::makeFloatType(int width)
{
#ifdef GLSLANG_WEB
assert(width == 32);
width = 32;
#endif
// try to find it
Instruction* type;
for (int t = 0; t < (int)groupedTypes[OpTypeFloat].size(); ++t) {
@ -228,7 +295,7 @@ Id Builder::makeFloatType(int width)
// deal with capabilities
switch (width) {
case 16:
addCapability(CapabilityFloat16);
// currently handled by storage-type declarations and post processing
break;
case 64:
addCapability(CapabilityFloat64);
@ -333,6 +400,33 @@ Id Builder::makeMatrixType(Id component, int cols, int rows)
return type->getResultId();
}
Id Builder::makeCooperativeMatrixType(Id component, Id scope, Id rows, Id cols)
{
// try to find it
Instruction* type;
for (int t = 0; t < (int)groupedTypes[OpTypeCooperativeMatrixNV].size(); ++t) {
type = groupedTypes[OpTypeCooperativeMatrixNV][t];
if (type->getIdOperand(0) == component &&
type->getIdOperand(1) == scope &&
type->getIdOperand(2) == rows &&
type->getIdOperand(3) == cols)
return type->getResultId();
}
// not found, make it
type = new Instruction(getUniqueId(), NoType, OpTypeCooperativeMatrixNV);
type->addIdOperand(component);
type->addIdOperand(scope);
type->addIdOperand(rows);
type->addIdOperand(cols);
groupedTypes[OpTypeCooperativeMatrixNV].push_back(type);
constantsTypesGlobals.push_back(std::unique_ptr<Instruction>(type));
module.mapInstruction(type);
return type->getResultId();
}
// TODO: performance: track arrays per stride
// If a stride is supplied (non-zero) make an array.
// If no stride (0), reuse previous array types.
@ -434,6 +528,7 @@ Id Builder::makeImageType(Id sampledType, Dim dim, bool depth, bool arrayed, boo
constantsTypesGlobals.push_back(std::unique_ptr<Instruction>(type));
module.mapInstruction(type);
#ifndef GLSLANG_WEB
// deal with capabilities
switch (dim) {
case DimBuffer:
@ -479,6 +574,7 @@ Id Builder::makeImageType(Id sampledType, Dim dim, bool depth, bool arrayed, boo
addCapability(CapabilityImageMSArray);
}
}
#endif
return type->getResultId();
}
@ -504,12 +600,29 @@ Id Builder::makeSampledImageType(Id imageType)
return type->getResultId();
}
#ifndef GLSLANG_WEB
Id Builder::makeAccelerationStructureNVType()
{
Instruction *type;
if (groupedTypes[OpTypeAccelerationStructureNV].size() == 0) {
type = new Instruction(getUniqueId(), NoType, OpTypeAccelerationStructureNV);
groupedTypes[OpTypeAccelerationStructureNV].push_back(type);
constantsTypesGlobals.push_back(std::unique_ptr<Instruction>(type));
module.mapInstruction(type);
} else {
type = groupedTypes[OpTypeAccelerationStructureNV].back();
}
return type->getResultId();
}
#endif
Id Builder::getDerefTypeId(Id resultId) const
{
Id typeId = getTypeId(resultId);
assert(isPointerType(typeId));
return module.getInstruction(typeId)->getImmediateOperand(1);
return module.getInstruction(typeId)->getIdOperand(1);
}
Op Builder::getMostBasicTypeClass(Id typeId) const
@ -519,12 +632,6 @@ Op Builder::getMostBasicTypeClass(Id typeId) const
Op typeClass = instr->getOpCode();
switch (typeClass)
{
case OpTypeVoid:
case OpTypeBool:
case OpTypeInt:
case OpTypeFloat:
case OpTypeStruct:
return typeClass;
case OpTypeVector:
case OpTypeMatrix:
case OpTypeArray:
@ -533,8 +640,7 @@ Op Builder::getMostBasicTypeClass(Id typeId) const
case OpTypePointer:
return getMostBasicTypeClass(instr->getIdOperand(1));
default:
assert(0);
return OpTypeFloat;
return typeClass;
}
}
@ -547,17 +653,21 @@ int Builder::getNumTypeConstituents(Id typeId) const
case OpTypeBool:
case OpTypeInt:
case OpTypeFloat:
case OpTypePointer:
return 1;
case OpTypeVector:
case OpTypeMatrix:
return instr->getImmediateOperand(1);
case OpTypeArray:
{
Id lengthId = instr->getImmediateOperand(1);
Id lengthId = instr->getIdOperand(1);
return module.getInstruction(lengthId)->getImmediateOperand(0);
}
case OpTypeStruct:
return instr->getNumOperands();
case OpTypeCooperativeMatrixNV:
// has only one constituent when used with OpCompositeConstruct.
return 1;
default:
assert(0);
return 1;
@ -604,6 +714,7 @@ Id Builder::getContainedTypeId(Id typeId, int member) const
case OpTypeMatrix:
case OpTypeArray:
case OpTypeRuntimeArray:
case OpTypeCooperativeMatrixNV:
return instr->getIdOperand(0);
case OpTypePointer:
return instr->getIdOperand(1);
@ -621,6 +732,55 @@ Id Builder::getContainedTypeId(Id typeId) const
return getContainedTypeId(typeId, 0);
}
// Returns true if 'typeId' is or contains a scalar type declared with 'typeOp'
// of width 'width'. The 'width' is only consumed for int and float types.
// Returns false otherwise.
bool Builder::containsType(Id typeId, spv::Op typeOp, unsigned int width) const
{
const Instruction& instr = *module.getInstruction(typeId);
Op typeClass = instr.getOpCode();
switch (typeClass)
{
case OpTypeInt:
case OpTypeFloat:
return typeClass == typeOp && instr.getImmediateOperand(0) == width;
case OpTypeStruct:
for (int m = 0; m < instr.getNumOperands(); ++m) {
if (containsType(instr.getIdOperand(m), typeOp, width))
return true;
}
return false;
case OpTypePointer:
return false;
case OpTypeVector:
case OpTypeMatrix:
case OpTypeArray:
case OpTypeRuntimeArray:
return containsType(getContainedTypeId(typeId), typeOp, width);
default:
return typeClass == typeOp;
}
}
// return true if the type is a pointer to PhysicalStorageBufferEXT or an
// array of such pointers. These require restrict/aliased decorations.
bool Builder::containsPhysicalStorageBufferOrArray(Id typeId) const
{
const Instruction& instr = *module.getInstruction(typeId);
Op typeClass = instr.getOpCode();
switch (typeClass)
{
case OpTypePointer:
return getTypeStorageClass(typeId) == StorageClassPhysicalStorageBufferEXT;
case OpTypeArray:
return containsPhysicalStorageBufferOrArray(getContainedTypeId(typeId));
default:
return false;
}
}
// See if a scalar constant of this type has already been created, so it
// can be reused rather than duplicated. (Required by the specification).
Id Builder::findScalarConstant(Op typeClass, Op opcode, Id typeId, unsigned value)
@ -794,6 +954,10 @@ Id Builder::makeFloatConstant(float f, bool specConstant)
Id Builder::makeDoubleConstant(double d, bool specConstant)
{
#ifdef GLSLANG_WEB
assert(0);
return NoResult;
#else
Op opcode = specConstant ? OpSpecConstant : OpConstant;
Id typeId = makeFloatType(64);
union { double db; unsigned long long ull; } u;
@ -818,10 +982,15 @@ Id Builder::makeDoubleConstant(double d, bool specConstant)
module.mapInstruction(c);
return c->getResultId();
#endif
}
Id Builder::makeFloat16Constant(float f16, bool specConstant)
{
#ifdef GLSLANG_WEB
assert(0);
return NoResult;
#else
Op opcode = specConstant ? OpSpecConstant : OpConstant;
Id typeId = makeFloatType(16);
@ -846,36 +1015,43 @@ Id Builder::makeFloat16Constant(float f16, bool specConstant)
module.mapInstruction(c);
return c->getResultId();
#endif
}
Id Builder::makeFpConstant(Id type, double d, bool specConstant)
{
assert(isFloatType(type));
#ifdef GLSLANG_WEB
const int width = 32;
assert(width == getScalarTypeWidth(type));
#else
const int width = getScalarTypeWidth(type);
#endif
switch (getScalarTypeWidth(type)) {
case 16:
return makeFloat16Constant((float)d, specConstant);
case 32:
return makeFloatConstant((float)d, specConstant);
case 64:
return makeDoubleConstant(d, specConstant);
default:
break;
}
assert(isFloatType(type));
assert(false);
return NoResult;
switch (width) {
case 16:
return makeFloat16Constant((float)d, specConstant);
case 32:
return makeFloatConstant((float)d, specConstant);
case 64:
return makeDoubleConstant(d, specConstant);
default:
break;
}
assert(false);
return NoResult;
}
Id Builder::findCompositeConstant(Op typeClass, const std::vector<Id>& comps)
Id Builder::findCompositeConstant(Op typeClass, Id typeId, const std::vector<Id>& comps)
{
Instruction* constant = 0;
bool found = false;
for (int i = 0; i < (int)groupedConstants[typeClass].size(); ++i) {
constant = groupedConstants[typeClass][i];
// same shape?
if (constant->getNumOperands() != (int)comps.size())
if (constant->getTypeId() != typeId)
continue;
// same contents?
@ -930,8 +1106,9 @@ Id Builder::makeCompositeConstant(Id typeId, const std::vector<Id>& members, boo
case OpTypeVector:
case OpTypeArray:
case OpTypeMatrix:
case OpTypeCooperativeMatrixNV:
if (! specConstant) {
Id existing = findCompositeConstant(typeClass, members);
Id existing = findCompositeConstant(typeClass, typeId, members);
if (existing)
return existing;
}
@ -1161,11 +1338,13 @@ void Builder::makeDiscard()
}
// Comments in header
Id Builder::createVariable(StorageClass storageClass, Id type, const char* name)
Id Builder::createVariable(StorageClass storageClass, Id type, const char* name, Id initializer)
{
Id pointerType = makePointer(storageClass, type);
Instruction* inst = new Instruction(getUniqueId(), pointerType, OpVariable);
inst->addImmediateOperand(storageClass);
if (initializer != NoResult)
inst->addIdOperand(initializer);
switch (storageClass) {
case StorageClassFunction:
@ -1193,20 +1372,65 @@ Id Builder::createUndefined(Id type)
return inst->getResultId();
}
// av/vis/nonprivate are unnecessary and illegal for some storage classes.
spv::MemoryAccessMask Builder::sanitizeMemoryAccessForStorageClass(spv::MemoryAccessMask memoryAccess, StorageClass sc) const
{
switch (sc) {
case spv::StorageClassUniform:
case spv::StorageClassWorkgroup:
case spv::StorageClassStorageBuffer:
case spv::StorageClassPhysicalStorageBufferEXT:
break;
default:
memoryAccess = spv::MemoryAccessMask(memoryAccess &
~(spv::MemoryAccessMakePointerAvailableKHRMask |
spv::MemoryAccessMakePointerVisibleKHRMask |
spv::MemoryAccessNonPrivatePointerKHRMask));
break;
}
return memoryAccess;
}
// Comments in header
void Builder::createStore(Id rValue, Id lValue)
void Builder::createStore(Id rValue, Id lValue, spv::MemoryAccessMask memoryAccess, spv::Scope scope, unsigned int alignment)
{
Instruction* store = new Instruction(OpStore);
store->addIdOperand(lValue);
store->addIdOperand(rValue);
memoryAccess = sanitizeMemoryAccessForStorageClass(memoryAccess, getStorageClass(lValue));
if (memoryAccess != MemoryAccessMaskNone) {
store->addImmediateOperand(memoryAccess);
if (memoryAccess & spv::MemoryAccessAlignedMask) {
store->addImmediateOperand(alignment);
}
if (memoryAccess & spv::MemoryAccessMakePointerAvailableKHRMask) {
store->addIdOperand(makeUintConstant(scope));
}
}
buildPoint->addInstruction(std::unique_ptr<Instruction>(store));
}
// Comments in header
Id Builder::createLoad(Id lValue)
Id Builder::createLoad(Id lValue, spv::MemoryAccessMask memoryAccess, spv::Scope scope, unsigned int alignment)
{
Instruction* load = new Instruction(getUniqueId(), getDerefTypeId(lValue), OpLoad);
load->addIdOperand(lValue);
memoryAccess = sanitizeMemoryAccessForStorageClass(memoryAccess, getStorageClass(lValue));
if (memoryAccess != MemoryAccessMaskNone) {
load->addImmediateOperand(memoryAccess);
if (memoryAccess & spv::MemoryAccessAlignedMask) {
load->addImmediateOperand(alignment);
}
if (memoryAccess & spv::MemoryAccessMakePointerVisibleKHRMask) {
load->addIdOperand(makeUintConstant(scope));
}
}
buildPoint->addInstruction(std::unique_ptr<Instruction>(load));
return load->getResultId();
@ -1240,7 +1464,7 @@ Id Builder::createAccessChain(StorageClass storageClass, Id base, const std::vec
Id Builder::createArrayLength(Id base, unsigned int member)
{
spv::Id intType = makeIntType(32);
spv::Id intType = makeUintType(32);
Instruction* length = new Instruction(getUniqueId(), intType, OpArrayLength);
length->addIdOperand(base);
length->addImmediateOperand(member);
@ -1249,6 +1473,23 @@ Id Builder::createArrayLength(Id base, unsigned int member)
return length->getResultId();
}
Id Builder::createCooperativeMatrixLength(Id type)
{
spv::Id intType = makeUintType(32);
// Generate code for spec constants if in spec constant operation
// generation mode.
if (generatingOpCodeForSpecConst) {
return createSpecConstantOp(OpCooperativeMatrixLengthNV, intType, std::vector<Id>(1, type), std::vector<Id>());
}
Instruction* length = new Instruction(getUniqueId(), intType, OpCooperativeMatrixLengthNV);
length->addIdOperand(type);
buildPoint->addInstruction(std::unique_ptr<Instruction>(length));
return length->getResultId();
}
Id Builder::createCompositeExtract(Id composite, Id typeId, unsigned index)
{
// Generate code for spec constants if in spec constant operation
@ -1331,7 +1572,7 @@ void Builder::createNoResultOp(Op opCode)
buildPoint->addInstruction(std::unique_ptr<Instruction>(op));
}
// An opcode that has one operand, no result id, and no type
// An opcode that has one id operand, no result id, and no type
void Builder::createNoResultOp(Op opCode, Id operand)
{
Instruction* op = new Instruction(opCode);
@ -1339,29 +1580,43 @@ void Builder::createNoResultOp(Op opCode, Id operand)
buildPoint->addInstruction(std::unique_ptr<Instruction>(op));
}
// An opcode that has one operand, no result id, and no type
// An opcode that has one or more operands, no result id, and no type
void Builder::createNoResultOp(Op opCode, const std::vector<Id>& operands)
{
Instruction* op = new Instruction(opCode);
for (auto it = operands.cbegin(); it != operands.cend(); ++it)
for (auto it = operands.cbegin(); it != operands.cend(); ++it) {
op->addIdOperand(*it);
}
buildPoint->addInstruction(std::unique_ptr<Instruction>(op));
}
// An opcode that has multiple operands, no result id, and no type
void Builder::createNoResultOp(Op opCode, const std::vector<IdImmediate>& operands)
{
Instruction* op = new Instruction(opCode);
for (auto it = operands.cbegin(); it != operands.cend(); ++it) {
if (it->isId)
op->addIdOperand(it->word);
else
op->addImmediateOperand(it->word);
}
buildPoint->addInstruction(std::unique_ptr<Instruction>(op));
}
void Builder::createControlBarrier(Scope execution, Scope memory, MemorySemanticsMask semantics)
{
Instruction* op = new Instruction(OpControlBarrier);
op->addImmediateOperand(makeUintConstant(execution));
op->addImmediateOperand(makeUintConstant(memory));
op->addImmediateOperand(makeUintConstant(semantics));
op->addIdOperand(makeUintConstant(execution));
op->addIdOperand(makeUintConstant(memory));
op->addIdOperand(makeUintConstant(semantics));
buildPoint->addInstruction(std::unique_ptr<Instruction>(op));
}
void Builder::createMemoryBarrier(unsigned executionScope, unsigned memorySemantics)
{
Instruction* op = new Instruction(OpMemoryBarrier);
op->addImmediateOperand(makeUintConstant(executionScope));
op->addImmediateOperand(makeUintConstant(memorySemantics));
op->addIdOperand(makeUintConstant(executionScope));
op->addIdOperand(makeUintConstant(memorySemantics));
buildPoint->addInstruction(std::unique_ptr<Instruction>(op));
}
@ -1428,6 +1683,20 @@ Id Builder::createOp(Op opCode, Id typeId, const std::vector<Id>& operands)
return op->getResultId();
}
Id Builder::createOp(Op opCode, Id typeId, const std::vector<IdImmediate>& operands)
{
Instruction* op = new Instruction(getUniqueId(), typeId, opCode);
for (auto it = operands.cbegin(); it != operands.cend(); ++it) {
if (it->isId)
op->addIdOperand(it->word);
else
op->addImmediateOperand(it->word);
}
buildPoint->addInstruction(std::unique_ptr<Instruction>(op));
return op->getResultId();
}
Id Builder::createSpecConstantOp(Op opCode, Id typeId, const std::vector<Id>& operands, const std::vector<unsigned>& literals)
{
Instruction* op = new Instruction(getUniqueId(), typeId, OpSpecConstantOp);
@ -1570,7 +1839,8 @@ Id Builder::createBuiltinCall(Id resultType, Id builtins, int entryPoint, const
// Accept all parameters needed to create a texture instruction.
// Create the correct instruction based on the inputs, and make the call.
Id Builder::createTextureCall(Decoration precision, Id resultType, bool sparse, bool fetch, bool proj, bool gather, bool noImplicitLod, const TextureParameters& parameters)
Id Builder::createTextureCall(Decoration precision, Id resultType, bool sparse, bool fetch, bool proj, bool gather,
bool noImplicitLod, const TextureParameters& parameters, ImageOperandsMask signExtensionMask)
{
static const int maxTextureArgs = 10;
Id texArgs[maxTextureArgs] = {};
@ -1587,11 +1857,18 @@ Id Builder::createTextureCall(Decoration precision, Id resultType, bool sparse,
if (parameters.component != NoResult)
texArgs[numArgs++] = parameters.component;
#ifndef GLSLANG_WEB
if (parameters.granularity != NoResult)
texArgs[numArgs++] = parameters.granularity;
if (parameters.coarse != NoResult)
texArgs[numArgs++] = parameters.coarse;
#endif
//
// Set up the optional arguments
//
int optArgNum = numArgs; // track which operand, if it exists, is the mask of optional arguments
++numArgs; // speculatively make room for the mask operand
int optArgNum = numArgs; // track which operand, if it exists, is the mask of optional arguments
++numArgs; // speculatively make room for the mask operand
ImageOperandsMask mask = ImageOperandsMaskNone; // the mask operand
if (parameters.bias) {
mask = (ImageOperandsMask)(mask | ImageOperandsBiasMask);
@ -1623,9 +1900,11 @@ Id Builder::createTextureCall(Decoration precision, Id resultType, bool sparse,
texArgs[numArgs++] = parameters.offset;
}
if (parameters.offsets) {
addCapability(CapabilityImageGatherExtended);
mask = (ImageOperandsMask)(mask | ImageOperandsConstOffsetsMask);
texArgs[numArgs++] = parameters.offsets;
}
#ifndef GLSLANG_WEB
if (parameters.sample) {
mask = (ImageOperandsMask)(mask | ImageOperandsSampleMask);
texArgs[numArgs++] = parameters.sample;
@ -1637,6 +1916,14 @@ Id Builder::createTextureCall(Decoration precision, Id resultType, bool sparse,
mask = (ImageOperandsMask)(mask | ImageOperandsMinLodMask);
texArgs[numArgs++] = parameters.lodClamp;
}
if (parameters.nonprivate) {
mask = mask | ImageOperandsNonPrivateTexelKHRMask;
}
if (parameters.volatil) {
mask = mask | ImageOperandsVolatileTexelKHRMask;
}
#endif
mask = mask | signExtensionMask;
if (mask == ImageOperandsMaskNone)
--numArgs; // undo speculative reservation for the mask argument
else
@ -1651,6 +1938,9 @@ Id Builder::createTextureCall(Decoration precision, Id resultType, bool sparse,
opCode = OpImageSparseFetch;
else
opCode = OpImageFetch;
#ifndef GLSLANG_WEB
} else if (parameters.granularity && parameters.coarse) {
opCode = OpImageSampleFootprintNV;
} else if (gather) {
if (parameters.Dref)
if (sparse)
@ -1662,6 +1952,7 @@ Id Builder::createTextureCall(Decoration precision, Id resultType, bool sparse,
opCode = OpImageSparseGather;
else
opCode = OpImageGather;
#endif
} else if (explicitLod) {
if (parameters.Dref) {
if (proj)
@ -1772,9 +2063,6 @@ Id Builder::createTextureCall(Decoration precision, Id resultType, bool sparse,
// Comments in header
Id Builder::createTextureQueryCall(Op opCode, const TextureParameters& parameters, bool isUnsignedResult)
{
// All these need a capability
addCapability(CapabilityImageQuery);
// Figure out the result type
Id resultType = 0;
switch (opCode) {
@ -1813,11 +2101,7 @@ Id Builder::createTextureQueryCall(Op opCode, const TextureParameters& parameter
break;
}
case OpImageQueryLod:
#ifdef AMD_EXTENSIONS
resultType = makeVectorType(getScalarTypeId(getTypeId(parameters.coords)), 2);
#else
resultType = makeVectorType(makeFloatType(32), 2);
#endif
break;
case OpImageQueryLevels:
case OpImageQuerySamples:
@ -1835,6 +2119,7 @@ Id Builder::createTextureQueryCall(Op opCode, const TextureParameters& parameter
if (parameters.lod)
query->addIdOperand(parameters.lod);
buildPoint->addInstruction(std::unique_ptr<Instruction>(query));
addCapability(CapabilityImageQuery);
return query->getResultId();
}
@ -1999,7 +2284,8 @@ Id Builder::createConstructor(Decoration precision, const std::vector<Id>& sourc
// Go through the source arguments, each one could have either
// a single or multiple components to contribute.
for (unsigned int i = 0; i < sources.size(); ++i) {
if (isScalar(sources[i]))
if (isScalar(sources[i]) || isPointer(sources[i]))
latchResult(sources[i]);
else if (isVector(sources[i]))
accumulateVectorConstituents(sources[i]);
@ -2027,9 +2313,44 @@ Id Builder::createMatrixConstructor(Decoration precision, const std::vector<Id>&
int numRows = getTypeNumRows(resultTypeId);
Instruction* instr = module.getInstruction(componentTypeId);
Id bitCount = instr->getIdOperand(0);
#ifdef GLSLANG_WEB
const unsigned bitCount = 32;
assert(bitCount == instr->getImmediateOperand(0));
#else
const unsigned bitCount = instr->getImmediateOperand(0);
#endif
// Will use a two step process
// Optimize matrix constructed from a bigger matrix
if (isMatrix(sources[0]) && getNumColumns(sources[0]) >= numCols && getNumRows(sources[0]) >= numRows) {
// To truncate the matrix to a smaller number of rows/columns, we need to:
// 1. For each column, extract the column and truncate it to the required size using shuffle
// 2. Assemble the resulting matrix from all columns
Id matrix = sources[0];
Id columnTypeId = getContainedTypeId(resultTypeId);
Id sourceColumnTypeId = getContainedTypeId(getTypeId(matrix));
std::vector<unsigned> channels;
for (int row = 0; row < numRows; ++row)
channels.push_back(row);
std::vector<Id> matrixColumns;
for (int col = 0; col < numCols; ++col) {
std::vector<unsigned> indexes;
indexes.push_back(col);
Id colv = createCompositeExtract(matrix, sourceColumnTypeId, indexes);
setPrecision(colv, precision);
if (numRows != getNumRows(matrix)) {
matrixColumns.push_back(createRvalueSwizzle(precision, columnTypeId, colv, channels));
} else {
matrixColumns.push_back(colv);
}
}
return setPrecision(createCompositeConstruct(resultTypeId, matrixColumns), precision);
}
// Otherwise, will use a two step process
// 1. make a compile-time 2D array of values
// 2. construct a matrix from that array
@ -2283,11 +2604,16 @@ void Builder::clearAccessChain()
accessChain.component = NoResult;
accessChain.preSwizzleBaseType = NoType;
accessChain.isRValue = false;
accessChain.coherentFlags.clear();
accessChain.alignment = 0;
}
// Comments in header
void Builder::accessChainPushSwizzle(std::vector<unsigned>& swizzle, Id preSwizzleBaseType)
void Builder::accessChainPushSwizzle(std::vector<unsigned>& swizzle, Id preSwizzleBaseType, AccessChain::CoherentFlags coherentFlags, unsigned int alignment)
{
accessChain.coherentFlags |= coherentFlags;
accessChain.alignment |= alignment;
// swizzles can be stacked in GLSL, but simplified to a single
// one here; the base type doesn't change
if (accessChain.preSwizzleBaseType == NoType)
@ -2309,7 +2635,7 @@ void Builder::accessChainPushSwizzle(std::vector<unsigned>& swizzle, Id preSwizz
}
// Comments in header
void Builder::accessChainStore(Id rvalue)
void Builder::accessChainStore(Id rvalue, spv::MemoryAccessMask memoryAccess, spv::Scope scope, unsigned int alignment)
{
assert(accessChain.isRValue == false);
@ -2327,11 +2653,17 @@ void Builder::accessChainStore(Id rvalue)
source = createLvalueSwizzle(getTypeId(tempBaseId), tempBaseId, source, accessChain.swizzle);
}
createStore(source, base);
// take LSB of alignment
alignment = alignment & ~(alignment & (alignment-1));
if (getStorageClass(base) == StorageClassPhysicalStorageBufferEXT) {
memoryAccess = (spv::MemoryAccessMask)(memoryAccess | spv::MemoryAccessAlignedMask);
}
createStore(source, base, memoryAccess, scope, alignment);
}
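The "take LSB of alignment" step above collapses an accumulated alignment mask to its lowest set bit, i.e. the smallest power of two that every contributor can still guarantee. A minimal standalone sketch of just that bit trick (not glslang code, only the arithmetic):

#include <cassert>
#include <cstdint>

// Keep only the least-significant set bit of a mask of candidate alignments.
// Example: 0b11100 (4 | 8 | 16) collapses to 0b00100, i.e. 4 bytes.
uint32_t lowestSetBit(uint32_t alignment) {
    return alignment & ~(alignment & (alignment - 1));
}

int main() {
    assert(lowestSetBit(28) == 4);   // 4 | 8 | 16 -> 4
    assert(lowestSetBit(16) == 16);  // already a single power of two
    assert(lowestSetBit(0) == 0);    // no alignment information at all
    return 0;
}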
// Comments in header
Id Builder::accessChainLoad(Decoration precision, Decoration nonUniform, Id resultType)
Id Builder::accessChainLoad(Decoration precision, Decoration nonUniform, Id resultType, spv::MemoryAccessMask memoryAccess, spv::Scope scope, unsigned int alignment)
{
Id id;
@ -2353,15 +2685,22 @@ Id Builder::accessChainLoad(Decoration precision, Decoration nonUniform, Id resu
}
}
if (constant)
if (constant) {
id = createCompositeExtract(accessChain.base, swizzleBase, indexes);
else {
// make a new function variable for this r-value
Id lValue = createVariable(StorageClassFunction, getTypeId(accessChain.base), "indexable");
// store into it
createStore(accessChain.base, lValue);
} else {
Id lValue = NoResult;
if (spvVersion >= Spv_1_4) {
// make a new function variable for this r-value, using an initializer,
// and mark it as NonWritable so that downstream it can be detected as a lookup
// table
lValue = createVariable(StorageClassFunction, getTypeId(accessChain.base), "indexable",
accessChain.base);
addDecoration(lValue, DecorationNonWritable);
} else {
lValue = createVariable(StorageClassFunction, getTypeId(accessChain.base), "indexable");
// store into it
createStore(accessChain.base, lValue);
}
// move base to the new variable
accessChain.base = lValue;
accessChain.isRValue = false;
@ -2374,8 +2713,15 @@ Id Builder::accessChainLoad(Decoration precision, Decoration nonUniform, Id resu
id = accessChain.base; // no precision, it was set when this was defined
} else {
transferAccessChainSwizzle(true);
// take LSB of alignment
alignment = alignment & ~(alignment & (alignment-1));
if (getStorageClass(accessChain.base) == StorageClassPhysicalStorageBufferEXT) {
memoryAccess = (spv::MemoryAccessMask)(memoryAccess | spv::MemoryAccessAlignedMask);
}
// load through the access chain
id = createLoad(collapseAccessChain());
id = createLoad(collapseAccessChain(), memoryAccess, scope, alignment);
setPrecision(id, precision);
addDecoration(id, nonUniform);
}
@ -2451,42 +2797,6 @@ Id Builder::accessChainGetInferredType()
return type;
}
// comment in header
void Builder::eliminateDeadDecorations() {
std::unordered_set<const Block*> reachable_blocks;
std::unordered_set<Id> unreachable_definitions;
// Collect IDs defined in unreachable blocks. For each function, label the
// reachable blocks first. Then for each unreachable block, collect the
// result IDs of the instructions in it.
for (std::vector<Function*>::const_iterator fi = module.getFunctions().cbegin();
fi != module.getFunctions().cend(); fi++) {
Function* f = *fi;
Block* entry = f->getEntryBlock();
inReadableOrder(entry, [&reachable_blocks](const Block* b) {
reachable_blocks.insert(b);
});
for (std::vector<Block*>::const_iterator bi = f->getBlocks().cbegin();
bi != f->getBlocks().cend(); bi++) {
Block* b = *bi;
if (!reachable_blocks.count(b)) {
for (std::vector<std::unique_ptr<Instruction> >::const_iterator
ii = b->getInstructions().cbegin();
ii != b->getInstructions().cend(); ii++) {
Instruction* i = ii->get();
unreachable_definitions.insert(i->getResultId());
}
}
}
}
decorations.erase(std::remove_if(decorations.begin(), decorations.end(),
[&unreachable_definitions](std::unique_ptr<Instruction>& I) -> bool {
Instruction* inst = I.get();
Id decoration_id = inst->getIdOperand(0);
return unreachable_definitions.count(decoration_id) != 0;
}),
decorations.end());
}
void Builder::dump(std::vector<unsigned int>& out) const
{
// Header, before first instructions:
@ -2692,14 +3002,14 @@ void Builder::createSelectionMerge(Block* mergeBlock, unsigned int control)
}
void Builder::createLoopMerge(Block* mergeBlock, Block* continueBlock, unsigned int control,
unsigned int dependencyLength)
const std::vector<unsigned int>& operands)
{
Instruction* merge = new Instruction(OpLoopMerge);
merge->addIdOperand(mergeBlock->getId());
merge->addIdOperand(continueBlock->getId());
merge->addImmediateOperand(control);
if ((control & LoopControlDependencyLengthMask) != 0)
merge->addImmediateOperand(dependencyLength);
for (int op = 0; op < (int)operands.size(); ++op)
merge->addImmediateOperand(operands[op]);
buildPoint->addInstruction(std::unique_ptr<Instruction>(merge));
}
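createLoopMerge() now takes a generic operand vector instead of a single dependencyLength, so a caller supplies whatever literals its loop-control mask requires. A hedged sketch of one possible call site (the real callers live elsewhere in the translator and may build the vector differently); 'builder', 'mergeBlock' and 'continueBlock' are assumed to exist:

// Sketch only: request a DependencyLength hint of 4 iterations.
unsigned int control = spv::LoopControlDependencyLengthMask;
std::vector<unsigned int> operands;
operands.push_back(4);  // the literal operand that DependencyLength requires
builder.createLoopMerge(mergeBlock, continueBlock, control, operands);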
@ -2717,7 +3027,8 @@ void Builder::createConditionalBranch(Id condition, Block* thenBlock, Block* els
// OpSource
// [OpSourceContinued]
// ...
void Builder::dumpSourceInstructions(std::vector<unsigned int>& out) const
void Builder::dumpSourceInstructions(const spv::Id fileId, const std::string& text,
std::vector<unsigned int>& out) const
{
const int maxWordCount = 0xFFFF;
const int opSourceWordCount = 4;
@ -2729,14 +3040,14 @@ void Builder::dumpSourceInstructions(std::vector<unsigned int>& out) const
sourceInst.addImmediateOperand(source);
sourceInst.addImmediateOperand(sourceVersion);
// File operand
if (sourceFileStringId != NoResult) {
sourceInst.addIdOperand(sourceFileStringId);
if (fileId != NoResult) {
sourceInst.addIdOperand(fileId);
// Source operand
if (sourceText.size() > 0) {
if (text.size() > 0) {
int nextByte = 0;
std::string subString;
while ((int)sourceText.size() - nextByte > 0) {
subString = sourceText.substr(nextByte, nonNullBytesPerInstruction);
while ((int)text.size() - nextByte > 0) {
subString = text.substr(nextByte, nonNullBytesPerInstruction);
if (nextByte == 0) {
// OpSource
sourceInst.addStringOperand(subString.c_str());
@ -2756,6 +3067,14 @@ void Builder::dumpSourceInstructions(std::vector<unsigned int>& out) const
}
}
// Dump an OpSource[Continued] sequence for the source and every include file
void Builder::dumpSourceInstructions(std::vector<unsigned int>& out) const
{
dumpSourceInstructions(sourceFileStringId, sourceText, out);
for (auto iItr = includeFiles.begin(); iItr != includeFiles.end(); ++iItr)
dumpSourceInstructions(iItr->first, *iItr->second, out);
}
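Like every SPIR-V instruction, OpSource is limited to a 16-bit word count, so long source text is emitted as one OpSource followed by OpSourceContinued chunks; the new overload above then repeats that sequence once per include file. A rough illustration of the chunking arithmetic; the exact definition of nonNullBytesPerInstruction is outside the lines shown, so the formula here is an assumption:

#include <string>
#include <vector>

const int maxWordCount = 0xFFFF;     // 16-bit word count limit per instruction
const int opSourceWordCount = 4;     // presumably opcode word, language, version, file id
// Assumed: remaining words at 4 bytes each, minus the terminating NUL.
const int nonNullBytesPerInstruction = 4 * (maxWordCount - opSourceWordCount) - 1;

// Split 'text' the way dumpSourceInstructions walks it: first chunk -> OpSource,
// each later chunk -> its own OpSourceContinued.
std::vector<std::string> chunkSource(const std::string& text) {
    std::vector<std::string> chunks;
    for (int nextByte = 0; (int)text.size() - nextByte > 0; nextByte += nonNullBytesPerInstruction)
        chunks.push_back(text.substr(nextByte, nonNullBytesPerInstruction));
    return chunks;
}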
void Builder::dumpInstructions(std::vector<unsigned int>& out, const std::vector<std::unique_ptr<Instruction> >& instructions) const
{
for (int i = 0; i < (int)instructions.size(); ++i) {

Externals/glslang/SPIRV/SpvBuilder.h vendored Executable file → Normal file
@ -1,6 +1,6 @@
//
// Copyright (C) 2014-2015 LunarG, Inc.
// Copyright (C) 2015-2016 Google, Inc.
// Copyright (C) 2015-2018 Google, Inc.
// Copyright (C) 2017 ARM Limited.
//
// All rights reserved.
@ -57,9 +57,19 @@
#include <sstream>
#include <stack>
#include <unordered_map>
#include <map>
namespace spv {
typedef enum {
Spv_1_0 = (1 << 16),
Spv_1_1 = (1 << 16) | (1 << 8),
Spv_1_2 = (1 << 16) | (2 << 8),
Spv_1_3 = (1 << 16) | (3 << 8),
Spv_1_4 = (1 << 16) | (4 << 8),
Spv_1_5 = (1 << 16) | (5 << 8),
} SpvVersion;
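The SpvVersion enumerants pack the version exactly like the word in a SPIR-V module header (major in bits 16-23, minor in bits 8-15), so they compare directly against Builder::getSpvVersion(), which is what addIncorporatedExtension() relies on further down. A small standalone check of that encoding:

#include <cassert>

int main() {
    const unsigned int spv_1_4 = (1 << 16) | (4 << 8);  // 0x00010400
    assert(((spv_1_4 >> 16) & 0xff) == 1);              // major
    assert(((spv_1_4 >> 8) & 0xff) == 4);               // minor
    // Packed values order naturally, so "current < incorporated" tests work.
    assert(((1u << 16) | (3u << 8)) < spv_1_4);
    return 0;
}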
class Builder {
public:
Builder(unsigned int spvVersion, unsigned int userNumber, SpvBuildLogger* logger);
@ -74,18 +84,47 @@ public:
source = lang;
sourceVersion = version;
}
spv::Id getStringId(const std::string& str)
{
auto sItr = stringIds.find(str);
if (sItr != stringIds.end())
return sItr->second;
spv::Id strId = getUniqueId();
Instruction* fileString = new Instruction(strId, NoType, OpString);
const char* file_c_str = str.c_str();
fileString->addStringOperand(file_c_str);
strings.push_back(std::unique_ptr<Instruction>(fileString));
stringIds[file_c_str] = strId;
return strId;
}
void setSourceFile(const std::string& file)
{
Instruction* fileString = new Instruction(getUniqueId(), NoType, OpString);
fileString->addStringOperand(file.c_str());
sourceFileStringId = fileString->getResultId();
strings.push_back(std::unique_ptr<Instruction>(fileString));
sourceFileStringId = getStringId(file);
}
void setSourceText(const std::string& text) { sourceText = text; }
void addSourceExtension(const char* ext) { sourceExtensions.push_back(ext); }
void addModuleProcessed(const std::string& p) { moduleProcesses.push_back(p.c_str()); }
void setEmitOpLines() { emitOpLines = true; }
void addExtension(const char* ext) { extensions.insert(ext); }
void removeExtension(const char* ext)
{
extensions.erase(ext);
}
void addIncorporatedExtension(const char* ext, SpvVersion incorporatedVersion)
{
if (getSpvVersion() < static_cast<unsigned>(incorporatedVersion))
addExtension(ext);
}
void promoteIncorporatedExtension(const char* baseExt, const char* promoExt, SpvVersion incorporatedVersion)
{
removeExtension(baseExt);
addIncorporatedExtension(promoExt, incorporatedVersion);
}
void addInclude(const std::string& name, const std::string& text)
{
spv::Id incId = getStringId(name);
includeFiles[incId] = &text;
}
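getStringId() caches OpString results in the stringIds map, so repeated calls with the same name (from setSourceFile() or addInclude()) reuse one id instead of emitting duplicate strings, and addInclude() only stores a pointer to the text for dumpSourceInstructions() to pick up later. A hedged usage sketch, assuming an existing spv::Builder named 'builder' and include text that outlives it:

// Sketch only, not a complete program.
spv::Id a = builder.getStringId("shader.frag");
spv::Id b = builder.getStringId("shader.frag");  // cache hit: same id, no new OpString
// a == b here.

static const std::string commonText = "/* contents of common.glsl */";
builder.addInclude("common.glsl", commonText);   // keyed by getStringId("common.glsl")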
Id import(const char*);
void setMemoryModel(spv::AddressingModel addr, spv::MemoryModel mem)
{
@ -106,16 +145,25 @@ public:
return id;
}
// Log the current line, and if different than the last one,
// issue a new OpLine, using the current file name.
// Generate OpLine for non-filename-based #line directives (ie no filename
// seen yet): Log the current line, and if different than the last one,
// issue a new OpLine using the new line and current source file name.
void setLine(int line);
// If filename null, generate OpLine for non-filename-based line directives,
// else do filename-based: Log the current line and file, and if different
// than the last one, issue a new OpLine using the new line and file
// name.
void setLine(int line, const char* filename);
// Low-level OpLine. See setLine() for a layered helper.
void addLine(Id fileName, int line, int column);
// For creating new types (will return old type if the requested one was already made).
Id makeVoidType();
Id makeBoolType();
Id makePointer(StorageClass, Id type);
Id makePointer(StorageClass, Id pointee);
Id makeForwardPointer(StorageClass);
Id makePointerFromForwardPointer(StorageClass, Id forwardPointerType, Id pointee);
Id makeIntegerType(int width, bool hasSign); // generic
Id makeIntType(int width) { return makeIntegerType(width, true); }
Id makeUintType(int width) { return makeIntegerType(width, false); }
@ -130,6 +178,10 @@ public:
Id makeImageType(Id sampledType, Dim, bool depth, bool arrayed, bool ms, unsigned sampled, ImageFormat format);
Id makeSamplerType();
Id makeSampledImageType(Id imageType);
Id makeCooperativeMatrixType(Id component, Id scope, Id rows, Id cols);
// accelerationStructureNV type
Id makeAccelerationStructureNVType();
// For querying about types.
Id getTypeId(Id resultId) const { return module.getTypeId(resultId); }
@ -150,6 +202,7 @@ public:
bool isScalar(Id resultId) const { return isScalarType(getTypeId(resultId)); }
bool isVector(Id resultId) const { return isVectorType(getTypeId(resultId)); }
bool isMatrix(Id resultId) const { return isMatrixType(getTypeId(resultId)); }
bool isCooperativeMatrix(Id resultId)const { return isCooperativeMatrixType(getTypeId(resultId)); }
bool isAggregate(Id resultId) const { return isAggregateType(getTypeId(resultId)); }
bool isSampledImage(Id resultId) const { return isSampledImageType(getTypeId(resultId)); }
@ -163,10 +216,17 @@ public:
bool isMatrixType(Id typeId) const { return getTypeClass(typeId) == OpTypeMatrix; }
bool isStructType(Id typeId) const { return getTypeClass(typeId) == OpTypeStruct; }
bool isArrayType(Id typeId) const { return getTypeClass(typeId) == OpTypeArray; }
bool isAggregateType(Id typeId) const { return isArrayType(typeId) || isStructType(typeId); }
#ifdef GLSLANG_WEB
bool isCooperativeMatrixType(Id typeId)const { return false; }
#else
bool isCooperativeMatrixType(Id typeId)const { return getTypeClass(typeId) == OpTypeCooperativeMatrixNV; }
#endif
bool isAggregateType(Id typeId) const { return isArrayType(typeId) || isStructType(typeId) || isCooperativeMatrixType(typeId); }
bool isImageType(Id typeId) const { return getTypeClass(typeId) == OpTypeImage; }
bool isSamplerType(Id typeId) const { return getTypeClass(typeId) == OpTypeSampler; }
bool isSampledImageType(Id typeId) const { return getTypeClass(typeId) == OpTypeSampledImage; }
bool containsType(Id typeId, Op typeOp, unsigned int width) const;
bool containsPhysicalStorageBufferOrArray(Id typeId) const;
bool isConstantOpCode(Op opcode) const;
bool isSpecConstantOpCode(Op opcode) const;
@ -267,16 +327,16 @@ public:
void makeDiscard();
// Create a global or function local or IO variable.
Id createVariable(StorageClass, Id type, const char* name = 0);
Id createVariable(StorageClass, Id type, const char* name = 0, Id initializer = NoResult);
// Create an intermediate with an undefined value.
Id createUndefined(Id type);
// Store into an Id and return the l-value
void createStore(Id rValue, Id lValue);
void createStore(Id rValue, Id lValue, spv::MemoryAccessMask memoryAccess = spv::MemoryAccessMaskNone, spv::Scope scope = spv::ScopeMax, unsigned int alignment = 0);
// Load from an Id and return it
Id createLoad(Id lValue);
Id createLoad(Id lValue, spv::MemoryAccessMask memoryAccess = spv::MemoryAccessMaskNone, spv::Scope scope = spv::ScopeMax, unsigned int alignment = 0);
// Create an OpAccessChain instruction
Id createAccessChain(StorageClass, Id base, const std::vector<Id>& offsets);
@ -284,6 +344,9 @@ public:
// Create an OpArrayLength instruction
Id createArrayLength(Id base, unsigned int member);
// Create an OpCooperativeMatrixLengthNV instruction
Id createCooperativeMatrixLength(Id type);
// Create an OpCompositeExtract instruction
Id createCompositeExtract(Id composite, Id typeId, unsigned index);
Id createCompositeExtract(Id composite, Id typeId, const std::vector<unsigned>& indexes);
@ -296,12 +359,14 @@ public:
void createNoResultOp(Op);
void createNoResultOp(Op, Id operand);
void createNoResultOp(Op, const std::vector<Id>& operands);
void createNoResultOp(Op, const std::vector<IdImmediate>& operands);
void createControlBarrier(Scope execution, Scope memory, MemorySemanticsMask);
void createMemoryBarrier(unsigned executionScope, unsigned memorySemantics);
Id createUnaryOp(Op, Id typeId, Id operand);
Id createBinOp(Op, Id typeId, Id operand1, Id operand2);
Id createTriOp(Op, Id typeId, Id operand1, Id operand2, Id operand3);
Id createOp(Op, Id typeId, const std::vector<Id>& operands);
Id createOp(Op, Id typeId, const std::vector<IdImmediate>& operands);
Id createFunctionCall(spv::Function*, const std::vector<spv::Id>&);
Id createSpecConstantOp(Op, Id typeId, const std::vector<spv::Id>& operands, const std::vector<unsigned>& literals);
@ -363,10 +428,15 @@ public:
Id component;
Id texelOut;
Id lodClamp;
Id granularity;
Id coarse;
bool nonprivate;
bool volatil;
};
// Select the correct texture operation based on all inputs, and emit the correct instruction
Id createTextureCall(Decoration precision, Id resultType, bool sparse, bool fetch, bool proj, bool gather, bool noImplicit, const TextureParameters&);
Id createTextureCall(Decoration precision, Id resultType, bool sparse, bool fetch, bool proj, bool gather,
bool noImplicit, const TextureParameters&, ImageOperandsMask);
// Emit the OpTextureQuery* instruction that was passed in.
// Figure out the right return value and type, and return it.
@ -502,6 +572,52 @@ public:
Id component; // a dynamic component index, can coexist with a swizzle, done after the swizzle, NoResult if not present
Id preSwizzleBaseType; // dereferenced type, before swizzle or component is applied; NoType unless a swizzle or component is present
bool isRValue; // true if 'base' is an r-value, otherwise, base is an l-value
unsigned int alignment; // bitwise OR of alignment values passed in. Accumulates worst alignment. Only tracks base and (optional) component selection alignment.
// Accumulate whether anything in the chain of structures has coherent decorations.
struct CoherentFlags {
CoherentFlags() { clear(); }
#ifdef GLSLANG_WEB
void clear() { }
bool isVolatile() const { return false; }
CoherentFlags operator |=(const CoherentFlags &other) { return *this; }
#else
bool isVolatile() const { return volatil; }
unsigned coherent : 1;
unsigned devicecoherent : 1;
unsigned queuefamilycoherent : 1;
unsigned workgroupcoherent : 1;
unsigned subgroupcoherent : 1;
unsigned nonprivate : 1;
unsigned volatil : 1;
unsigned isImage : 1;
void clear() {
coherent = 0;
devicecoherent = 0;
queuefamilycoherent = 0;
workgroupcoherent = 0;
subgroupcoherent = 0;
nonprivate = 0;
volatil = 0;
isImage = 0;
}
CoherentFlags operator |=(const CoherentFlags &other) {
coherent |= other.coherent;
devicecoherent |= other.devicecoherent;
queuefamilycoherent |= other.queuefamilycoherent;
workgroupcoherent |= other.workgroupcoherent;
subgroupcoherent |= other.subgroupcoherent;
nonprivate |= other.nonprivate;
volatil |= other.volatil;
isImage |= other.isImage;
return *this;
}
#endif
};
CoherentFlags coherentFlags;
};
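CoherentFlags is a one-bit-per-qualifier set that is OR-merged as the access chain grows, so a dereference inherits the strongest memory qualifiers seen anywhere along the chain; under GLSLANG_WEB it degenerates to an empty struct. A minimal sketch of the merge, assuming a non-web build:

spv::Builder::AccessChain::CoherentFlags block, member;
block.coherent = 1;   // e.g. the buffer block was declared coherent
member.volatil = 1;   // e.g. the selected member was declared volatile
block |= member;      // accumulate, exactly as accessChainPush() does
// block.coherent and block.isVolatile() now both report true.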
//
@ -531,30 +647,34 @@ public:
}
// push offset onto the end of the chain
void accessChainPush(Id offset)
void accessChainPush(Id offset, AccessChain::CoherentFlags coherentFlags, unsigned int alignment)
{
accessChain.indexChain.push_back(offset);
accessChain.coherentFlags |= coherentFlags;
accessChain.alignment |= alignment;
}
// push new swizzle onto the end of any existing swizzle, merging into a single swizzle
void accessChainPushSwizzle(std::vector<unsigned>& swizzle, Id preSwizzleBaseType);
void accessChainPushSwizzle(std::vector<unsigned>& swizzle, Id preSwizzleBaseType, AccessChain::CoherentFlags coherentFlags, unsigned int alignment);
// push a dynamic component selection onto the access chain, only applicable with a
// non-trivial swizzle or no swizzle
void accessChainPushComponent(Id component, Id preSwizzleBaseType)
void accessChainPushComponent(Id component, Id preSwizzleBaseType, AccessChain::CoherentFlags coherentFlags, unsigned int alignment)
{
if (accessChain.swizzle.size() != 1) {
accessChain.component = component;
if (accessChain.preSwizzleBaseType == NoType)
accessChain.preSwizzleBaseType = preSwizzleBaseType;
}
accessChain.coherentFlags |= coherentFlags;
accessChain.alignment |= alignment;
}
// use accessChain and swizzle to store value
void accessChainStore(Id rvalue);
void accessChainStore(Id rvalue, spv::MemoryAccessMask memoryAccess = spv::MemoryAccessMaskNone, spv::Scope scope = spv::ScopeMax, unsigned int alignment = 0);
// use accessChain and swizzle to load an r-value
Id accessChainLoad(Decoration precision, Decoration nonUniform, Id ResultType);
Id accessChainLoad(Decoration precision, Decoration nonUniform, Id ResultType, spv::MemoryAccessMask memoryAccess = spv::MemoryAccessMaskNone, spv::Scope scope = spv::ScopeMax, unsigned int alignment = 0);
// get the direct pointer for an l-value
Id accessChainGetLValue();
@ -563,14 +683,27 @@ public:
// based on the type of the base and the chain of dereferences.
Id accessChainGetInferredType();
// Remove OpDecorate instructions whose operands are defined in unreachable
// blocks.
void eliminateDeadDecorations();
// Add capabilities, extensions, remove unneeded decorations, etc.,
// based on the resulting SPIR-V.
void postProcess();
// Prune unreachable blocks in the CFG and remove unneeded decorations.
void postProcessCFG();
#ifndef GLSLANG_WEB
// Add capabilities, extensions based on instructions in the module.
void postProcessFeatures();
// Hook to visit each instruction in a block in a function
void postProcess(Instruction&);
// Hook to visit each non-32-bit sized float/int operation in a block.
void postProcessType(const Instruction&, spv::Id typeId);
#endif
void dump(std::vector<unsigned int>&) const;
void createBranch(Block* block);
void createConditionalBranch(Id condition, Block* thenBlock, Block* elseBlock);
void createLoopMerge(Block* mergeBlock, Block* continueBlock, unsigned int control, unsigned int dependencyLength);
void createLoopMerge(Block* mergeBlock, Block* continueBlock, unsigned int control, const std::vector<unsigned int>& operands);
// Sets to generate opcode for specialization constants.
void setToSpecConstCodeGenMode() { generatingOpCodeForSpecConst = true; }
@ -584,7 +717,7 @@ public:
Id makeInt64Constant(Id typeId, unsigned long long value, bool specConstant);
Id findScalarConstant(Op typeClass, Op opcode, Id typeId, unsigned value);
Id findScalarConstant(Op typeClass, Op opcode, Id typeId, unsigned v1, unsigned v2);
Id findCompositeConstant(Op typeClass, const std::vector<Id>& comps);
Id findCompositeConstant(Op typeClass, Id typeId, const std::vector<Id>& comps);
Id findStructConstant(Id typeId, const std::vector<Id>& comps);
Id collapseAccessChain();
void remapDynamicSwizzle();
@ -593,8 +726,10 @@ public:
void createAndSetNoPredecessorBlock(const char*);
void createSelectionMerge(Block* mergeBlock, unsigned int control);
void dumpSourceInstructions(std::vector<unsigned int>&) const;
void dumpSourceInstructions(const spv::Id fileId, const std::string& text, std::vector<unsigned int>&) const;
void dumpInstructions(std::vector<unsigned int>&, const std::vector<std::unique_ptr<Instruction> >&) const;
void dumpModuleProcesses(std::vector<unsigned int>&) const;
spv::MemoryAccessMask sanitizeMemoryAccessForStorageClass(spv::MemoryAccessMask memoryAccess, StorageClass sc) const;
unsigned int spvVersion; // the version of SPIR-V to emit in the header
SourceLanguage source;
@ -602,6 +737,7 @@ public:
spv::Id sourceFileStringId;
std::string sourceText;
int currentLine;
const char* currentFile;
bool emitOpLines;
std::set<std::string> extensions;
std::vector<const char*> sourceExtensions;
@ -639,6 +775,12 @@ public:
// Our loop stack.
std::stack<LoopBlocks> loops;
// map from strings to their string ids
std::unordered_map<std::string, spv::Id> stringIds;
// map from include file name ids to their contents
std::map<spv::Id, const std::string*> includeFiles;
// The stream for outputting warnings and errors.
SpvBuildLogger* logger;
}; // end Builder class

View File

@ -0,0 +1,450 @@
//
// Copyright (C) 2018 Google, Inc.
//
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
//
// Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
//
// Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
//
// Neither the name of 3Dlabs Inc. Ltd. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
// COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
// POSSIBILITY OF SUCH DAMAGE.
//
// Post-processing for SPIR-V IR, in internal form, not standard binary form.
//
#include <cassert>
#include <cstdlib>
#include <unordered_map>
#include <unordered_set>
#include <algorithm>
#include "SpvBuilder.h"
#include "spirv.hpp"
#include "GlslangToSpv.h"
#include "SpvBuilder.h"
namespace spv {
#include "GLSL.std.450.h"
#include "GLSL.ext.KHR.h"
#include "GLSL.ext.EXT.h"
#include "GLSL.ext.AMD.h"
#include "GLSL.ext.NV.h"
}
namespace spv {
#ifndef GLSLANG_WEB
// Hook to visit each operand type and result type of an instruction.
// Will be called multiple times for one instruction, once for each typed
// operand and the result.
void Builder::postProcessType(const Instruction& inst, Id typeId)
{
// Characterize the type being questioned
Id basicTypeOp = getMostBasicTypeClass(typeId);
int width = 0;
if (basicTypeOp == OpTypeFloat || basicTypeOp == OpTypeInt)
width = getScalarTypeWidth(typeId);
// Do opcode-specific checks
switch (inst.getOpCode()) {
case OpLoad:
case OpStore:
if (basicTypeOp == OpTypeStruct) {
if (containsType(typeId, OpTypeInt, 8))
addCapability(CapabilityInt8);
if (containsType(typeId, OpTypeInt, 16))
addCapability(CapabilityInt16);
if (containsType(typeId, OpTypeFloat, 16))
addCapability(CapabilityFloat16);
} else {
StorageClass storageClass = getStorageClass(inst.getIdOperand(0));
if (width == 8) {
switch (storageClass) {
case StorageClassPhysicalStorageBufferEXT:
case StorageClassUniform:
case StorageClassStorageBuffer:
case StorageClassPushConstant:
break;
default:
addCapability(CapabilityInt8);
break;
}
} else if (width == 16) {
switch (storageClass) {
case StorageClassPhysicalStorageBufferEXT:
case StorageClassUniform:
case StorageClassStorageBuffer:
case StorageClassPushConstant:
case StorageClassInput:
case StorageClassOutput:
break;
default:
if (basicTypeOp == OpTypeInt)
addCapability(CapabilityInt16);
if (basicTypeOp == OpTypeFloat)
addCapability(CapabilityFloat16);
break;
}
}
}
break;
case OpAccessChain:
case OpPtrAccessChain:
case OpCopyObject:
break;
case OpFConvert:
case OpSConvert:
case OpUConvert:
// Look for any 8/16-bit storage capabilities. If there are none, assume that
// the convert instruction requires the Float16/Int8/16 capability.
if (containsType(typeId, OpTypeFloat, 16) || containsType(typeId, OpTypeInt, 16)) {
bool foundStorage = false;
for (auto it = capabilities.begin(); it != capabilities.end(); ++it) {
spv::Capability cap = *it;
if (cap == spv::CapabilityStorageInputOutput16 ||
cap == spv::CapabilityStoragePushConstant16 ||
cap == spv::CapabilityStorageUniformBufferBlock16 ||
cap == spv::CapabilityStorageUniform16) {
foundStorage = true;
break;
}
}
if (!foundStorage) {
if (containsType(typeId, OpTypeFloat, 16))
addCapability(CapabilityFloat16);
if (containsType(typeId, OpTypeInt, 16))
addCapability(CapabilityInt16);
}
}
if (containsType(typeId, OpTypeInt, 8)) {
bool foundStorage = false;
for (auto it = capabilities.begin(); it != capabilities.end(); ++it) {
spv::Capability cap = *it;
if (cap == spv::CapabilityStoragePushConstant8 ||
cap == spv::CapabilityUniformAndStorageBuffer8BitAccess ||
cap == spv::CapabilityStorageBuffer8BitAccess) {
foundStorage = true;
break;
}
}
if (!foundStorage) {
addCapability(CapabilityInt8);
}
}
break;
case OpExtInst:
switch (inst.getImmediateOperand(1)) {
case GLSLstd450Frexp:
case GLSLstd450FrexpStruct:
if (getSpvVersion() < glslang::EShTargetSpv_1_3 && containsType(typeId, OpTypeInt, 16))
addExtension(spv::E_SPV_AMD_gpu_shader_int16);
break;
case GLSLstd450InterpolateAtCentroid:
case GLSLstd450InterpolateAtSample:
case GLSLstd450InterpolateAtOffset:
if (getSpvVersion() < glslang::EShTargetSpv_1_3 && containsType(typeId, OpTypeFloat, 16))
addExtension(spv::E_SPV_AMD_gpu_shader_half_float);
break;
default:
break;
}
break;
default:
if (basicTypeOp == OpTypeFloat && width == 16)
addCapability(CapabilityFloat16);
if (basicTypeOp == OpTypeInt && width == 16)
addCapability(CapabilityInt16);
if (basicTypeOp == OpTypeInt && width == 8)
addCapability(CapabilityInt8);
break;
}
}
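The width-based rules above only add the full Int8/Int16/Float16 capabilities when the narrow value lives somewhere that the dedicated 8-bit/16-bit storage capabilities cannot cover, such as a Function-local or Private variable. A hypothetical helper (not glslang code) restating the width == 16 integer case; it needs spirv.hpp for the enums:

bool needsInt16Capability(spv::StorageClass sc) {
    switch (sc) {
    case spv::StorageClassPhysicalStorageBufferEXT:
    case spv::StorageClassUniform:
    case spv::StorageClassStorageBuffer:
    case spv::StorageClassPushConstant:
    case spv::StorageClassInput:
    case spv::StorageClassOutput:
        return false;  // already covered by the 16-bit storage capabilities
    default:
        return true;   // e.g. Function or Private data needs CapabilityInt16 itself
    }
}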
// Called for each instruction that resides in a block.
void Builder::postProcess(Instruction& inst)
{
// Add capabilities based simply on the opcode.
switch (inst.getOpCode()) {
case OpExtInst:
switch (inst.getImmediateOperand(1)) {
case GLSLstd450InterpolateAtCentroid:
case GLSLstd450InterpolateAtSample:
case GLSLstd450InterpolateAtOffset:
addCapability(CapabilityInterpolationFunction);
break;
default:
break;
}
break;
case OpDPdxFine:
case OpDPdyFine:
case OpFwidthFine:
case OpDPdxCoarse:
case OpDPdyCoarse:
case OpFwidthCoarse:
addCapability(CapabilityDerivativeControl);
break;
case OpImageQueryLod:
case OpImageQuerySize:
case OpImageQuerySizeLod:
case OpImageQuerySamples:
case OpImageQueryLevels:
addCapability(CapabilityImageQuery);
break;
case OpGroupNonUniformPartitionNV:
addExtension(E_SPV_NV_shader_subgroup_partitioned);
addCapability(CapabilityGroupNonUniformPartitionedNV);
break;
case OpLoad:
case OpStore:
{
// For any load/store to a PhysicalStorageBufferEXT, walk the accesschain
// index list to compute the misalignment. The pre-existing alignment value
// (set via Builder::AccessChain::alignment) only accounts for the base of
// the reference type and any scalar component selection in the accesschain,
// and this function computes the rest from the SPIR-V Offset decorations.
Instruction *accessChain = module.getInstruction(inst.getIdOperand(0));
if (accessChain->getOpCode() == OpAccessChain) {
Instruction *base = module.getInstruction(accessChain->getIdOperand(0));
// Get the type of the base of the access chain. It must be a pointer type.
Id typeId = base->getTypeId();
Instruction *type = module.getInstruction(typeId);
assert(type->getOpCode() == OpTypePointer);
if (type->getImmediateOperand(0) != StorageClassPhysicalStorageBufferEXT) {
break;
}
// Get the pointee type.
typeId = type->getIdOperand(1);
type = module.getInstruction(typeId);
// Walk the index list for the access chain. For each index, find any
// misalignment that can apply when accessing the member/element via
// Offset/ArrayStride/MatrixStride decorations, and bitwise OR them all
// together.
int alignment = 0;
for (int i = 1; i < accessChain->getNumOperands(); ++i) {
Instruction *idx = module.getInstruction(accessChain->getIdOperand(i));
if (type->getOpCode() == OpTypeStruct) {
assert(idx->getOpCode() == OpConstant);
unsigned int c = idx->getImmediateOperand(0);
const auto function = [&](const std::unique_ptr<Instruction>& decoration) {
if (decoration.get()->getOpCode() == OpMemberDecorate &&
decoration.get()->getIdOperand(0) == typeId &&
decoration.get()->getImmediateOperand(1) == c &&
(decoration.get()->getImmediateOperand(2) == DecorationOffset ||
decoration.get()->getImmediateOperand(2) == DecorationMatrixStride)) {
alignment |= decoration.get()->getImmediateOperand(3);
}
};
std::for_each(decorations.begin(), decorations.end(), function);
// get the next member type
typeId = type->getIdOperand(c);
type = module.getInstruction(typeId);
} else if (type->getOpCode() == OpTypeArray ||
type->getOpCode() == OpTypeRuntimeArray) {
const auto function = [&](const std::unique_ptr<Instruction>& decoration) {
if (decoration.get()->getOpCode() == OpDecorate &&
decoration.get()->getIdOperand(0) == typeId &&
decoration.get()->getImmediateOperand(1) == DecorationArrayStride) {
alignment |= decoration.get()->getImmediateOperand(2);
}
};
std::for_each(decorations.begin(), decorations.end(), function);
// Get the element type
typeId = type->getIdOperand(0);
type = module.getInstruction(typeId);
} else {
// Once we get to any non-aggregate type, we're done.
break;
}
}
assert(inst.getNumOperands() >= 3);
unsigned int memoryAccess = inst.getImmediateOperand((inst.getOpCode() == OpStore) ? 2 : 1);
assert(memoryAccess & MemoryAccessAlignedMask);
static_cast<void>(memoryAccess);
// Compute the index of the alignment operand.
int alignmentIdx = 2;
if (inst.getOpCode() == OpStore)
alignmentIdx++;
// Merge new and old (mis)alignment
alignment |= inst.getImmediateOperand(alignmentIdx);
// Pick the LSB
alignment = alignment & ~(alignment & (alignment-1));
// update the Aligned operand
inst.setImmediateOperand(alignmentIdx, alignment);
}
break;
}
default:
break;
}
// Checks based on type
if (inst.getTypeId() != NoType)
postProcessType(inst, inst.getTypeId());
for (int op = 0; op < inst.getNumOperands(); ++op) {
if (inst.isIdOperand(op)) {
// In blocks, these are always result ids, but we are relying on
// getTypeId() to return NoType for things like OpLabel.
if (getTypeId(inst.getIdOperand(op)) != NoType)
postProcessType(inst, getTypeId(inst.getIdOperand(op)));
}
}
}
#endif
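The net effect of the load/store walk above: every decoration that can perturb the address (member Offset, MatrixStride, ArrayStride) plus the Aligned literal already on the instruction is OR-ed together, and the final value is the lowest set bit of that union, the most conservative power of two. A worked example with made-up values:

int alignment = 0;
alignment |= 4;    // member Offset decoration
alignment |= 16;   // element ArrayStride decoration
alignment |= 8;    // Aligned literal already present on the OpLoad/OpStore
// alignment == 0b11100 (28); keep only the least-significant set bit:
alignment = alignment & ~(alignment & (alignment - 1));
// alignment == 4: the strictest guarantee that holds for every contribution.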
// comment in header
void Builder::postProcessCFG()
{
// reachableBlocks is the set of blocks reached via control flow, or which are
// unreachable continue targets or unreachable merges.
std::unordered_set<const Block*> reachableBlocks;
std::unordered_map<Block*, Block*> headerForUnreachableContinue;
std::unordered_set<Block*> unreachableMerges;
std::unordered_set<Id> unreachableDefinitions;
// Collect IDs defined in unreachable blocks. For each function, label the
// reachable blocks first. Then for each unreachable block, collect the
// result IDs of the instructions in it.
for (auto fi = module.getFunctions().cbegin(); fi != module.getFunctions().cend(); fi++) {
Function* f = *fi;
Block* entry = f->getEntryBlock();
inReadableOrder(entry,
[&reachableBlocks, &unreachableMerges, &headerForUnreachableContinue]
(Block* b, ReachReason why, Block* header) {
reachableBlocks.insert(b);
if (why == ReachDeadContinue) headerForUnreachableContinue[b] = header;
if (why == ReachDeadMerge) unreachableMerges.insert(b);
});
for (auto bi = f->getBlocks().cbegin(); bi != f->getBlocks().cend(); bi++) {
Block* b = *bi;
if (unreachableMerges.count(b) != 0 || headerForUnreachableContinue.count(b) != 0) {
auto ii = b->getInstructions().cbegin();
++ii; // Keep potential decorations on the label.
for (; ii != b->getInstructions().cend(); ++ii)
unreachableDefinitions.insert(ii->get()->getResultId());
} else if (reachableBlocks.count(b) == 0) {
// The normal case for unreachable code. All definitions are considered dead.
for (auto ii = b->getInstructions().cbegin(); ii != b->getInstructions().cend(); ++ii)
unreachableDefinitions.insert(ii->get()->getResultId());
}
}
}
// Modify unreachable merge blocks and unreachable continue targets.
// Delete their contents.
for (auto mergeIter = unreachableMerges.begin(); mergeIter != unreachableMerges.end(); ++mergeIter) {
(*mergeIter)->rewriteAsCanonicalUnreachableMerge();
}
for (auto continueIter = headerForUnreachableContinue.begin();
continueIter != headerForUnreachableContinue.end();
++continueIter) {
Block* continue_target = continueIter->first;
Block* header = continueIter->second;
continue_target->rewriteAsCanonicalUnreachableContinue(header);
}
// Remove unneeded decorations, for unreachable instructions
decorations.erase(std::remove_if(decorations.begin(), decorations.end(),
[&unreachableDefinitions](std::unique_ptr<Instruction>& I) -> bool {
Id decoration_id = I.get()->getIdOperand(0);
return unreachableDefinitions.count(decoration_id) != 0;
}),
decorations.end());
}
#ifndef GLSLANG_WEB
// comment in header
void Builder::postProcessFeatures() {
// Add per-instruction capabilities, extensions, etc.,
// Look for any 8/16 bit type in physical storage buffer class, and set the
// appropriate capability. This happens in createSpvVariable for other storage
// classes, but there isn't always a variable for physical storage buffer.
for (int t = 0; t < (int)groupedTypes[OpTypePointer].size(); ++t) {
Instruction* type = groupedTypes[OpTypePointer][t];
if (type->getImmediateOperand(0) == (unsigned)StorageClassPhysicalStorageBufferEXT) {
if (containsType(type->getIdOperand(1), OpTypeInt, 8)) {
addIncorporatedExtension(spv::E_SPV_KHR_8bit_storage, spv::Spv_1_5);
addCapability(spv::CapabilityStorageBuffer8BitAccess);
}
if (containsType(type->getIdOperand(1), OpTypeInt, 16) ||
containsType(type->getIdOperand(1), OpTypeFloat, 16)) {
addIncorporatedExtension(spv::E_SPV_KHR_16bit_storage, spv::Spv_1_3);
addCapability(spv::CapabilityStorageBuffer16BitAccess);
}
}
}
// process all block-contained instructions
for (auto fi = module.getFunctions().cbegin(); fi != module.getFunctions().cend(); fi++) {
Function* f = *fi;
for (auto bi = f->getBlocks().cbegin(); bi != f->getBlocks().cend(); bi++) {
Block* b = *bi;
for (auto ii = b->getInstructions().cbegin(); ii != b->getInstructions().cend(); ii++)
postProcess(*ii->get());
// For all local variables that contain pointers to PhysicalStorageBufferEXT, check whether
// there is an existing restrict/aliased decoration. If we don't find one, add Aliased as the
// default.
for (auto vi = b->getLocalVariables().cbegin(); vi != b->getLocalVariables().cend(); vi++) {
const Instruction& inst = *vi->get();
Id resultId = inst.getResultId();
if (containsPhysicalStorageBufferOrArray(getDerefTypeId(resultId))) {
bool foundDecoration = false;
const auto function = [&](const std::unique_ptr<Instruction>& decoration) {
if (decoration.get()->getIdOperand(0) == resultId &&
decoration.get()->getOpCode() == OpDecorate &&
(decoration.get()->getImmediateOperand(1) == spv::DecorationAliasedPointerEXT ||
decoration.get()->getImmediateOperand(1) == spv::DecorationRestrictPointerEXT)) {
foundDecoration = true;
}
};
std::for_each(decorations.begin(), decorations.end(), function);
if (!foundDecoration) {
addDecoration(resultId, spv::DecorationAliasedPointerEXT);
}
}
}
}
}
}
#endif
// comment in header
void Builder::postProcess() {
postProcessCFG();
#ifndef GLSLANG_WEB
postProcessFeatures();
#endif
}
}; // end spv namespace

Externals/glslang/SPIRV/SpvTools.cpp vendored Normal file
@ -0,0 +1,216 @@
//
// Copyright (C) 2014-2016 LunarG, Inc.
// Copyright (C) 2018 Google, Inc.
//
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
//
// Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
//
// Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
//
// Neither the name of 3Dlabs Inc. Ltd. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
// COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
// POSSIBILITY OF SUCH DAMAGE.
//
// Call into SPIRV-Tools to disassemble, validate, and optimize.
//
#if ENABLE_OPT
#include <cstdio>
#include <iostream>
#include "SpvTools.h"
#include "spirv-tools/optimizer.hpp"
#include "spirv-tools/libspirv.h"
namespace glslang {
// Translate glslang's view of target versioning to what SPIRV-Tools uses.
spv_target_env MapToSpirvToolsEnv(const SpvVersion& spvVersion, spv::SpvBuildLogger* logger)
{
switch (spvVersion.vulkan) {
case glslang::EShTargetVulkan_1_0:
return spv_target_env::SPV_ENV_VULKAN_1_0;
case glslang::EShTargetVulkan_1_1:
switch (spvVersion.spv) {
case EShTargetSpv_1_0:
case EShTargetSpv_1_1:
case EShTargetSpv_1_2:
case EShTargetSpv_1_3:
return spv_target_env::SPV_ENV_VULKAN_1_1;
case EShTargetSpv_1_4:
return spv_target_env::SPV_ENV_VULKAN_1_1_SPIRV_1_4;
default:
logger->missingFunctionality("Target version for SPIRV-Tools validator");
return spv_target_env::SPV_ENV_VULKAN_1_1;
}
case glslang::EShTargetVulkan_1_2:
return spv_target_env::SPV_ENV_VULKAN_1_2;
default:
break;
}
if (spvVersion.openGl > 0)
return spv_target_env::SPV_ENV_OPENGL_4_5;
logger->missingFunctionality("Target version for SPIRV-Tools validator");
return spv_target_env::SPV_ENV_UNIVERSAL_1_0;
}
// Use the SPIRV-Tools disassembler to print SPIR-V.
void SpirvToolsDisassemble(std::ostream& out, const std::vector<unsigned int>& spirv)
{
// disassemble
spv_context context = spvContextCreate(SPV_ENV_UNIVERSAL_1_3);
spv_text text;
spv_diagnostic diagnostic = nullptr;
spvBinaryToText(context, spirv.data(), spirv.size(),
SPV_BINARY_TO_TEXT_OPTION_FRIENDLY_NAMES | SPV_BINARY_TO_TEXT_OPTION_INDENT,
&text, &diagnostic);
// dump
if (diagnostic == nullptr)
out << text->str;
else
spvDiagnosticPrint(diagnostic);
// teardown
spvDiagnosticDestroy(diagnostic);
spvContextDestroy(context);
}
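SpirvToolsDisassemble() is a thin wrapper over the SPIRV-Tools C API: create a context, convert the binary to text with friendly names and indentation, print it, then tear the context down. A hedged usage sketch, assuming an ENABLE_OPT build and a module already generated into 'spirv':

#include <iostream>
#include <vector>
// #include "SpvTools.h"   // include path depends on how the project is consumed

void dumpModule(const std::vector<unsigned int>& spirv) {
    glslang::SpirvToolsDisassemble(std::cout, spirv);  // textual SPIR-V to stdout
}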
// Apply the SPIRV-Tools validator to generated SPIR-V.
void SpirvToolsValidate(const glslang::TIntermediate& intermediate, std::vector<unsigned int>& spirv,
spv::SpvBuildLogger* logger, bool prelegalization)
{
// validate
spv_context context = spvContextCreate(MapToSpirvToolsEnv(intermediate.getSpv(), logger));
spv_const_binary_t binary = { spirv.data(), spirv.size() };
spv_diagnostic diagnostic = nullptr;
spv_validator_options options = spvValidatorOptionsCreate();
spvValidatorOptionsSetRelaxBlockLayout(options, intermediate.usingHlslOffsets());
spvValidatorOptionsSetBeforeHlslLegalization(options, prelegalization);
spvValidateWithOptions(context, options, &binary, &diagnostic);
// report
if (diagnostic != nullptr) {
logger->error("SPIRV-Tools Validation Errors");
logger->error(diagnostic->error);
}
// tear down
spvValidatorOptionsDestroy(options);
spvDiagnosticDestroy(diagnostic);
spvContextDestroy(context);
}
// Apply the SPIRV-Tools optimizer to generated SPIR-V, for the purpose of
// legalizing HLSL SPIR-V.
void SpirvToolsLegalize(const glslang::TIntermediate&, std::vector<unsigned int>& spirv,
spv::SpvBuildLogger*, const SpvOptions* options)
{
spv_target_env target_env = SPV_ENV_UNIVERSAL_1_2;
spvtools::Optimizer optimizer(target_env);
optimizer.SetMessageConsumer(
[](spv_message_level_t level, const char *source, const spv_position_t &position, const char *message) {
auto &out = std::cerr;
switch (level)
{
case SPV_MSG_FATAL:
case SPV_MSG_INTERNAL_ERROR:
case SPV_MSG_ERROR:
out << "error: ";
break;
case SPV_MSG_WARNING:
out << "warning: ";
break;
case SPV_MSG_INFO:
case SPV_MSG_DEBUG:
out << "info: ";
break;
default:
break;
}
if (source)
{
out << source << ":";
}
out << position.line << ":" << position.column << ":" << position.index << ":";
if (message)
{
out << " " << message;
}
out << std::endl;
});
// If debug (specifically source line info) is being generated, propagate
// line information into all SPIR-V instructions. This avoids loss of
// information when instructions are deleted or moved. Later, remove
// redundant information to minimize final SPIR-V size.
if (options->generateDebugInfo) {
optimizer.RegisterPass(spvtools::CreatePropagateLineInfoPass());
}
optimizer.RegisterPass(spvtools::CreateWrapOpKillPass());
optimizer.RegisterPass(spvtools::CreateDeadBranchElimPass());
optimizer.RegisterPass(spvtools::CreateMergeReturnPass());
optimizer.RegisterPass(spvtools::CreateInlineExhaustivePass());
optimizer.RegisterPass(spvtools::CreateEliminateDeadFunctionsPass());
optimizer.RegisterPass(spvtools::CreateScalarReplacementPass());
optimizer.RegisterPass(spvtools::CreateLocalAccessChainConvertPass());
optimizer.RegisterPass(spvtools::CreateLocalSingleBlockLoadStoreElimPass());
optimizer.RegisterPass(spvtools::CreateLocalSingleStoreElimPass());
optimizer.RegisterPass(spvtools::CreateSimplificationPass());
optimizer.RegisterPass(spvtools::CreateAggressiveDCEPass());
optimizer.RegisterPass(spvtools::CreateVectorDCEPass());
optimizer.RegisterPass(spvtools::CreateDeadInsertElimPass());
optimizer.RegisterPass(spvtools::CreateAggressiveDCEPass());
optimizer.RegisterPass(spvtools::CreateDeadBranchElimPass());
optimizer.RegisterPass(spvtools::CreateBlockMergePass());
optimizer.RegisterPass(spvtools::CreateLocalMultiStoreElimPass());
optimizer.RegisterPass(spvtools::CreateIfConversionPass());
optimizer.RegisterPass(spvtools::CreateSimplificationPass());
optimizer.RegisterPass(spvtools::CreateAggressiveDCEPass());
optimizer.RegisterPass(spvtools::CreateVectorDCEPass());
optimizer.RegisterPass(spvtools::CreateDeadInsertElimPass());
if (options->optimizeSize) {
optimizer.RegisterPass(spvtools::CreateRedundancyEliminationPass());
}
optimizer.RegisterPass(spvtools::CreateAggressiveDCEPass());
optimizer.RegisterPass(spvtools::CreateCFGCleanupPass());
if (options->generateDebugInfo) {
optimizer.RegisterPass(spvtools::CreateRedundantLineInfoElimPass());
}
spvtools::OptimizerOptions spvOptOptions;
spvOptOptions.set_run_validator(false); // The validator may run as a separate step later on
optimizer.Run(spirv.data(), spirv.size(), &spirv, spvOptOptions);
}
}; // end namespace glslang
#endif

Externals/glslang/SPIRV/SpvTools.h vendored Normal file
@ -0,0 +1,82 @@
//
// Copyright (C) 2014-2016 LunarG, Inc.
// Copyright (C) 2018 Google, Inc.
//
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
//
// Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
//
// Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
//
// Neither the name of 3Dlabs Inc. Ltd. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
// COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
// POSSIBILITY OF SUCH DAMAGE.
//
// Call into SPIRV-Tools to disassemble, validate, and optimize.
//
#pragma once
#ifndef GLSLANG_SPV_TOOLS_H
#define GLSLANG_SPV_TOOLS_H
#ifdef ENABLE_OPT
#include <vector>
#include <ostream>
#endif
#include "glslang/MachineIndependent/localintermediate.h"
#include "Logger.h"
namespace glslang {
struct SpvOptions {
SpvOptions() : generateDebugInfo(false), disableOptimizer(true),
optimizeSize(false), disassemble(false), validate(false) { }
bool generateDebugInfo;
bool disableOptimizer;
bool optimizeSize;
bool disassemble;
bool validate;
};
#ifdef ENABLE_OPT
// Use the SPIRV-Tools disassembler to print SPIR-V.
void SpirvToolsDisassemble(std::ostream& out, const std::vector<unsigned int>& spirv);
// Apply the SPIRV-Tools validator to generated SPIR-V.
void SpirvToolsValidate(const glslang::TIntermediate& intermediate, std::vector<unsigned int>& spirv,
spv::SpvBuildLogger*, bool prelegalization);
// Apply the SPIRV-Tools optimizer to generated SPIR-V, for the purpose of
// legalizing HLSL SPIR-V.
void SpirvToolsLegalize(const glslang::TIntermediate& intermediate, std::vector<unsigned int>& spirv,
spv::SpvBuildLogger*, const SpvOptions*);
#endif
} // end namespace glslang
#endif // GLSLANG_SPV_TOOLS_H
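Taken together, the header exposes a small pipeline: fill in SpvOptions, legalize HLSL-derived SPIR-V if needed, then validate. A hedged sketch of how a front end might drive it, assuming ENABLE_OPT and that 'intermediate', 'spirv' and 'logger' already exist from the GlslangToSpv step:

glslang::SpvOptions options;
options.generateDebugInfo = false;
options.optimizeSize = true;

#ifdef ENABLE_OPT
glslang::SpirvToolsLegalize(intermediate, spirv, &logger, &options);
glslang::SpirvToolsValidate(intermediate, spirv, &logger, /*prelegalization*/ false);
#endif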

View File

@ -26,7 +26,7 @@ Dest BitwiseCast(Src source) {
Dest dest;
static_assert(sizeof(source) == sizeof(dest),
"BitwiseCast: Source and destination must have the same size");
std::memcpy(&dest, &source, sizeof(dest));
std::memcpy(static_cast<void*>(&dest), &source, sizeof(dest));
return dest;
}
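The only change here is casting the destination pointer to void* before memcpy, which presumably silences compiler warnings about copying into class types; the bit-level reinterpretation itself is unchanged. A self-contained copy of the pattern, just to show what it computes:

#include <cassert>
#include <cstdint>
#include <cstring>

template <typename Dest, typename Src>
Dest BitwiseCastExample(Src source) {
    Dest dest;
    static_assert(sizeof(source) == sizeof(dest),
                  "source and destination must have the same size");
    std::memcpy(static_cast<void*>(&dest), &source, sizeof(dest));
    return dest;
}

int main() {
    // Reinterpret the bits of an IEEE-754 float as an unsigned integer.
    assert(BitwiseCastExample<uint32_t>(1.0f) == 0x3F800000u);
    return 0;
}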

Externals/glslang/SPIRV/disassemble.cpp vendored Executable file → Normal file
@ -46,31 +46,22 @@
#include "disassemble.h"
#include "doc.h"
#include "SpvTools.h"
namespace spv {
extern "C" {
// Include C-based headers that don't have a namespace
#include "GLSL.std.450.h"
#ifdef AMD_EXTENSIONS
#include "GLSL.ext.AMD.h"
#endif
#ifdef NV_EXTENSIONS
#include "GLSL.ext.NV.h"
#endif
}
}
const char* GlslStd450DebugNames[spv::GLSLstd450Count];
namespace spv {
#ifdef AMD_EXTENSIONS
static const char* GLSLextAMDGetDebugNames(const char*, unsigned);
#endif
#ifdef NV_EXTENSIONS
static const char* GLSLextNVGetDebugNames(const char*, unsigned);
#endif
static void Kill(std::ostream& out, const char* message)
{
@ -81,15 +72,8 @@ static void Kill(std::ostream& out, const char* message)
// used to identify the extended instruction library imported when printing
enum ExtInstSet {
GLSL450Inst,
#ifdef AMD_EXTENSIONS
GLSLextAMDInst,
#endif
#ifdef NV_EXTENSIONS
GLSLextNVInst,
#endif
OpenCLExtInst,
};
@ -498,35 +482,29 @@ void SpirvStream::disassembleInstruction(Id resultId, Id /*typeId*/, Op opCode,
const char* name = idDescriptor[stream[word - 2]].c_str();
if (0 == memcmp("OpenCL", name, 6)) {
extInstSet = OpenCLExtInst;
#ifdef AMD_EXTENSIONS
} else if (strcmp(spv::E_SPV_AMD_shader_ballot, name) == 0 ||
strcmp(spv::E_SPV_AMD_shader_trinary_minmax, name) == 0 ||
strcmp(spv::E_SPV_AMD_shader_explicit_vertex_parameter, name) == 0 ||
strcmp(spv::E_SPV_AMD_gcn_shader, name) == 0) {
extInstSet = GLSLextAMDInst;
#endif
#ifdef NV_EXTENSIONS
}else if (strcmp(spv::E_SPV_NV_sample_mask_override_coverage, name) == 0 ||
} else if (strcmp(spv::E_SPV_NV_sample_mask_override_coverage, name) == 0 ||
strcmp(spv::E_SPV_NV_geometry_shader_passthrough, name) == 0 ||
strcmp(spv::E_SPV_NV_viewport_array2, name) == 0 ||
strcmp(spv::E_SPV_NVX_multiview_per_view_attributes, name) == 0) {
strcmp(spv::E_SPV_NVX_multiview_per_view_attributes, name) == 0 ||
strcmp(spv::E_SPV_NV_fragment_shader_barycentric, name) == 0 ||
strcmp(spv::E_SPV_NV_mesh_shader, name) == 0) {
extInstSet = GLSLextNVInst;
#endif
}
unsigned entrypoint = stream[word - 1];
if (extInstSet == GLSL450Inst) {
if (entrypoint < GLSLstd450Count) {
out << "(" << GlslStd450DebugNames[entrypoint] << ")";
}
#ifdef AMD_EXTENSIONS
} else if (extInstSet == GLSLextAMDInst) {
out << "(" << GLSLextAMDGetDebugNames(name, entrypoint) << ")";
#endif
#ifdef NV_EXTENSIONS
}
else if (extInstSet == GLSLextNVInst) {
out << "(" << GLSLextNVGetDebugNames(name, entrypoint) << ")";
#endif
}
}
break;
@ -534,6 +512,19 @@ void SpirvStream::disassembleInstruction(Id resultId, Id /*typeId*/, Op opCode,
case OperandLiteralString:
numOperands -= disassembleString();
break;
case OperandMemoryAccess:
outputMask(OperandMemoryAccess, stream[word++]);
--numOperands;
// Aligned is the only memory access operand that uses an immediate
// value, and it is also the first operand that uses a value at all.
if (stream[word-1] & MemoryAccessAlignedMask) {
disassembleImmediates(1);
numOperands--;
if (numOperands)
out << " ";
}
disassembleIds(numOperands);
return;
default:
assert(operandClass >= OperandSource && operandClass < OperandOpcode);
@ -632,9 +623,11 @@ static void GLSLstd450GetDebugNames(const char** names)
names[GLSLstd450InterpolateAtCentroid] = "InterpolateAtCentroid";
names[GLSLstd450InterpolateAtSample] = "InterpolateAtSample";
names[GLSLstd450InterpolateAtOffset] = "InterpolateAtOffset";
names[GLSLstd450NMin] = "NMin";
names[GLSLstd450NMax] = "NMax";
names[GLSLstd450NClamp] = "NClamp";
}
#ifdef AMD_EXTENSIONS
static const char* GLSLextAMDGetDebugNames(const char* name, unsigned entrypoint)
{
if (strcmp(name, spv::E_SPV_AMD_shader_ballot) == 0) {
@ -676,36 +669,60 @@ static const char* GLSLextAMDGetDebugNames(const char* name, unsigned entrypoint
return "Bad";
}
#endif
#ifdef NV_EXTENSIONS
static const char* GLSLextNVGetDebugNames(const char* name, unsigned entrypoint)
{
if (strcmp(name, spv::E_SPV_NV_sample_mask_override_coverage) == 0 ||
strcmp(name, spv::E_SPV_NV_geometry_shader_passthrough) == 0 ||
strcmp(name, spv::E_ARB_shader_viewport_layer_array) == 0 ||
strcmp(name, spv::E_SPV_NV_viewport_array2) == 0 ||
strcmp(spv::E_SPV_NVX_multiview_per_view_attributes, name) == 0) {
strcmp(name, spv::E_SPV_NVX_multiview_per_view_attributes) == 0 ||
strcmp(name, spv::E_SPV_NV_fragment_shader_barycentric) == 0 ||
strcmp(name, spv::E_SPV_NV_mesh_shader) == 0 ||
strcmp(name, spv::E_SPV_NV_shader_image_footprint) == 0) {
switch (entrypoint) {
case DecorationOverrideCoverageNV: return "OverrideCoverageNV";
case DecorationPassthroughNV: return "PassthroughNV";
case CapabilityGeometryShaderPassthroughNV: return "GeometryShaderPassthroughNV";
case DecorationViewportRelativeNV: return "ViewportRelativeNV";
// NV builtins
case BuiltInViewportMaskNV: return "ViewportMaskNV";
case CapabilityShaderViewportMaskNV: return "ShaderViewportMaskNV";
case DecorationSecondaryViewportRelativeNV: return "SecondaryViewportRelativeNV";
case BuiltInSecondaryPositionNV: return "SecondaryPositionNV";
case BuiltInSecondaryViewportMaskNV: return "SecondaryViewportMaskNV";
case CapabilityShaderStereoViewNV: return "ShaderStereoViewNV";
case BuiltInPositionPerViewNV: return "PositionPerViewNV";
case BuiltInViewportMaskPerViewNV: return "ViewportMaskPerViewNV";
case BuiltInBaryCoordNV: return "BaryCoordNV";
case BuiltInBaryCoordNoPerspNV: return "BaryCoordNoPerspNV";
case BuiltInTaskCountNV: return "TaskCountNV";
case BuiltInPrimitiveCountNV: return "PrimitiveCountNV";
case BuiltInPrimitiveIndicesNV: return "PrimitiveIndicesNV";
case BuiltInClipDistancePerViewNV: return "ClipDistancePerViewNV";
case BuiltInCullDistancePerViewNV: return "CullDistancePerViewNV";
case BuiltInLayerPerViewNV: return "LayerPerViewNV";
case BuiltInMeshViewCountNV: return "MeshViewCountNV";
case BuiltInMeshViewIndicesNV: return "MeshViewIndicesNV";
// NV Capabilities
case CapabilityGeometryShaderPassthroughNV: return "GeometryShaderPassthroughNV";
case CapabilityShaderViewportMaskNV: return "ShaderViewportMaskNV";
case CapabilityShaderStereoViewNV: return "ShaderStereoViewNV";
case CapabilityPerViewAttributesNV: return "PerViewAttributesNV";
case CapabilityFragmentBarycentricNV: return "FragmentBarycentricNV";
case CapabilityMeshShadingNV: return "MeshShadingNV";
case CapabilityImageFootprintNV: return "ImageFootprintNV";
case CapabilitySampleMaskOverrideCoverageNV:return "SampleMaskOverrideCoverageNV";
// NV Decorations
case DecorationOverrideCoverageNV: return "OverrideCoverageNV";
case DecorationPassthroughNV: return "PassthroughNV";
case DecorationViewportRelativeNV: return "ViewportRelativeNV";
case DecorationSecondaryViewportRelativeNV: return "SecondaryViewportRelativeNV";
case DecorationPerVertexNV: return "PerVertexNV";
case DecorationPerPrimitiveNV: return "PerPrimitiveNV";
case DecorationPerViewNV: return "PerViewNV";
case DecorationPerTaskNV: return "PerTaskNV";
default: return "Bad";
}
}
return "Bad";
}
#endif
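GLSLextNVGetDebugNames keys the lookup on which NV/NVX extension import owns the token and falls back to "Bad" for anything it does not recognize. A minimal illustration of a call from inside the spv namespace (the variable name is a placeholder; the extension string and built-in value both appear in the switch above):
    // Illustrative only, not part of this commit.
    const char* dbg = GLSLextNVGetDebugNames(E_SPV_NV_mesh_shader, BuiltInTaskCountNV);
    // dbg now points at "TaskCountNV"; an unknown extension name or token yields "Bad".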
void Disassemble(std::ostream& out, const std::vector<unsigned int>& stream)
{

Externals/glslang/SPIRV/disassemble.h vendored Executable file → Normal file

@ -45,8 +45,9 @@
namespace spv {
// disassemble with glslang custom disassembler
void Disassemble(std::ostream& out, const std::vector<unsigned int>&);
}; // end namespace spv
} // end namespace spv
#endif // disassembler_H

Externals/glslang/SPIRV/doc.cpp vendored Executable file → Normal file

@ -50,12 +50,8 @@ namespace spv {
// Include C-based headers that don't have a namespace
#include "GLSL.ext.KHR.h"
#include "GLSL.ext.EXT.h"
#ifdef AMD_EXTENSIONS
#include "GLSL.ext.AMD.h"
#endif
#ifdef NV_EXTENSIONS
#include "GLSL.ext.NV.h"
#endif
}
}
@ -98,8 +94,17 @@ const char* ExecutionModelString(int model)
case 4: return "Fragment";
case 5: return "GLCompute";
case 6: return "Kernel";
case ExecutionModelTaskNV: return "TaskNV";
case ExecutionModelMeshNV: return "MeshNV";
default: return "Bad";
case ExecutionModelRayGenerationNV: return "RayGenerationNV";
case ExecutionModelIntersectionNV: return "IntersectionNV";
case ExecutionModelAnyHitNV: return "AnyHitNV";
case ExecutionModelClosestHitNV: return "ClosestHitNV";
case ExecutionModelMissNV: return "MissNV";
case ExecutionModelCallableNV: return "CallableNV";
}
}
@ -110,6 +115,8 @@ const char* AddressingString(int addr)
case 1: return "Physical32";
case 2: return "Physical64";
case AddressingModelPhysicalStorageBuffer64EXT: return "PhysicalStorageBuffer64EXT";
default: return "Bad";
}
}
@ -117,9 +124,10 @@ const char* AddressingString(int addr)
const char* MemoryString(int mem)
{
switch (mem) {
case 0: return "Simple";
case 1: return "GLSL450";
case 2: return "OpenCL";
case MemoryModelSimple: return "Simple";
case MemoryModelGLSL450: return "GLSL450";
case MemoryModelOpenCL: return "OpenCL";
case MemoryModelVulkanKHR: return "VulkanKHR";
default: return "Bad";
}
@ -165,6 +173,20 @@ const char* ExecutionModeString(int mode)
case 32: return "Bad";
case 4446: return "PostDepthCoverage";
case ExecutionModeOutputLinesNV: return "OutputLinesNV";
case ExecutionModeOutputPrimitivesNV: return "OutputPrimitivesNV";
case ExecutionModeOutputTrianglesNV: return "OutputTrianglesNV";
case ExecutionModeDerivativeGroupQuadsNV: return "DerivativeGroupQuadsNV";
case ExecutionModeDerivativeGroupLinearNV: return "DerivativeGroupLinearNV";
case ExecutionModePixelInterlockOrderedEXT: return "PixelInterlockOrderedEXT";
case ExecutionModePixelInterlockUnorderedEXT: return "PixelInterlockUnorderedEXT";
case ExecutionModeSampleInterlockOrderedEXT: return "SampleInterlockOrderedEXT";
case ExecutionModeSampleInterlockUnorderedEXT: return "SampleInterlockUnorderedEXT";
case ExecutionModeShadingRateInterlockOrderedEXT: return "ShadingRateInterlockOrderedEXT";
case ExecutionModeShadingRateInterlockUnorderedEXT: return "ShadingRateInterlockUnorderedEXT";
case ExecutionModeCeiling:
default: return "Bad";
}
@ -187,6 +209,15 @@ const char* StorageClassString(int StorageClass)
case 11: return "Image";
case 12: return "StorageBuffer";
case StorageClassRayPayloadNV: return "RayPayloadNV";
case StorageClassHitAttributeNV: return "HitAttributeNV";
case StorageClassIncomingRayPayloadNV: return "IncomingRayPayloadNV";
case StorageClassShaderRecordBufferNV: return "ShaderRecordBufferNV";
case StorageClassCallableDataNV: return "CallableDataNV";
case StorageClassIncomingCallableDataNV: return "IncomingCallableDataNV";
case StorageClassPhysicalStorageBufferEXT: return "PhysicalStorageBufferEXT";
default: return "Bad";
}
}
@ -245,19 +276,21 @@ const char* DecorationString(int decoration)
case DecorationCeiling:
default: return "Bad";
#ifdef AMD_EXTENSIONS
case DecorationExplicitInterpAMD: return "ExplicitInterpAMD";
#endif
#ifdef NV_EXTENSIONS
case DecorationOverrideCoverageNV: return "OverrideCoverageNV";
case DecorationPassthroughNV: return "PassthroughNV";
case DecorationViewportRelativeNV: return "ViewportRelativeNV";
case DecorationSecondaryViewportRelativeNV: return "SecondaryViewportRelativeNV";
#endif
case DecorationPerPrimitiveNV: return "PerPrimitiveNV";
case DecorationPerViewNV: return "PerViewNV";
case DecorationPerTaskNV: return "PerTaskNV";
case DecorationPerVertexNV: return "PerVertexNV";
case DecorationNonUniformEXT: return "DecorationNonUniformEXT";
case DecorationHlslCounterBufferGOOGLE: return "DecorationHlslCounterBufferGOOGLE";
case DecorationHlslSemanticGOOGLE: return "DecorationHlslSemanticGOOGLE";
case DecorationRestrictPointerEXT: return "DecorationRestrictPointerEXT";
case DecorationAliasedPointerEXT: return "DecorationAliasedPointerEXT";
}
}
@ -321,7 +354,6 @@ const char* BuiltInString(int builtIn)
case 4426: return "DrawIndex";
case 5014: return "FragStencilRefEXT";
#ifdef AMD_EXTENSIONS
case 4992: return "BaryCoordNoPerspAMD";
case 4993: return "BaryCoordNoPerspCentroidAMD";
case 4994: return "BaryCoordNoPerspSampleAMD";
@ -329,18 +361,48 @@ const char* BuiltInString(int builtIn)
case 4996: return "BaryCoordSmoothCentroidAMD";
case 4997: return "BaryCoordSmoothSampleAMD";
case 4998: return "BaryCoordPullModelAMD";
#endif
case BuiltInLaunchIdNV: return "LaunchIdNV";
case BuiltInLaunchSizeNV: return "LaunchSizeNV";
case BuiltInWorldRayOriginNV: return "WorldRayOriginNV";
case BuiltInWorldRayDirectionNV: return "WorldRayDirectionNV";
case BuiltInObjectRayOriginNV: return "ObjectRayOriginNV";
case BuiltInObjectRayDirectionNV: return "ObjectRayDirectionNV";
case BuiltInRayTminNV: return "RayTminNV";
case BuiltInRayTmaxNV: return "RayTmaxNV";
case BuiltInInstanceCustomIndexNV: return "InstanceCustomIndexNV";
case BuiltInObjectToWorldNV: return "ObjectToWorldNV";
case BuiltInWorldToObjectNV: return "WorldToObjectNV";
case BuiltInHitTNV: return "HitTNV";
case BuiltInHitKindNV: return "HitKindNV";
case BuiltInIncomingRayFlagsNV: return "IncomingRayFlagsNV";
case BuiltInViewportMaskNV: return "ViewportMaskNV";
case BuiltInSecondaryPositionNV: return "SecondaryPositionNV";
case BuiltInSecondaryViewportMaskNV: return "SecondaryViewportMaskNV";
case BuiltInPositionPerViewNV: return "PositionPerViewNV";
case BuiltInViewportMaskPerViewNV: return "ViewportMaskPerViewNV";
// case BuiltInFragmentSizeNV: return "FragmentSizeNV"; // superseded by BuiltInFragSizeEXT
// case BuiltInInvocationsPerPixelNV: return "InvocationsPerPixelNV"; // superseded by BuiltInFragInvocationCountEXT
case BuiltInBaryCoordNV: return "BaryCoordNV";
case BuiltInBaryCoordNoPerspNV: return "BaryCoordNoPerspNV";
#ifdef NV_EXTENSIONS
case 5253: return "ViewportMaskNV";
case 5257: return "SecondaryPositionNV";
case 5258: return "SecondaryViewportMaskNV";
case 5261: return "PositionPerViewNV";
case 5262: return "ViewportMaskPerViewNV";
#endif
case BuiltInFragSizeEXT: return "FragSizeEXT";
case BuiltInFragInvocationCountEXT: return "FragInvocationCountEXT";
case 5264: return "FullyCoveredEXT";
case BuiltInTaskCountNV: return "TaskCountNV";
case BuiltInPrimitiveCountNV: return "PrimitiveCountNV";
case BuiltInPrimitiveIndicesNV: return "PrimitiveIndicesNV";
case BuiltInClipDistancePerViewNV: return "ClipDistancePerViewNV";
case BuiltInCullDistancePerViewNV: return "CullDistancePerViewNV";
case BuiltInLayerPerViewNV: return "LayerPerViewNV";
case BuiltInMeshViewCountNV: return "MeshViewCountNV";
case BuiltInMeshViewIndicesNV: return "MeshViewIndicesNV";
case BuiltInWarpsPerSMNV: return "WarpsPerSMNV";
case BuiltInSMCountNV: return "SMCountNV";
case BuiltInWarpIDNV: return "WarpIDNV";
case BuiltInSMIDNV: return "SMIDNV";
default: return "Bad";
}
}
@ -499,19 +561,25 @@ const char* ImageChannelDataTypeString(int type)
}
}
const int ImageOperandsCeiling = 8;
const int ImageOperandsCeiling = 14;
const char* ImageOperandsString(int format)
{
switch (format) {
case 0: return "Bias";
case 1: return "Lod";
case 2: return "Grad";
case 3: return "ConstOffset";
case 4: return "Offset";
case 5: return "ConstOffsets";
case 6: return "Sample";
case 7: return "MinLod";
case ImageOperandsBiasShift: return "Bias";
case ImageOperandsLodShift: return "Lod";
case ImageOperandsGradShift: return "Grad";
case ImageOperandsConstOffsetShift: return "ConstOffset";
case ImageOperandsOffsetShift: return "Offset";
case ImageOperandsConstOffsetsShift: return "ConstOffsets";
case ImageOperandsSampleShift: return "Sample";
case ImageOperandsMinLodShift: return "MinLod";
case ImageOperandsMakeTexelAvailableKHRShift: return "MakeTexelAvailableKHR";
case ImageOperandsMakeTexelVisibleKHRShift: return "MakeTexelVisibleKHR";
case ImageOperandsNonPrivateTexelKHRShift: return "NonPrivateTexelKHR";
case ImageOperandsVolatileTexelKHRShift: return "VolatileTexelKHR";
case ImageOperandsSignExtendShift: return "SignExtend";
case ImageOperandsZeroExtendShift: return "ZeroExtend";
case ImageOperandsCeiling:
default:
@ -594,15 +662,20 @@ const char* SelectControlString(int cont)
}
}
const int LoopControlCeiling = 4;
const int LoopControlCeiling = LoopControlPartialCountShift + 1;
const char* LoopControlString(int cont)
{
switch (cont) {
case 0: return "Unroll";
case 1: return "DontUnroll";
case 2: return "DependencyInfinite";
case 3: return "DependencyLength";
case LoopControlUnrollShift: return "Unroll";
case LoopControlDontUnrollShift: return "DontUnroll";
case LoopControlDependencyInfiniteShift: return "DependencyInfinite";
case LoopControlDependencyLengthShift: return "DependencyLength";
case LoopControlMinIterationsShift: return "MinIterations";
case LoopControlMaxIterationsShift: return "MaxIterations";
case LoopControlIterationMultipleShift: return "IterationMultiple";
case LoopControlPeelCountShift: return "PeelCount";
case LoopControlPartialCountShift: return "PartialCount";
case LoopControlCeiling:
default: return "Bad";
@ -645,12 +718,17 @@ const char* MemorySemanticsString(int mem)
}
}
const int MemoryAccessCeiling = 6;
const char* MemoryAccessString(int mem)
{
switch (mem) {
case 0: return "Volatile";
case 1: return "Aligned";
case 2: return "Nontemporal";
case MemoryAccessVolatileShift: return "Volatile";
case MemoryAccessAlignedShift: return "Aligned";
case MemoryAccessNontemporalShift: return "Nontemporal";
case MemoryAccessMakePointerAvailableKHRShift: return "MakePointerAvailableKHR";
case MemoryAccessMakePointerVisibleKHRShift: return "MakePointerVisibleKHR";
case MemoryAccessNonPrivatePointerKHRShift: return "NonPrivatePointerKHR";
default: return "Bad";
}
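MemoryAccessString, like the other helpers switched over to *Shift enumerants, names a single bit position; a caller can expand a whole mask by testing each shift up to the new MemoryAccessCeiling. A rough sketch of that expansion (the function name and the <string> usage are illustrative, not part of this commit):
    #include <string>

    // Sketch only: turn a MemoryAccess bitmask into "Volatile|Aligned|..." form.
    std::string DecodeMemoryAccessMask(unsigned mask) {
        std::string names;
        for (int shift = 0; shift < MemoryAccessCeiling; ++shift) {
            if (mask & (1u << shift)) {
                if (!names.empty())
                    names += "|";
                names += MemoryAccessString(shift);   // maps one shift to its token name
            }
        }
        return names.empty() ? "None" : names;
    }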
@ -678,11 +756,9 @@ const char* GroupOperationString(int gop)
case GroupOperationInclusiveScan: return "InclusiveScan";
case GroupOperationExclusiveScan: return "ExclusiveScan";
case GroupOperationClusteredReduce: return "ClusteredReduce";
#ifdef NV_EXTENSIONS
case GroupOperationPartitionedReduceNV: return "PartitionedReduceNV";
case GroupOperationPartitionedInclusiveScanNV: return "PartitionedInclusiveScanNV";
case GroupOperationPartitionedExclusiveScanNV: return "PartitionedExclusiveScanNV";
#endif
default: return "Bad";
}
@ -790,44 +866,72 @@ const char* CapabilityString(int info)
case CapabilityStoragePushConstant16: return "StoragePushConstant16";
case CapabilityStorageInputOutput16: return "StorageInputOutput16";
case CapabilityStorageBuffer8BitAccess: return "StorageBuffer8BitAccess";
case CapabilityUniformAndStorageBuffer8BitAccess: return "UniformAndStorageBuffer8BitAccess";
case CapabilityStoragePushConstant8: return "StoragePushConstant8";
case CapabilityDeviceGroup: return "DeviceGroup";
case CapabilityMultiView: return "MultiView";
case CapabilityStencilExportEXT: return "StencilExportEXT";
#ifdef AMD_EXTENSIONS
case CapabilityFloat16ImageAMD: return "Float16ImageAMD";
case CapabilityImageGatherBiasLodAMD: return "ImageGatherBiasLodAMD";
case CapabilityFragmentMaskAMD: return "FragmentMaskAMD";
case CapabilityImageReadWriteLodAMD: return "ImageReadWriteLodAMD";
#endif
case CapabilityAtomicStorageOps: return "AtomicStorageOps";
case CapabilitySampleMaskPostDepthCoverage: return "SampleMaskPostDepthCoverage";
#ifdef NV_EXTENSIONS
case CapabilityGeometryShaderPassthroughNV: return "GeometryShaderPassthroughNV";
case CapabilityShaderViewportIndexLayerNV: return "ShaderViewportIndexLayerNV";
case CapabilityShaderViewportMaskNV: return "ShaderViewportMaskNV";
case CapabilityShaderStereoViewNV: return "ShaderStereoViewNV";
case CapabilityPerViewAttributesNV: return "PerViewAttributesNV";
case CapabilityGroupNonUniformPartitionedNV: return "GroupNonUniformPartitionedNV";
#endif
case CapabilityGeometryShaderPassthroughNV: return "GeometryShaderPassthroughNV";
case CapabilityShaderViewportIndexLayerNV: return "ShaderViewportIndexLayerNV";
case CapabilityShaderViewportMaskNV: return "ShaderViewportMaskNV";
case CapabilityShaderStereoViewNV: return "ShaderStereoViewNV";
case CapabilityPerViewAttributesNV: return "PerViewAttributesNV";
case CapabilityGroupNonUniformPartitionedNV: return "GroupNonUniformPartitionedNV";
case CapabilityRayTracingNV: return "RayTracingNV";
case CapabilityComputeDerivativeGroupQuadsNV: return "ComputeDerivativeGroupQuadsNV";
case CapabilityComputeDerivativeGroupLinearNV: return "ComputeDerivativeGroupLinearNV";
case CapabilityFragmentBarycentricNV: return "FragmentBarycentricNV";
case CapabilityMeshShadingNV: return "MeshShadingNV";
case CapabilityImageFootprintNV: return "ImageFootprintNV";
// case CapabilityShadingRateNV: return "ShadingRateNV"; // superseded by FragmentDensityEXT
case CapabilitySampleMaskOverrideCoverageNV: return "SampleMaskOverrideCoverageNV";
case CapabilityFragmentDensityEXT: return "FragmentDensityEXT";
case CapabilityFragmentFullyCoveredEXT: return "FragmentFullyCoveredEXT";
case CapabilityShaderNonUniformEXT: return "CapabilityShaderNonUniformEXT";
case CapabilityRuntimeDescriptorArrayEXT: return "CapabilityRuntimeDescriptorArrayEXT";
case CapabilityInputAttachmentArrayDynamicIndexingEXT: return "CapabilityInputAttachmentArrayDynamicIndexingEXT";
case CapabilityUniformTexelBufferArrayDynamicIndexingEXT: return "CapabilityUniformTexelBufferArrayDynamicIndexingEXT";
case CapabilityStorageTexelBufferArrayDynamicIndexingEXT: return "CapabilityStorageTexelBufferArrayDynamicIndexingEXT";
case CapabilityUniformBufferArrayNonUniformIndexingEXT: return "CapabilityUniformBufferArrayNonUniformIndexingEXT";
case CapabilitySampledImageArrayNonUniformIndexingEXT: return "CapabilitySampledImageArrayNonUniformIndexingEXT";
case CapabilityStorageBufferArrayNonUniformIndexingEXT: return "CapabilityStorageBufferArrayNonUniformIndexingEXT";
case CapabilityStorageImageArrayNonUniformIndexingEXT: return "CapabilityStorageImageArrayNonUniformIndexingEXT";
case CapabilityInputAttachmentArrayNonUniformIndexingEXT: return "CapabilityInputAttachmentArrayNonUniformIndexingEXT";
case CapabilityUniformTexelBufferArrayNonUniformIndexingEXT: return "CapabilityUniformTexelBufferArrayNonUniformIndexingEXT";
case CapabilityStorageTexelBufferArrayNonUniformIndexingEXT: return "CapabilityStorageTexelBufferArrayNonUniformIndexingEXT";
case CapabilityShaderNonUniformEXT: return "ShaderNonUniformEXT";
case CapabilityRuntimeDescriptorArrayEXT: return "RuntimeDescriptorArrayEXT";
case CapabilityInputAttachmentArrayDynamicIndexingEXT: return "InputAttachmentArrayDynamicIndexingEXT";
case CapabilityUniformTexelBufferArrayDynamicIndexingEXT: return "UniformTexelBufferArrayDynamicIndexingEXT";
case CapabilityStorageTexelBufferArrayDynamicIndexingEXT: return "StorageTexelBufferArrayDynamicIndexingEXT";
case CapabilityUniformBufferArrayNonUniformIndexingEXT: return "UniformBufferArrayNonUniformIndexingEXT";
case CapabilitySampledImageArrayNonUniformIndexingEXT: return "SampledImageArrayNonUniformIndexingEXT";
case CapabilityStorageBufferArrayNonUniformIndexingEXT: return "StorageBufferArrayNonUniformIndexingEXT";
case CapabilityStorageImageArrayNonUniformIndexingEXT: return "StorageImageArrayNonUniformIndexingEXT";
case CapabilityInputAttachmentArrayNonUniformIndexingEXT: return "InputAttachmentArrayNonUniformIndexingEXT";
case CapabilityUniformTexelBufferArrayNonUniformIndexingEXT: return "UniformTexelBufferArrayNonUniformIndexingEXT";
case CapabilityStorageTexelBufferArrayNonUniformIndexingEXT: return "StorageTexelBufferArrayNonUniformIndexingEXT";
case CapabilityVulkanMemoryModelKHR: return "VulkanMemoryModelKHR";
case CapabilityVulkanMemoryModelDeviceScopeKHR: return "VulkanMemoryModelDeviceScopeKHR";
case CapabilityPhysicalStorageBufferAddressesEXT: return "PhysicalStorageBufferAddressesEXT";
case CapabilityVariablePointers: return "VariablePointers";
case CapabilityCooperativeMatrixNV: return "CooperativeMatrixNV";
case CapabilityShaderSMBuiltinsNV: return "ShaderSMBuiltinsNV";
case CapabilityFragmentShaderSampleInterlockEXT: return "CapabilityFragmentShaderSampleInterlockEXT";
case CapabilityFragmentShaderPixelInterlockEXT: return "CapabilityFragmentShaderPixelInterlockEXT";
case CapabilityFragmentShaderShadingRateInterlockEXT: return "CapabilityFragmentShaderShadingRateInterlockEXT";
case CapabilityDemoteToHelperInvocationEXT: return "DemoteToHelperInvocationEXT";
case CapabilityShaderClockKHR: return "ShaderClockKHR";
case CapabilityIntegerFunctions2INTEL: return "CapabilityIntegerFunctions2INTEL";
default: return "Bad";
}
@ -921,6 +1025,7 @@ const char* OpcodeString(int op)
case 82: return "OpCompositeInsert";
case 83: return "OpCopyObject";
case 84: return "OpTranspose";
case OpCopyLogical: return "OpCopyLogical";
case 85: return "Bad";
case 86: return "OpSampledImage";
case 87: return "OpImageSampleImplicitLod";
@ -1203,7 +1308,6 @@ const char* OpcodeString(int op)
case 4430: return "OpSubgroupAllEqualKHR";
case 4432: return "OpSubgroupReadInvocationKHR";
#ifdef AMD_EXTENSIONS
case 5000: return "OpGroupIAddNonUniformAMD";
case 5001: return "OpGroupFAddNonUniformAMD";
case 5002: return "OpGroupFMinNonUniformAMD";
@ -1215,14 +1319,33 @@ const char* OpcodeString(int op)
case 5011: return "OpFragmentMaskFetchAMD";
case 5012: return "OpFragmentFetchAMD";
#endif
case OpReadClockKHR: return "OpReadClockKHR";
case OpDecorateStringGOOGLE: return "OpDecorateStringGOOGLE";
case OpMemberDecorateStringGOOGLE: return "OpMemberDecorateStringGOOGLE";
#ifdef NV_EXTENSIONS
case OpGroupNonUniformPartitionNV: return "OpGroupNonUniformPartitionNV";
#endif
case OpGroupNonUniformPartitionNV: return "OpGroupNonUniformPartitionNV";
case OpReportIntersectionNV: return "OpReportIntersectionNV";
case OpIgnoreIntersectionNV: return "OpIgnoreIntersectionNV";
case OpTerminateRayNV: return "OpTerminateRayNV";
case OpTraceNV: return "OpTraceNV";
case OpTypeAccelerationStructureNV: return "OpTypeAccelerationStructureNV";
case OpExecuteCallableNV: return "OpExecuteCallableNV";
case OpImageSampleFootprintNV: return "OpImageSampleFootprintNV";
case OpWritePackedPrimitiveIndices4x8NV: return "OpWritePackedPrimitiveIndices4x8NV";
case OpTypeCooperativeMatrixNV: return "OpTypeCooperativeMatrixNV";
case OpCooperativeMatrixLoadNV: return "OpCooperativeMatrixLoadNV";
case OpCooperativeMatrixStoreNV: return "OpCooperativeMatrixStoreNV";
case OpCooperativeMatrixMulAddNV: return "OpCooperativeMatrixMulAddNV";
case OpCooperativeMatrixLengthNV: return "OpCooperativeMatrixLengthNV";
case OpDemoteToHelperInvocationEXT: return "OpDemoteToHelperInvocationEXT";
case OpIsHelperInvocationEXT: return "OpIsHelperInvocationEXT";
case OpBeginInvocationInterlockEXT: return "OpBeginInvocationInterlockEXT";
case OpEndInvocationInterlockEXT: return "OpEndInvocationInterlockEXT";
default:
return "Bad";
}
@ -1241,6 +1364,7 @@ EnumParameters DecorationParams[DecorationCeiling];
EnumParameters LoopControlParams[FunctionControlCeiling];
EnumParameters SelectionControlParams[SelectControlCeiling];
EnumParameters FunctionControlParams[FunctionControlCeiling];
EnumParameters MemoryAccessParams[MemoryAccessCeiling];
// Set up all the parameterizing descriptions of the opcodes, operands, etc.
void Parameterize()
@ -1333,6 +1457,10 @@ void Parameterize()
InstructionDesc[OpGroupWaitEvents].setResultAndType(false, false);
InstructionDesc[OpAtomicFlagClear].setResultAndType(false, false);
InstructionDesc[OpModuleProcessed].setResultAndType(false, false);
InstructionDesc[OpTypeCooperativeMatrixNV].setResultAndType(true, false);
InstructionDesc[OpCooperativeMatrixStoreNV].setResultAndType(false, false);
InstructionDesc[OpBeginInvocationInterlockEXT].setResultAndType(false, false);
InstructionDesc[OpEndInvocationInterlockEXT].setResultAndType(false, false);
// Specific additional context-dependent operands
@ -1396,7 +1524,7 @@ void Parameterize()
OperandClassParams[OperandLoop].set(LoopControlCeiling, LoopControlString, LoopControlParams, true);
OperandClassParams[OperandFunction].set(FunctionControlCeiling, FunctionControlString, FunctionControlParams, true);
OperandClassParams[OperandMemorySemantics].set(0, MemorySemanticsString, nullptr, true);
OperandClassParams[OperandMemoryAccess].set(0, MemoryAccessString, nullptr, true);
OperandClassParams[OperandMemoryAccess].set(MemoryAccessCeiling, MemoryAccessString, MemoryAccessParams, true);
OperandClassParams[OperandScope].set(0, ScopeString, nullptr);
OperandClassParams[OperandGroupOperation].set(0, GroupOperationString, nullptr);
OperandClassParams[OperandKernelEnqueueFlags].set(0, KernelEnqueueFlagsString, nullptr);
@ -1518,10 +1646,14 @@ void Parameterize()
InstructionDesc[OpLoad].operands.push(OperandId, "'Pointer'");
InstructionDesc[OpLoad].operands.push(OperandMemoryAccess, "", true);
InstructionDesc[OpLoad].operands.push(OperandLiteralNumber, "", true);
InstructionDesc[OpLoad].operands.push(OperandId, "", true);
InstructionDesc[OpStore].operands.push(OperandId, "'Pointer'");
InstructionDesc[OpStore].operands.push(OperandId, "'Object'");
InstructionDesc[OpStore].operands.push(OperandMemoryAccess, "", true);
InstructionDesc[OpStore].operands.push(OperandLiteralNumber, "", true);
InstructionDesc[OpStore].operands.push(OperandId, "", true);
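The extra optional operands registered for OpLoad and OpStore line up with the newer memory-access flags: the optional literal carries the alignment for Aligned, and the optional trailing <id> carries the scope for MakePointerAvailableKHR/MakePointerVisibleKHR. A hedged builder-side sketch using the Instruction interface from spvIR.h (resultId, typeId, ptrId and scopeId are placeholders, and the sketch assumes the MemoryAccess*Mask enumerants from spirv.hpp):
    // Sketch only, not code from this commit: emit an OpLoad that is Aligned(16)
    // and MakePointerVisibleKHR, filling in the optional operands described above.
    spv::Instruction* load = new spv::Instruction(resultId, typeId, spv::OpLoad);
    load->addIdOperand(ptrId);                                   // 'Pointer'
    load->addImmediateOperand(spv::MemoryAccessAlignedMask |
                              spv::MemoryAccessMakePointerVisibleKHRMask);
    load->addImmediateOperand(16);                               // alignment literal for Aligned
    load->addIdOperand(scopeId);                                 // scope <id> for MakePointerVisibleKHR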
InstructionDesc[OpPhi].operands.push(OperandVariableIds, "'Variable, Parent, ...'");
@ -1806,6 +1938,8 @@ void Parameterize()
InstructionDesc[OpTranspose].operands.push(OperandId, "'Matrix'");
InstructionDesc[OpCopyLogical].operands.push(OperandId, "'Operand'");
InstructionDesc[OpIsNan].operands.push(OperandId, "'x'");
InstructionDesc[OpIsInf].operands.push(OperandId, "'x'");
@ -2519,7 +2653,6 @@ void Parameterize()
InstructionDesc[OpModuleProcessed].operands.push(OperandLiteralString, "'process'");
#ifdef AMD_EXTENSIONS
InstructionDesc[OpGroupIAddNonUniformAMD].operands.push(OperandScope, "'Execution'");
InstructionDesc[OpGroupIAddNonUniformAMD].operands.push(OperandGroupOperation, "'Operation'");
InstructionDesc[OpGroupIAddNonUniformAMD].operands.push(OperandId, "'X'");
@ -2558,11 +2691,74 @@ void Parameterize()
InstructionDesc[OpFragmentFetchAMD].operands.push(OperandId, "'Image'");
InstructionDesc[OpFragmentFetchAMD].operands.push(OperandId, "'Coordinate'");
InstructionDesc[OpFragmentFetchAMD].operands.push(OperandId, "'Fragment Index'");
#endif
#ifdef NV_EXTENSIONS
InstructionDesc[OpGroupNonUniformPartitionNV].operands.push(OperandId, "X");
#endif
InstructionDesc[OpTypeAccelerationStructureNV].setResultAndType(true, false);
InstructionDesc[OpTraceNV].operands.push(OperandId, "'NV Acceleration Structure'");
InstructionDesc[OpTraceNV].operands.push(OperandId, "'Ray Flags'");
InstructionDesc[OpTraceNV].operands.push(OperandId, "'Cull Mask'");
InstructionDesc[OpTraceNV].operands.push(OperandId, "'SBT Record Offset'");
InstructionDesc[OpTraceNV].operands.push(OperandId, "'SBT Record Stride'");
InstructionDesc[OpTraceNV].operands.push(OperandId, "'Miss Index'");
InstructionDesc[OpTraceNV].operands.push(OperandId, "'Ray Origin'");
InstructionDesc[OpTraceNV].operands.push(OperandId, "'TMin'");
InstructionDesc[OpTraceNV].operands.push(OperandId, "'Ray Direction'");
InstructionDesc[OpTraceNV].operands.push(OperandId, "'TMax'");
InstructionDesc[OpTraceNV].operands.push(OperandId, "'Payload'");
InstructionDesc[OpTraceNV].setResultAndType(false, false);
InstructionDesc[OpReportIntersectionNV].operands.push(OperandId, "'Hit Parameter'");
InstructionDesc[OpReportIntersectionNV].operands.push(OperandId, "'Hit Kind'");
InstructionDesc[OpIgnoreIntersectionNV].setResultAndType(false, false);
InstructionDesc[OpTerminateRayNV].setResultAndType(false, false);
InstructionDesc[OpExecuteCallableNV].operands.push(OperandId, "SBT Record Index");
InstructionDesc[OpExecuteCallableNV].operands.push(OperandId, "CallableData ID");
InstructionDesc[OpExecuteCallableNV].setResultAndType(false, false);
InstructionDesc[OpImageSampleFootprintNV].operands.push(OperandId, "'Sampled Image'");
InstructionDesc[OpImageSampleFootprintNV].operands.push(OperandId, "'Coordinate'");
InstructionDesc[OpImageSampleFootprintNV].operands.push(OperandId, "'Granularity'");
InstructionDesc[OpImageSampleFootprintNV].operands.push(OperandId, "'Coarse'");
InstructionDesc[OpImageSampleFootprintNV].operands.push(OperandImageOperands, "", true);
InstructionDesc[OpImageSampleFootprintNV].operands.push(OperandVariableIds, "", true);
InstructionDesc[OpWritePackedPrimitiveIndices4x8NV].operands.push(OperandId, "'Index Offset'");
InstructionDesc[OpWritePackedPrimitiveIndices4x8NV].operands.push(OperandId, "'Packed Indices'");
InstructionDesc[OpTypeCooperativeMatrixNV].operands.push(OperandId, "'Component Type'");
InstructionDesc[OpTypeCooperativeMatrixNV].operands.push(OperandId, "'Scope'");
InstructionDesc[OpTypeCooperativeMatrixNV].operands.push(OperandId, "'Rows'");
InstructionDesc[OpTypeCooperativeMatrixNV].operands.push(OperandId, "'Columns'");
InstructionDesc[OpCooperativeMatrixLoadNV].operands.push(OperandId, "'Pointer'");
InstructionDesc[OpCooperativeMatrixLoadNV].operands.push(OperandId, "'Stride'");
InstructionDesc[OpCooperativeMatrixLoadNV].operands.push(OperandId, "'Column Major'");
InstructionDesc[OpCooperativeMatrixLoadNV].operands.push(OperandMemoryAccess, "'Memory Access'");
InstructionDesc[OpCooperativeMatrixLoadNV].operands.push(OperandLiteralNumber, "", true);
InstructionDesc[OpCooperativeMatrixLoadNV].operands.push(OperandId, "", true);
InstructionDesc[OpCooperativeMatrixStoreNV].operands.push(OperandId, "'Pointer'");
InstructionDesc[OpCooperativeMatrixStoreNV].operands.push(OperandId, "'Object'");
InstructionDesc[OpCooperativeMatrixStoreNV].operands.push(OperandId, "'Stride'");
InstructionDesc[OpCooperativeMatrixStoreNV].operands.push(OperandId, "'Column Major'");
InstructionDesc[OpCooperativeMatrixStoreNV].operands.push(OperandMemoryAccess, "'Memory Access'");
InstructionDesc[OpCooperativeMatrixStoreNV].operands.push(OperandLiteralNumber, "", true);
InstructionDesc[OpCooperativeMatrixStoreNV].operands.push(OperandId, "", true);
InstructionDesc[OpCooperativeMatrixMulAddNV].operands.push(OperandId, "'A'");
InstructionDesc[OpCooperativeMatrixMulAddNV].operands.push(OperandId, "'B'");
InstructionDesc[OpCooperativeMatrixMulAddNV].operands.push(OperandId, "'C'");
InstructionDesc[OpCooperativeMatrixLengthNV].operands.push(OperandId, "'Type'");
InstructionDesc[OpDemoteToHelperInvocationEXT].setResultAndType(false, false);
InstructionDesc[OpReadClockKHR].operands.push(OperandScope, "'Scope'");
}
}; // end spv namespace


@ -255,4 +255,4 @@ const char* AccessQualifierString(int attr);
void PrintOperands(const OperandParameters& operands, int reservedOperands);
}; // end namespace spv
} // end namespace spv

File diff suppressed because it is too large.

Externals/glslang/SPIRV/spvIR.h vendored Executable file → Normal file

@ -1,5 +1,6 @@
//
// Copyright (C) 2014 LunarG, Inc.
// Copyright (C) 2015-2018 Google, Inc.
//
// All rights reserved.
//
@ -79,6 +80,12 @@ const MemorySemanticsMask MemorySemanticsAllMemory =
MemorySemanticsAtomicCounterMemoryMask |
MemorySemanticsImageMemoryMask);
struct IdImmediate {
bool isId; // true if word is an Id, false if word is an immediate
unsigned word;
IdImmediate(bool i, unsigned w) : isId(i), word(w) {}
};
//
// SPIR-V IR instruction.
//
@ -88,8 +95,19 @@ public:
Instruction(Id resultId, Id typeId, Op opCode) : resultId(resultId), typeId(typeId), opCode(opCode), block(nullptr) { }
explicit Instruction(Op opCode) : resultId(NoResult), typeId(NoType), opCode(opCode), block(nullptr) { }
virtual ~Instruction() {}
void addIdOperand(Id id) { operands.push_back(id); }
void addImmediateOperand(unsigned int immediate) { operands.push_back(immediate); }
void addIdOperand(Id id) {
operands.push_back(id);
idOperand.push_back(true);
}
void addImmediateOperand(unsigned int immediate) {
operands.push_back(immediate);
idOperand.push_back(false);
}
void setImmediateOperand(unsigned idx, unsigned int immediate) {
assert(!idOperand[idx]);
operands[idx] = immediate;
}
void addStringOperand(const char* str)
{
unsigned int word;
@ -116,14 +134,25 @@ public:
addImmediateOperand(word);
}
}
bool isIdOperand(int op) const { return idOperand[op]; }
void setBlock(Block* b) { block = b; }
Block* getBlock() const { return block; }
Op getOpCode() const { return opCode; }
int getNumOperands() const { return (int)operands.size(); }
int getNumOperands() const
{
assert(operands.size() == idOperand.size());
return (int)operands.size();
}
Id getResultId() const { return resultId; }
Id getTypeId() const { return typeId; }
Id getIdOperand(int op) const { return operands[op]; }
unsigned int getImmediateOperand(int op) const { return operands[op]; }
Id getIdOperand(int op) const {
assert(idOperand[op]);
return operands[op];
}
unsigned int getImmediateOperand(int op) const {
assert(!idOperand[op]);
return operands[op];
}
// Write out the binary form.
void dump(std::vector<unsigned int>& out) const
@ -153,7 +182,8 @@ protected:
Id resultId;
Id typeId;
Op opCode;
std::vector<Id> operands;
std::vector<Id> operands; // operands, both <id> and immediates (both are unsigned int)
std::vector<bool> idOperand; // true for operands that are <id>, false for immediates
Block* block;
};
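With every operand now mirrored by an idOperand flag, a consumer can walk an instruction generically without knowing each opcode's operand layout. A small sketch (forEachOperand and its callbacks are illustrative, not part of the commit):
    // Sketch only: generic operand walk over an spv::Instruction.
    template <typename OnId, typename OnImmediate>
    void forEachOperand(const spv::Instruction& inst, OnId onId, OnImmediate onImmediate) {
        for (int op = 0; op < inst.getNumOperands(); ++op) {
            if (inst.isIdOperand(op))
                onId(inst.getIdOperand(op));                 // operand is an <id>
            else
                onImmediate(inst.getImmediateOperand(op));   // operand is an immediate word
        }
    }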
@ -179,6 +209,7 @@ public:
const std::vector<std::unique_ptr<Instruction> >& getInstructions() const {
return instructions;
}
const std::vector<std::unique_ptr<Instruction> >& getLocalVariables() const { return localVariables; }
void setUnreachable() { unreachable = true; }
bool isUnreachable() const { return unreachable; }
// Returns the block's merge instruction, if one exists (otherwise null).
@ -195,6 +226,36 @@ public:
return nullptr;
}
// Change this block into a canonical dead merge block. Delete instructions
// as necessary. A canonical dead merge block has only an OpLabel and an
// OpUnreachable.
void rewriteAsCanonicalUnreachableMerge() {
assert(localVariables.empty());
// Delete all instructions except for the label.
assert(instructions.size() > 0);
instructions.resize(1);
successors.clear();
Instruction* unreachable = new Instruction(OpUnreachable);
addInstruction(std::unique_ptr<Instruction>(unreachable));
}
// Change this block into a canonical dead continue target branching to the
// given header ID. Delete instructions as necessary. A canonical dead continue
// target has only an OpLabel and an unconditional branch back to the corresponding
// header.
void rewriteAsCanonicalUnreachableContinue(Block* header) {
assert(localVariables.empty());
// Delete all instructions except for the label.
assert(instructions.size() > 0);
instructions.resize(1);
successors.clear();
// Add OpBranch back to the header.
assert(header != nullptr);
Instruction* branch = new Instruction(OpBranch);
branch->addIdOperand(header->getId());
addInstruction(std::unique_ptr<Instruction>(branch));
successors.push_back(header);
}
bool isTerminated() const
{
switch (instructions.back()->getOpCode()) {
@ -204,6 +265,7 @@ public:
case OpKill:
case OpReturn:
case OpReturnValue:
case OpUnreachable:
return true;
default:
return false;
@ -237,10 +299,24 @@ protected:
bool unreachable;
};
// The different reasons for reaching a block in the inReadableOrder traversal.
enum ReachReason {
// Reachable from the entry block via transfers of control, i.e. branches.
ReachViaControlFlow = 0,
// A continue target that is not reachable via control flow.
ReachDeadContinue,
// A merge block that is not reachable via control flow.
ReachDeadMerge
};
// Traverses the control-flow graph rooted at root in an order suited for
// readable code generation. Invokes callback at every node in the traversal
// order.
void inReadableOrder(Block* root, std::function<void(Block*)> callback);
// order. The callback arguments are:
// - the block,
// - the reason we reached the block,
// - if the reason was that block is an unreachable continue or unreachable merge block
// then the last parameter is the corresponding header block.
void inReadableOrder(Block* root, std::function<void(Block*, ReachReason, Block* header)> callback);
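Because the callback now reports why each block was reached, a traversal can treat unreachable continue targets and merge blocks specially instead of handling them like ordinary blocks. A hedged usage sketch (entry and the counter are placeholders):
    // Sketch only: count blocks reached only as dead continue/merge targets.
    int deadBlocks = 0;
    spv::inReadableOrder(entry, [&](spv::Block* b, spv::ReachReason why, spv::Block* header) {
        if (why != spv::ReachViaControlFlow)
            ++deadBlocks;   // 'header' is the construct whose continue/merge block 'b' is
    });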
//
// SPIR-V IR Function.
@ -290,7 +366,7 @@ public:
parameterInstructions[p]->dump(out);
// Blocks
inReadableOrder(blocks[0], [&out](const Block* b) { b->dump(out); });
inReadableOrder(blocks[0], [&out](const Block* b, ReachReason, Block*) { b->dump(out); });
Instruction end(0, 0, OpFunctionEnd);
end.dump(out);
}
@ -331,7 +407,9 @@ public:
Instruction* getInstruction(Id id) const { return idToInstruction[id]; }
const std::vector<Function*>& getFunctions() const { return functions; }
spv::Id getTypeId(Id resultId) const { return idToInstruction[resultId]->getTypeId(); }
spv::Id getTypeId(Id resultId) const {
return idToInstruction[resultId] == nullptr ? NoType : idToInstruction[resultId]->getTypeId();
}
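getTypeId now tolerates result ids that have no mapped instruction, returning NoType instead of dereferencing a null entry. Callers can treat that as "type not known yet", for example (module and resultId are placeholders):
    // Sketch only: skip ids whose defining instruction has not been mapped.
    if (module.getTypeId(resultId) == spv::NoType) {
        // forward reference or untyped instruction; nothing to do with its type yet
    }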
StorageClass getStorageClass(Id typeId) const
{
assert(idToInstruction[typeId]->getOpCode() == spv::OpTypePointer);
@ -403,6 +481,6 @@ __inline void Block::addInstruction(std::unique_ptr<Instruction> inst)
parent.getParent().mapInstruction(raw_instruction);
}
}; // end spv namespace
} // end spv namespace
#endif // spvIR_H

Externals/glslang/StandAlone/CMakeLists.txt vendored Executable file → Normal file

@ -4,25 +4,25 @@ set_property(TARGET glslang-default-resource-limits PROPERTY FOLDER glslang)
set_property(TARGET glslang-default-resource-limits PROPERTY POSITION_INDEPENDENT_CODE ON)
target_include_directories(glslang-default-resource-limits
PUBLIC ${CMAKE_CURRENT_SOURCE_DIR}
PUBLIC ${PROJECT_SOURCE_DIR})
PUBLIC $<BUILD_INTERFACE:${CMAKE_CURRENT_SOURCE_DIR}>
PUBLIC $<BUILD_INTERFACE:${PROJECT_SOURCE_DIR}>)
set(SOURCES StandAlone.cpp DirStackFileIncluder.h)
set(REMAPPER_SOURCES spirv-remap.cpp)
add_executable(glslangValidator ${SOURCES})
add_executable(spirv-remap ${REMAPPER_SOURCES})
set_property(TARGET glslangValidator PROPERTY FOLDER tools)
set_property(TARGET spirv-remap PROPERTY FOLDER tools)
glslang_set_link_args(glslangValidator)
glslang_set_link_args(spirv-remap)
set(LIBRARIES
glslang
SPIRV
SPVRemapper
glslang-default-resource-limits)
if(ENABLE_SPVREMAPPER)
set(LIBRARIES ${LIBRARIES} SPVRemapper)
endif()
if(WIN32)
set(LIBRARIES ${LIBRARIES} psapi)
elseif(UNIX)
@ -32,21 +32,36 @@ elseif(UNIX)
endif(WIN32)
target_link_libraries(glslangValidator ${LIBRARIES})
target_link_libraries(spirv-remap ${LIBRARIES})
target_include_directories(glslangValidator PUBLIC
$<BUILD_INTERFACE:${CMAKE_CURRENT_SOURCE_DIR}/../External>
$<INSTALL_INTERFACE:${CMAKE_INSTALL_INCLUDEDIR}/External>)
if(ENABLE_SPVREMAPPER)
set(REMAPPER_SOURCES spirv-remap.cpp)
add_executable(spirv-remap ${REMAPPER_SOURCES})
set_property(TARGET spirv-remap PROPERTY FOLDER tools)
glslang_set_link_args(spirv-remap)
target_link_libraries(spirv-remap ${LIBRARIES})
endif()
if(WIN32)
source_group("Source" FILES ${SOURCES})
endif(WIN32)
if(ENABLE_GLSLANG_INSTALL)
install(TARGETS glslangValidator
install(TARGETS glslangValidator EXPORT glslangValidatorTargets
RUNTIME DESTINATION ${CMAKE_INSTALL_BINDIR})
install(EXPORT glslangValidatorTargets DESTINATION ${CMAKE_INSTALL_LIBDIR}/cmake)
install(TARGETS spirv-remap
if(ENABLE_SPVREMAPPER)
install(TARGETS spirv-remap EXPORT spirv-remapTargets
RUNTIME DESTINATION ${CMAKE_INSTALL_BINDIR})
install(EXPORT spirv-remapTargets DESTINATION ${CMAKE_INSTALL_LIBDIR}/cmake)
endif()
if(BUILD_SHARED_LIBS)
install(TARGETS glslang-default-resource-limits
install(TARGETS glslang-default-resource-limits EXPORT glslang-default-resource-limitsTargets
LIBRARY DESTINATION ${CMAKE_INSTALL_LIBDIR})
install(EXPORT glslang-default-resource-limitsTargets DESTINATION ${CMAKE_INSTALL_LIBDIR}/cmake)
endif()
endif(ENABLE_GLSLANG_INSTALL)


@ -125,6 +125,16 @@ const TBuiltInResource DefaultTBuiltInResource = {
/* .MaxCullDistances = */ 8,
/* .MaxCombinedClipAndCullDistances = */ 8,
/* .MaxSamples = */ 4,
/* .maxMeshOutputVerticesNV = */ 256,
/* .maxMeshOutputPrimitivesNV = */ 512,
/* .maxMeshWorkGroupSizeX_NV = */ 32,
/* .maxMeshWorkGroupSizeY_NV = */ 1,
/* .maxMeshWorkGroupSizeZ_NV = */ 1,
/* .maxTaskWorkGroupSizeX_NV = */ 32,
/* .maxTaskWorkGroupSizeY_NV = */ 1,
/* .maxTaskWorkGroupSizeZ_NV = */ 1,
/* .maxMeshViewCountNV = */ 4,
/* .limits = */ {
/* .nonInductiveForLoops = */ 1,
/* .whileLoops = */ 1,
@ -224,7 +234,15 @@ std::string GetDefaultTBuiltInResourceString()
<< "MaxCullDistances " << DefaultTBuiltInResource.maxCullDistances << "\n"
<< "MaxCombinedClipAndCullDistances " << DefaultTBuiltInResource.maxCombinedClipAndCullDistances << "\n"
<< "MaxSamples " << DefaultTBuiltInResource.maxSamples << "\n"
<< "MaxMeshOutputVerticesNV " << DefaultTBuiltInResource.maxMeshOutputVerticesNV << "\n"
<< "MaxMeshOutputPrimitivesNV " << DefaultTBuiltInResource.maxMeshOutputPrimitivesNV << "\n"
<< "MaxMeshWorkGroupSizeX_NV " << DefaultTBuiltInResource.maxMeshWorkGroupSizeX_NV << "\n"
<< "MaxMeshWorkGroupSizeY_NV " << DefaultTBuiltInResource.maxMeshWorkGroupSizeY_NV << "\n"
<< "MaxMeshWorkGroupSizeZ_NV " << DefaultTBuiltInResource.maxMeshWorkGroupSizeZ_NV << "\n"
<< "MaxTaskWorkGroupSizeX_NV " << DefaultTBuiltInResource.maxTaskWorkGroupSizeX_NV << "\n"
<< "MaxTaskWorkGroupSizeY_NV " << DefaultTBuiltInResource.maxTaskWorkGroupSizeY_NV << "\n"
<< "MaxTaskWorkGroupSizeZ_NV " << DefaultTBuiltInResource.maxTaskWorkGroupSizeZ_NV << "\n"
<< "MaxMeshViewCountNV " << DefaultTBuiltInResource.maxMeshViewCountNV << "\n"
<< "nonInductiveForLoops " << DefaultTBuiltInResource.limits.nonInductiveForLoops << "\n"
<< "whileLoops " << DefaultTBuiltInResource.limits.whileLoops << "\n"
<< "doWhileLoops " << DefaultTBuiltInResource.limits.doWhileLoops << "\n"
@ -431,6 +449,24 @@ void DecodeResourceLimits(TBuiltInResource* resources, char* config)
resources->maxCombinedClipAndCullDistances = value;
else if (tokenStr == "MaxSamples")
resources->maxSamples = value;
else if (tokenStr == "MaxMeshOutputVerticesNV")
resources->maxMeshOutputVerticesNV = value;
else if (tokenStr == "MaxMeshOutputPrimitivesNV")
resources->maxMeshOutputPrimitivesNV = value;
else if (tokenStr == "MaxMeshWorkGroupSizeX_NV")
resources->maxMeshWorkGroupSizeX_NV = value;
else if (tokenStr == "MaxMeshWorkGroupSizeY_NV")
resources->maxMeshWorkGroupSizeY_NV = value;
else if (tokenStr == "MaxMeshWorkGroupSizeZ_NV")
resources->maxMeshWorkGroupSizeZ_NV = value;
else if (tokenStr == "MaxTaskWorkGroupSizeX_NV")
resources->maxTaskWorkGroupSizeX_NV = value;
else if (tokenStr == "MaxTaskWorkGroupSizeY_NV")
resources->maxTaskWorkGroupSizeY_NV = value;
else if (tokenStr == "MaxTaskWorkGroupSizeZ_NV")
resources->maxTaskWorkGroupSizeZ_NV = value;
else if (tokenStr == "MaxMeshViewCountNV")
resources->maxMeshViewCountNV = value;
else if (tokenStr == "nonInductiveForLoops")
resources->limits.nonInductiveForLoops = (value != 0);
else if (tokenStr == "whileLoops")


@ -37,7 +37,7 @@
#include <string>
#include "glslang/Include/ResourceLimits.h"
#include "../glslang/Include/ResourceLimits.h"
namespace glslang {


@ -102,6 +102,9 @@ enum TOptions {
EOptionDumpBareVersion = (1 << 31),
};
bool targetHlslFunctionality1 = false;
bool SpvToolsDisassembler = false;
bool SpvToolsValidate = false;
bool NaNClamp = false;
//
// Return codes from main/exit().
@ -143,13 +146,16 @@ void ProcessConfigFile()
{
if (ConfigFile.size() == 0)
Resources = glslang::DefaultTBuiltInResource;
#ifndef GLSLANG_WEB
else {
char* configString = ReadFileData(ConfigFile.c_str());
glslang::DecodeResourceLimits(&Resources, configString);
FreeFileData(configString);
}
#endif
}
int ReflectOptions = EShReflectionDefault;
int Options = 0;
const char* ExecutableName = nullptr;
const char* binaryFileName = nullptr;
@ -158,19 +164,28 @@ const char* sourceEntryPointName = nullptr;
const char* shaderStageName = nullptr;
const char* variableName = nullptr;
bool HlslEnable16BitTypes = false;
bool HlslDX9compatible = false;
bool DumpBuiltinSymbols = false;
std::vector<std::string> IncludeDirectoryList;
int ClientInputSemanticsVersion = 100; // maps to, say, #define VULKAN 100
glslang::EShTargetClientVersion VulkanClientVersion =
glslang::EShTargetVulkan_1_0; // would map to, say, Vulkan 1.0
glslang::EShTargetClientVersion OpenGLClientVersion =
glslang::EShTargetOpenGL_450; // doesn't influence anything yet, but maps to OpenGL 4.50
glslang::EShTargetLanguageVersion TargetVersion =
glslang::EShTargetSpv_1_0; // maps to, say, SPIR-V 1.0
// Source environment
// (source 'Client' is currently the same as target 'Client')
int ClientInputSemanticsVersion = 100;
// Target environment
glslang::EShClient Client = glslang::EShClientNone; // will stay EShClientNone if only validating
glslang::EShTargetClientVersion ClientVersion; // not valid until Client is set
glslang::EShTargetLanguage TargetLanguage = glslang::EShTargetNone;
glslang::EShTargetLanguageVersion TargetVersion; // not valid until TargetLanguage is set
std::vector<std::string> Processes; // what should be recorded by OpModuleProcessed, or equivalent
// Per descriptor-set binding base data
typedef std::map<unsigned int, unsigned int> TPerSetBaseBinding;
std::vector<std::pair<std::string, int>> uniformLocationOverrides;
int uniformBase = 0;
std::array<std::array<unsigned int, EShLangCount>, glslang::EResCount> baseBinding;
std::array<std::array<TPerSetBaseBinding, EShLangCount>, glslang::EResCount> baseBindingForSet;
std::array<std::vector<std::string>, EShLangCount> baseResourceSetBinding;
@ -189,7 +204,7 @@ public:
text.append("#define ");
fixLine(def);
Processes.push_back("D");
Processes.push_back("define-macro ");
Processes.back().append(def);
// The first "=" needs to turn into a space
@ -207,7 +222,7 @@ public:
text.append("#undef ");
fixLine(undef);
Processes.push_back("U");
Processes.push_back("undef-macro ");
Processes.back().append(undef);
text.append(undef);
@ -242,6 +257,14 @@ const char* GetBinaryName(EShLanguage stage)
case EShLangGeometry: name = "geom.spv"; break;
case EShLangFragment: name = "frag.spv"; break;
case EShLangCompute: name = "comp.spv"; break;
case EShLangRayGenNV: name = "rgen.spv"; break;
case EShLangIntersectNV: name = "rint.spv"; break;
case EShLangAnyHitNV: name = "rahit.spv"; break;
case EShLangClosestHitNV: name = "rchit.spv"; break;
case EShLangMissNV: name = "rmiss.spv"; break;
case EShLangCallableNV: name = "rcall.spv"; break;
case EShLangMeshNV: name = "mesh.spv"; break;
case EShLangTaskNV: name = "task.spv"; break;
default: name = "unknown"; break;
}
} else
@ -269,9 +292,12 @@ bool SetConfigFile(const std::string& name)
//
// Give error and exit with failure code.
//
void Error(const char* message)
void Error(const char* message, const char* detail = nullptr)
{
fprintf(stderr, "%s: Error %s (use -h for usage)\n", ExecutableName, message);
fprintf(stderr, "%s: Error: ", ExecutableName);
if (detail != nullptr)
fprintf(stderr, "%s: ", detail);
fprintf(stderr, "%s (use -h for usage)\n", message);
exit(EFailUsage);
}
@ -321,7 +347,7 @@ void ProcessBindingBase(int& argc, char**& argv, glslang::TResourceType res)
for (int lang = langMin; lang < langMax; ++lang) {
if (!perSetBase.empty())
baseBindingForSet[res][lang] = perSetBase;
baseBindingForSet[res][lang].insert(perSetBase.begin(), perSetBase.end());
else
baseBinding[res][lang] = singleBase;
}
@ -406,6 +432,9 @@ void ProcessArguments(std::vector<std::unique_ptr<glslang::TWorkItem>>& workItem
// minimum needed (without overriding something else) to target Vulkan SPIR-V
const auto setVulkanSpv = []() {
if (Client == glslang::EShClientNone)
ClientVersion = glslang::EShTargetVulkan_1_0;
Client = glslang::EShClientVulkan;
Options |= EOptionSpv;
Options |= EOptionVulkanRules;
Options |= EOptionLinkProgram;
@ -413,12 +442,31 @@ void ProcessArguments(std::vector<std::unique_ptr<glslang::TWorkItem>>& workItem
// minimum needed (without overriding something else) to target OpenGL SPIR-V
const auto setOpenGlSpv = []() {
if (Client == glslang::EShClientNone)
ClientVersion = glslang::EShTargetOpenGL_450;
Client = glslang::EShClientOpenGL;
Options |= EOptionSpv;
Options |= EOptionLinkProgram;
// undo a -H default to Vulkan
Options &= ~EOptionVulkanRules;
};
const auto getUniformOverride = [getStringOperand]() {
const char *arg = getStringOperand("-u<name>:<location>");
const char *split = strchr(arg, ':');
if (split == NULL) {
printf("%s: missing location\n", arg);
exit(EFailUsage);
}
errno = 0;
int location = ::strtol(split + 1, NULL, 10);
if (errno) {
printf("%s: invalid location\n", arg);
exit(EFailUsage);
}
return std::make_pair(std::string(arg, split - arg), location);
};
for (bumpArg(); argc >= 1; bumpArg()) {
if (argv[0][0] == '-') {
switch (argv[0][1]) {
@ -435,6 +483,12 @@ void ProcessArguments(std::vector<std::unique_ptr<glslang::TWorkItem>>& workItem
} else if (lowerword == "auto-map-locations" || // synonyms
lowerword == "aml") {
Options |= EOptionAutoMapLocations;
} else if (lowerword == "uniform-base") {
if (argc <= 1)
Error("no <base> provided", lowerword.c_str());
uniformBase = ::strtol(argv[1], NULL, 10);
bumpArg();
break;
} else if (lowerword == "client") {
if (argc > 1) {
if (strcmp(argv[1], "vulkan100") == 0)
@ -442,8 +496,23 @@ void ProcessArguments(std::vector<std::unique_ptr<glslang::TWorkItem>>& workItem
else if (strcmp(argv[1], "opengl100") == 0)
setOpenGlSpv();
else
Error("--client expects vulkan100 or opengl100");
}
Error("expects vulkan100 or opengl100", lowerword.c_str());
} else
Error("expects vulkan100 or opengl100", lowerword.c_str());
bumpArg();
} else if (lowerword == "define-macro" ||
lowerword == "d") {
if (argc > 1)
UserPreamble.addDef(argv[1]);
else
Error("expects <name[=def]>", argv[0]);
bumpArg();
} else if (lowerword == "dump-builtin-symbols") {
DumpBuiltinSymbols = true;
} else if (lowerword == "entry-point") {
entryPointName = argv[1];
if (argc <= 1)
Error("no <name> provided", lowerword.c_str());
bumpArg();
} else if (lowerword == "flatten-uniform-arrays" || // synonyms
lowerword == "flatten-uniform-array" ||
@ -457,17 +526,33 @@ void ProcessArguments(std::vector<std::unique_ptr<glslang::TWorkItem>>& workItem
Options |= EOptionHlslIoMapping;
} else if (lowerword == "hlsl-enable-16bit-types") {
HlslEnable16BitTypes = true;
} else if (lowerword == "hlsl-dx9-compatible") {
HlslDX9compatible = true;
} else if (lowerword == "invert-y" || // synonyms
lowerword == "iy") {
Options |= EOptionInvertY;
} else if (lowerword == "keep-uncalled" || // synonyms
lowerword == "ku") {
Options |= EOptionKeepUncalled;
} else if (lowerword == "nan-clamp") {
NaNClamp = true;
} else if (lowerword == "no-storage-format" || // synonyms
lowerword == "nsf") {
Options |= EOptionNoStorageFormat;
} else if (lowerword == "relaxed-errors") {
Options |= EOptionRelaxedErrors;
} else if (lowerword == "reflect-strict-array-suffix") {
ReflectOptions |= EShReflectionStrictArraySuffix;
} else if (lowerword == "reflect-basic-array-suffix") {
ReflectOptions |= EShReflectionBasicArraySuffix;
} else if (lowerword == "reflect-intermediate-io") {
ReflectOptions |= EShReflectionIntermediateIO;
} else if (lowerword == "reflect-separate-buffers") {
ReflectOptions |= EShReflectionSeparateBuffers;
} else if (lowerword == "reflect-all-block-variables") {
ReflectOptions |= EShReflectionAllBlockVariables;
} else if (lowerword == "reflect-unwrap-io-blocks") {
ReflectOptions |= EShReflectionUnwrapIOBlocks;
} else if (lowerword == "resource-set-bindings" || // synonyms
lowerword == "resource-set-binding" ||
lowerword == "rsb") {
@ -477,8 +562,8 @@ void ProcessArguments(std::vector<std::unique_ptr<glslang::TWorkItem>>& workItem
lowerword == "sib") {
ProcessBindingBase(argc, argv, glslang::EResImage);
} else if (lowerword == "shift-sampler-bindings" || // synonyms
lowerword == "shift-sampler-binding" ||
lowerword == "ssb") {
lowerword == "shift-sampler-binding" ||
lowerword == "ssb") {
ProcessBindingBase(argc, argv, glslang::EResSampler);
} else if (lowerword == "shift-uav-bindings" || // synonyms
lowerword == "shift-uav-binding" ||
@ -502,10 +587,14 @@ void ProcessArguments(std::vector<std::unique_ptr<glslang::TWorkItem>>& workItem
} else if (lowerword == "source-entrypoint" || // synonyms
lowerword == "sep") {
if (argc <= 1)
Error("no <entry-point> provided for --source-entrypoint");
Error("no <entry-point> provided", lowerword.c_str());
sourceEntryPointName = argv[1];
bumpArg();
break;
} else if (lowerword == "spirv-dis") {
SpvToolsDisassembler = true;
} else if (lowerword == "spirv-val") {
SpvToolsValidate = true;
} else if (lowerword == "stdin") {
Options |= EOptionStdin;
shaderStageName = argv[1];
@ -515,30 +604,58 @@ void ProcessArguments(std::vector<std::unique_ptr<glslang::TWorkItem>>& workItem
if (argc > 1) {
if (strcmp(argv[1], "vulkan1.0") == 0) {
setVulkanSpv();
VulkanClientVersion = glslang::EShTargetVulkan_1_0;
ClientVersion = glslang::EShTargetVulkan_1_0;
} else if (strcmp(argv[1], "vulkan1.1") == 0) {
setVulkanSpv();
TargetVersion = glslang::EShTargetSpv_1_3;
VulkanClientVersion = glslang::EShTargetVulkan_1_1;
ClientVersion = glslang::EShTargetVulkan_1_1;
} else if (strcmp(argv[1], "vulkan1.2") == 0) {
setVulkanSpv();
ClientVersion = glslang::EShTargetVulkan_1_2;
} else if (strcmp(argv[1], "opengl") == 0) {
setOpenGlSpv();
OpenGLClientVersion = glslang::EShTargetOpenGL_450;
ClientVersion = glslang::EShTargetOpenGL_450;
} else if (strcmp(argv[1], "spirv1.0") == 0) {
TargetLanguage = glslang::EShTargetSpv;
TargetVersion = glslang::EShTargetSpv_1_0;
} else if (strcmp(argv[1], "spirv1.1") == 0) {
TargetLanguage = glslang::EShTargetSpv;
TargetVersion = glslang::EShTargetSpv_1_1;
} else if (strcmp(argv[1], "spirv1.2") == 0) {
TargetLanguage = glslang::EShTargetSpv;
TargetVersion = glslang::EShTargetSpv_1_2;
} else if (strcmp(argv[1], "spirv1.3") == 0) {
TargetLanguage = glslang::EShTargetSpv;
TargetVersion = glslang::EShTargetSpv_1_3;
} else if (strcmp(argv[1], "spirv1.4") == 0) {
TargetLanguage = glslang::EShTargetSpv;
TargetVersion = glslang::EShTargetSpv_1_4;
} else if (strcmp(argv[1], "spirv1.5") == 0) {
TargetLanguage = glslang::EShTargetSpv;
TargetVersion = glslang::EShTargetSpv_1_5;
} else
Error("--target-env expected vulkan1.0, vulkan1.1, or opengl");
Error("--target-env expected one of: vulkan1.0, vulkan1.1, vulkan1.2, opengl,\n"
"spirv1.0, spirv1.1, spirv1.2, spirv1.3, spirv1.4, or spirv1.5");
}
bumpArg();
} else if (lowerword == "undef-macro" ||
lowerword == "u") {
if (argc > 1)
UserPreamble.addUndef(argv[1]);
else
Error("expects <name>", argv[0]);
bumpArg();
} else if (lowerword == "variable-name" || // synonyms
lowerword == "vn") {
Options |= EOptionOutputHexadecimal;
if (argc <= 1)
Error("no <C-variable-name> provided for --variable-name");
Error("no <C-variable-name> provided", lowerword.c_str());
variableName = argv[1];
bumpArg();
break;
} else if (lowerword == "version") {
Options |= EOptionDumpVersions;
} else {
usage();
Error("unrecognized command-line option", argv[0]);
}
}
break;
@ -549,13 +666,16 @@ void ProcessArguments(std::vector<std::unique_ptr<glslang::TWorkItem>>& workItem
if (argv[0][2] == 0)
Options |= EOptionReadHlsl;
else
UserPreamble.addDef(getStringOperand("-D<macro> macro name"));
UserPreamble.addDef(getStringOperand("-D<name[=def]>"));
break;
case 'u':
uniformLocationOverrides.push_back(getUniformOverride());
break;
case 'E':
Options |= EOptionOutputPreprocessed;
break;
case 'G':
// OpenGL Client
// OpenGL client
setOpenGlSpv();
if (argv[0][2] != 0)
ClientInputSemanticsVersion = getAttachedNumber("-G<num> client input semantics");
@ -589,7 +709,7 @@ void ProcessArguments(std::vector<std::unique_ptr<glslang::TWorkItem>>& workItem
bumpArg();
break;
case 'U':
UserPreamble.addUndef(getStringOperand("-U<macro>: macro name"));
UserPreamble.addUndef(getStringOperand("-U<name>"));
break;
case 'V':
setVulkanSpv();
@ -607,8 +727,6 @@ void ProcessArguments(std::vector<std::unique_ptr<glslang::TWorkItem>>& workItem
Options |= EOptionDefaultDesktop;
break;
case 'e':
// HLSL todo: entry point handle needs much more sophistication.
// This is okay for one compilation unit with one entry point.
entryPointName = argv[1];
if (argc <= 1)
Error("no <name> provided for -e");
@ -663,7 +781,7 @@ void ProcessArguments(std::vector<std::unique_ptr<glslang::TWorkItem>>& workItem
Options |= EOptionOutputHexadecimal;
break;
default:
usage();
Error("unrecognized command-line option", argv[0]);
break;
}
} else {
@ -679,8 +797,17 @@ void ProcessArguments(std::vector<std::unique_ptr<glslang::TWorkItem>>& workItem
Error("must provide -S when --stdin is given");
// Make sure that -E is not specified alongside linking (which includes SPV generation)
if ((Options & EOptionOutputPreprocessed) && (Options & EOptionLinkProgram))
Error("can't use -E when linking is selected");
// Or things that require linking
if (Options & EOptionOutputPreprocessed) {
if (Options & EOptionLinkProgram)
Error("can't use -E when linking is selected");
if (Options & EOptionDumpReflection)
Error("reflection requires linking, which can't be used when -E when is selected");
}
// reflection requires linking
if ((Options & EOptionDumpReflection) && !(Options & EOptionLinkProgram))
Error("reflection requires -l for linking");
// -o or -x makes no sense if there is no target binary
if (binaryFileName && (Options & EOptionSpv) == 0)
@ -689,6 +816,32 @@ void ProcessArguments(std::vector<std::unique_ptr<glslang::TWorkItem>>& workItem
if ((Options & EOptionFlattenUniformArrays) != 0 &&
(Options & EOptionReadHlsl) == 0)
Error("uniform array flattening only valid when compiling HLSL source.");
// rationalize client and target language
if (TargetLanguage == glslang::EShTargetNone) {
switch (ClientVersion) {
case glslang::EShTargetVulkan_1_0:
TargetLanguage = glslang::EShTargetSpv;
TargetVersion = glslang::EShTargetSpv_1_0;
break;
case glslang::EShTargetVulkan_1_1:
TargetLanguage = glslang::EShTargetSpv;
TargetVersion = glslang::EShTargetSpv_1_3;
break;
case glslang::EShTargetVulkan_1_2:
TargetLanguage = glslang::EShTargetSpv;
TargetVersion = glslang::EShTargetSpv_1_5;
break;
case glslang::EShTargetOpenGL_450:
TargetLanguage = glslang::EShTargetSpv;
TargetVersion = glslang::EShTargetSpv_1_0;
break;
default:
break;
}
}
if (TargetLanguage != glslang::EShTargetNone && Client == glslang::EShClientNone)
Error("To generate SPIR-V, also specify client semantics. See -G and -V.");
}
//
@ -722,6 +875,10 @@ void SetMessageOptions(EShMessages& messages)
messages = (EShMessages)(messages | EShMsgHlslEnable16BitTypes);
if ((Options & EOptionOptimizeDisable) || !ENABLE_OPT)
messages = (EShMessages)(messages | EShMsgHlslLegalization);
if (HlslDX9compatible)
messages = (EShMessages)(messages | EShMsgHlslDX9Compatible);
if (DumpBuiltinSymbols)
messages = (EShMessages)(messages | EShMsgBuiltinSymbolTable);
}
//
@ -734,17 +891,18 @@ void CompileShaders(glslang::TWorklist& worklist)
glslang::TWorkItem* workItem;
if (Options & EOptionStdin) {
worklist.remove(workItem);
ShHandle compiler = ShConstructCompiler(FindLanguage("stdin"), Options);
if (compiler == 0)
return;
if (worklist.remove(workItem)) {
ShHandle compiler = ShConstructCompiler(FindLanguage("stdin"), Options);
if (compiler == nullptr)
return;
CompileFile("stdin", compiler);
CompileFile("stdin", compiler);
if (! (Options & EOptionSuppressInfolog))
workItem->results = ShGetInfoLog(compiler);
ShDestruct(compiler);
ShDestruct(compiler);
}
} else {
while (worklist.remove(workItem)) {
ShHandle compiler = ShConstructCompiler(FindLanguage(workItem->name), Options);
@ -836,7 +994,7 @@ void CompileAndLinkShaderUnits(std::vector<ShaderCompUnit> compUnits)
const auto &compUnit = *it;
glslang::TShader* shader = new glslang::TShader(compUnit.stage);
shader->setStringsWithLengthsAndNames(compUnit.text, NULL, compUnit.fileNameList, compUnit.count);
if (entryPointName) // HLSL todo: this needs to be tracked per compUnits
if (entryPointName)
shader->setEntryPoint(entryPointName);
if (sourceEntryPointName) {
if (entryPointName == nullptr)
@ -848,53 +1006,60 @@ void CompileAndLinkShaderUnits(std::vector<ShaderCompUnit> compUnits)
shader->setPreamble(UserPreamble.get());
shader->addProcesses(Processes);
#ifndef GLSLANG_WEB
// Set IO mapper binding shift values
for (int r = 0; r < glslang::EResCount; ++r) {
const glslang::TResourceType res = glslang::TResourceType(r);
// Set base bindings
shader->setShiftBinding(res, baseBinding[res][compUnit.stage]);
// Set bindings for particular resource sets
// TODO: use a range based for loop here, when available in all environments.
for (auto i = baseBindingForSet[res][compUnit.stage].begin();
i != baseBindingForSet[res][compUnit.stage].end(); ++i)
shader->setShiftBindingForSet(res, i->second, i->first);
}
shader->setFlattenUniformArrays((Options & EOptionFlattenUniformArrays) != 0);
shader->setNoStorageFormat((Options & EOptionNoStorageFormat) != 0);
shader->setResourceSetBinding(baseResourceSetBinding[compUnit.stage]);
if (Options & EOptionHlslIoMapping)
shader->setHlslIoMapping(true);
if (Options & EOptionAutoMapBindings)
shader->setAutoMapBindings(true);
if (Options & EOptionAutoMapLocations)
shader->setAutoMapLocations(true);
for (auto& uniOverride : uniformLocationOverrides) {
shader->addUniformLocationOverride(uniOverride.first.c_str(),
uniOverride.second);
}
shader->setUniformLocationBase(uniformBase);
#endif
shader->setNanMinMaxClamp(NaNClamp);
#ifdef ENABLE_HLSL
shader->setFlattenUniformArrays((Options & EOptionFlattenUniformArrays) != 0);
if (Options & EOptionHlslIoMapping)
shader->setHlslIoMapping(true);
#endif
if (Options & EOptionInvertY)
shader->setInvertY(true);
// Set up the environment, some subsettings take precedence over earlier
// ways of setting things.
if (Options & EOptionSpv) {
if (Options & EOptionVulkanRules) {
shader->setEnvInput((Options & EOptionReadHlsl) ? glslang::EShSourceHlsl
: glslang::EShSourceGlsl,
compUnit.stage, glslang::EShClientVulkan, ClientInputSemanticsVersion);
shader->setEnvClient(glslang::EShClientVulkan, VulkanClientVersion);
} else {
shader->setEnvInput((Options & EOptionReadHlsl) ? glslang::EShSourceHlsl
: glslang::EShSourceGlsl,
compUnit.stage, glslang::EShClientOpenGL, ClientInputSemanticsVersion);
shader->setEnvClient(glslang::EShClientOpenGL, OpenGLClientVersion);
}
shader->setEnvTarget(glslang::EShTargetSpv, TargetVersion);
shader->setEnvInput((Options & EOptionReadHlsl) ? glslang::EShSourceHlsl
: glslang::EShSourceGlsl,
compUnit.stage, Client, ClientInputSemanticsVersion);
shader->setEnvClient(Client, ClientVersion);
shader->setEnvTarget(TargetLanguage, TargetVersion);
#ifdef ENABLE_HLSL
if (targetHlslFunctionality1)
shader->setEnvTargetHlslFunctionality1();
#endif
}
shaders.push_back(shader);
@ -904,10 +1069,10 @@ void CompileAndLinkShaderUnits(std::vector<ShaderCompUnit> compUnits)
DirStackFileIncluder includer;
std::for_each(IncludeDirectoryList.rbegin(), IncludeDirectoryList.rend(), [&includer](const std::string& dir) {
includer.pushExternalLocalDirectory(dir); });
#ifndef GLSLANG_WEB
if (Options & EOptionOutputPreprocessed) {
std::string str;
if (shader->preprocess(&Resources, defaultVersion, ENoProfile, false, false,
messages, &str, includer)) {
if (shader->preprocess(&Resources, defaultVersion, ENoProfile, false, false, messages, &str, includer)) {
PutsIfNonEmpty(str.c_str());
} else {
CompileFailed = true;
@ -916,6 +1081,8 @@ void CompileAndLinkShaderUnits(std::vector<ShaderCompUnit> compUnits)
StderrIfNonEmpty(shader->getInfoDebugLog());
continue;
}
#endif
if (! shader->parse(&Resources, defaultVersion, false, messages, includer))
CompileFailed = true;
@ -937,11 +1104,13 @@ void CompileAndLinkShaderUnits(std::vector<ShaderCompUnit> compUnits)
if (! (Options & EOptionOutputPreprocessed) && ! program.link(messages))
LinkFailed = true;
#ifndef GLSLANG_WEB
// Map IO
if (Options & EOptionSpv) {
if (!program.mapIO())
LinkFailed = true;
}
#endif
// Report
if (! (Options & EOptionSuppressInfolog) &&
@ -950,11 +1119,13 @@ void CompileAndLinkShaderUnits(std::vector<ShaderCompUnit> compUnits)
PutsIfNonEmpty(program.getInfoDebugLog());
}
#ifndef GLSLANG_WEB
// Reflect
if (Options & EOptionDumpReflection) {
program.buildReflection();
program.buildReflection(ReflectOptions);
program.dumpReflection();
}
#endif
// Dump SPIR-V
if (Options & EOptionSpv) {
@ -971,6 +1142,8 @@ void CompileAndLinkShaderUnits(std::vector<ShaderCompUnit> compUnits)
spvOptions.generateDebugInfo = true;
spvOptions.disableOptimizer = (Options & EOptionOptimizeDisable) != 0;
spvOptions.optimizeSize = (Options & EOptionOptimizeSize) != 0;
spvOptions.disassemble = SpvToolsDisassembler;
spvOptions.validate = SpvToolsValidate;
glslang::GlslangToSpv(*program.getIntermediate((EShLanguage)stage), spirv, &logger, &spvOptions);
// Dump the spv to a file or stdout, etc., but only if not doing
@ -982,9 +1155,10 @@ void CompileAndLinkShaderUnits(std::vector<ShaderCompUnit> compUnits)
} else {
glslang::OutputSpvBin(spirv, GetBinaryName((EShLanguage)stage));
}
if (Options & EOptionHumanReadableSpv) {
#ifndef GLSLANG_WEB
if (!SpvToolsDisassembler && (Options & EOptionHumanReadableSpv))
spv::Disassemble(std::cout, spirv);
}
#endif
}
}
}
@ -1072,11 +1246,13 @@ int singleMain()
workList.add(item.get());
});
#ifndef GLSLANG_WEB
if (Options & EOptionDumpConfig) {
printf("%s", glslang::GetDefaultTBuiltInResourceString().c_str());
if (workList.empty())
return ESuccess;
}
#endif
if (Options & EOptionDumpBareVersion) {
printf("%d.%d.%d\n",
@ -1111,13 +1287,15 @@ int singleMain()
ProcessConfigFile();
if ((Options & EOptionReadHlsl) && !((Options & EOptionOutputPreprocessed) || (Options & EOptionSpv)))
Error("HLSL requires SPIR-V code generation (or preprocessing only)");
//
// Two modes:
// 1) linking all arguments together, single-threaded, new C++ interface
// 2) independent arguments, can be tackled by multiple asynchronous threads, for testing thread safety, using the old handle interface
//
if (Options & EOptionLinkProgram ||
Options & EOptionOutputPreprocessed) {
if (Options & (EOptionLinkProgram | EOptionOutputPreprocessed)) {
glslang::InitializeProcess();
glslang::InitializeProcess(); // also test reference counting of users
glslang::InitializeProcess(); // also test reference counting of users
@ -1195,7 +1373,14 @@ int C_DECL main(int argc, char* argv[])
// .geom = geometry
// .frag = fragment
// .comp = compute
//
// .rgen = ray generation
// .rint = ray intersection
// .rahit = ray any hit
// .rchit = ray closest hit
// .rmiss = ray miss
// .rcall = ray callable
// .mesh = mesh
// .task = task
// Additionally, the file names may end in .<stage>.glsl and .<stage>.hlsl
// where <stage> is one of the stages listed above.
//
@ -1239,6 +1424,22 @@ EShLanguage FindLanguage(const std::string& name, bool parseStageName)
return EShLangFragment;
else if (stageName == "comp")
return EShLangCompute;
else if (stageName == "rgen")
return EShLangRayGenNV;
else if (stageName == "rint")
return EShLangIntersectNV;
else if (stageName == "rahit")
return EShLangAnyHitNV;
else if (stageName == "rchit")
return EShLangClosestHitNV;
else if (stageName == "rmiss")
return EShLangMissNV;
else if (stageName == "rcall")
return EShLangCallableNV;
else if (stageName == "mesh")
return EShLangMeshNV;
else if (stageName == "task")
return EShLangTaskNV;
usage();
return EShLangVertex;
@ -1308,30 +1509,39 @@ void usage()
" .geom for a geometry shader\n"
" .frag for a fragment shader\n"
" .comp for a compute shader\n"
" .mesh for a mesh shader\n"
" .task for a task shader\n"
" .rgen for a ray generation shader\n"
" .rint for a ray intersection shader\n"
" .rahit for a ray any hit shader\n"
" .rchit for a ray closest hit shader\n"
" .rmiss for a ray miss shader\n"
" .rcall for a ray callable shader\n"
" .glsl for .vert.glsl, .tesc.glsl, ..., .comp.glsl compound suffixes\n"
" .hlsl for .vert.hlsl, .tesc.hlsl, ..., .comp.hlsl compound suffixes\n"
"\n"
"Options:\n"
" -C cascading errors; risk crash from accumulation of error recoveries\n"
" -D input is HLSL (default when any suffix is .hlsl)\n"
" -D<macro=def>\n"
" -D<macro> define a pre-processor macro\n"
" -D input is HLSL (this is the default when any suffix is .hlsl)\n"
" -D<name[=def]> | --define-macro <name[=def]> | --D <name[=def]>\n"
" define a pre-processor macro\n"
" -E print pre-processed GLSL; cannot be used with -l;\n"
" errors will appear on stderr.\n"
" errors will appear on stderr\n"
" -G[ver] create SPIR-V binary, under OpenGL semantics; turns on -l;\n"
" default file name is <stage>.spv (-o overrides this)\n"
" default file name is <stage>.spv (-o overrides this);\n"
" 'ver', when present, is the version of the input semantics,\n"
" which will appear in #define GL_SPIRV ver\n"
" '--client opengl100' is the same as -G100\n"
" which will appear in #define GL_SPIRV ver;\n"
" '--client opengl100' is the same as -G100;\n"
" a '--target-env' for OpenGL will also imply '-G'\n"
" -H print human readable form of SPIR-V; turns on -V\n"
" -I<dir> add dir to the include search path; includer's directory\n"
" is searched first, followed by left-to-right order of -I\n"
" -Od disables optimization. May cause illegal SPIR-V for HLSL.\n"
" -Os optimizes SPIR-V to minimize size.\n"
" -Od disables optimization; may cause illegal SPIR-V for HLSL\n"
" -Os optimizes SPIR-V to minimize size\n"
" -S <stage> uses specified stage rather than parsing the file extension\n"
" choices for <stage> are vert, tesc, tese, geom, frag, or comp\n"
" -U<macro> undefine a pre-processor macro\n"
" -U<name> | --undef-macro <name> | --U <name>\n"
" undefine a pre-processor macro\n"
" -V[ver] create SPIR-V binary, under Vulkan semantics; turns on -l;\n"
" default file name is <stage>.spv (-o overrides this)\n"
" 'ver', when present, is the version of the input semantics,\n"
@ -1342,89 +1552,116 @@ void usage()
" creates the default configuration file (redirect to a .conf file)\n"
" -d default to desktop (#version 110) when there is no shader #version\n"
" (default is ES version 100)\n"
" -e <name> specify <name> as the entry-point name\n"
" -e <name> | --entry-point <name>\n"
" specify <name> as the entry-point function name\n"
" -f{hlsl_functionality1}\n"
" 'hlsl_functionality1' enables use of the\n"
" SPV_GOOGLE_hlsl_functionality1 extension\n"
" SPV_GOOGLE_hlsl_functionality1 extension\n"
" -g generate debug information\n"
" -h print this usage message\n"
" -i intermediate tree (glslang AST) is printed out\n"
" -l link all input files together to form a single module\n"
" -m memory leak mode\n"
" -o <file> save binary to <file>, requires a binary option (e.g., -V)\n"
" -q dump reflection query database\n"
" -r synonym for --relaxed-errors\n"
" -q dump reflection query database; requires -l for linking\n"
" -r | --relaxed-errors"
" relaxed GLSL semantic error-checking mode\n"
" -s silence syntax and semantic error reporting\n"
" -t multi-threaded mode\n"
" -v print version strings\n"
" -w synonym for --suppress-warnings\n"
" -v | --version\n"
" print version strings\n"
" -w | --suppress-warnings\n"
" suppress GLSL warnings, except as required by \"#extension : warn\"\n"
" -x save binary output as text-based 32-bit hexadecimal numbers\n"
" --auto-map-bindings automatically bind uniform variables\n"
" without explicit bindings.\n"
" --amb synonym for --auto-map-bindings\n"
" --auto-map-locations automatically locate input/output lacking\n"
" 'location' (fragile, not cross stage)\n"
" --aml synonym for --auto-map-locations\n"
" --client {vulkan<ver>|opengl<ver>} see -V and -G\n"
" -dumpfullversion print bare major.minor.patchlevel\n"
" -dumpversion same as -dumpfullversion\n"
" --flatten-uniform-arrays flatten uniform texture/sampler arrays to\n"
" scalars\n"
" --fua synonym for --flatten-uniform-arrays\n"
" --hlsl-offsets Allow block offsets to follow HLSL rules\n"
" Works independently of source language\n"
" --hlsl-iomap Perform IO mapping in HLSL register space\n"
" --hlsl-enable-16bit-types Allow use of 16-bit types in SPIR-V for HLSL\n"
" --invert-y | --iy invert position.Y output in vertex shader\n"
" --keep-uncalled don't eliminate uncalled functions\n"
" --ku synonym for --keep-uncalled\n"
" --no-storage-format use Unknown image format\n"
" --nsf synonym for --no-storage-format\n"
" --relaxed-errors relaxed GLSL semantic error-checking mode\n"
" -u<name>:<loc> specify a uniform location override for --aml\n"
" --uniform-base <base> set a base to use for generated uniform locations\n"
" --auto-map-bindings | --amb automatically bind uniform variables\n"
" without explicit bindings\n"
" --auto-map-locations | --aml automatically locate input/output lacking\n"
" 'location' (fragile, not cross stage)\n"
" --client {vulkan<ver>|opengl<ver>} see -V and -G\n"
" --dump-builtin-symbols prints builtin symbol table prior each compile\n"
" -dumpfullversion | -dumpversion print bare major.minor.patchlevel\n"
" --flatten-uniform-arrays | --fua flatten uniform texture/sampler arrays to\n"
" scalars\n"
" --hlsl-offsets allow block offsets to follow HLSL rules\n"
" works independently of source language\n"
" --hlsl-iomap perform IO mapping in HLSL register space\n"
" --hlsl-enable-16bit-types allow 16-bit types in SPIR-V for HLSL\n"
" --hlsl-dx9-compatible interprets sampler declarations as a\n"
" texture/sampler combo like DirectX9 would.\n"
" --invert-y | --iy invert position.Y output in vertex shader\n"
" --keep-uncalled | --ku don't eliminate uncalled functions\n"
" --nan-clamp favor non-NaN operand in min, max, and clamp\n"
" --no-storage-format | --nsf use Unknown image format\n"
" --reflect-strict-array-suffix use strict array suffix rules when\n"
" reflecting\n"
" --reflect-basic-array-suffix arrays of basic types will have trailing [0]\n"
" --reflect-intermediate-io reflection includes inputs/outputs of linked\n"
" shaders rather than just vertex/fragment\n"
" --reflect-separate-buffers reflect buffer variables and blocks\n"
" separately to uniforms\n"
" --reflect-all-block-variables reflect all variables in blocks, whether\n"
" inactive or active\n"
" --reflect-unwrap-io-blocks unwrap input/output blocks the same as\n"
" uniform blocks\n"
" --resource-set-binding [stage] name set binding\n"
" Set descriptor set and binding for individual resources\n"
" set descriptor set and binding for\n"
" individual resources\n"
" --resource-set-binding [stage] set\n"
" Set descriptor set for all resources\n"
" --rsb [stage] type set binding synonym for --resource-set-binding\n"
" --shift-image-binding [stage] num base binding number for images (uav)\n"
" --shift-image-binding [stage] [num set]... per-descriptor-set shift values\n"
" --sib [stage] num synonym for --shift-image-binding\n"
" --shift-sampler-binding [stage] num base binding number for samplers\n"
" --shift-sampler-binding [stage] [num set]... per-descriptor-set shift values\n"
" --ssb [stage] num synonym for --shift-sampler-binding\n"
" --shift-ssbo-binding [stage] num base binding number for SSBOs\n"
" --shift-ssbo-binding [stage] [num set]... per-descriptor-set shift values\n"
" --sbb [stage] num synonym for --shift-ssbo-binding\n"
" --shift-texture-binding [stage] num base binding number for textures\n"
" --shift-texture-binding [stage] [num set]... per-descriptor-set shift values\n"
" --stb [stage] num synonym for --shift-texture-binding\n"
" --shift-uav-binding [stage] num base binding number for UAVs\n"
" --shift-uav-binding [stage] [num set]... per-descriptor-set shift values\n"
" --suavb [stage] num synonym for --shift-uav-binding\n"
" --shift-UBO-binding [stage] num base binding number for UBOs\n"
" --shift-UBO-binding [stage] [num set]... per-descriptor-set shift values\n"
" --shift-cbuffer-binding [stage] num synonym for --shift-UBO-binding\n"
" --shift-cbuffer-binding [stage] [num set]... per-descriptor-set shift values\n"
" --sub [stage] num synonym for --shift-UBO-binding\n"
" --source-entrypoint <name> the given shader source function is\n"
" renamed to be the <name> given in -e\n"
" --sep synonym for --source-entrypoint\n"
" --stdin Read from stdin instead of from a file.\n"
" You'll have to provide the shader stage\n"
" using -S.\n"
" --suppress-warnings suppress GLSL warnings\n"
" (except as required by #extension : warn)\n"
" --target-env {vulkan1.0 | vulkan1.1 | opengl} \n"
" set execution environment that emitted code\n"
" will execute in (as opposed to the language\n"
" semantics selected by --client) defaults:\n"
" 'vulkan1.0' under '--client vulkan<ver>'\n"
" 'opengl' under '--client opengl<ver>'\n"
" --variable-name <name> Creates a C header file that contains a\n"
" uint32_t array named <name>\n"
" initialized with the shader binary code.\n"
" --version synonym for -v\n"
" --vn <name> synonym for --variable-name <name>\n"
" set descriptor set for all resources\n"
" --rsb synonym for --resource-set-binding\n"
" --shift-image-binding [stage] num\n"
" base binding number for images (uav)\n"
" --shift-image-binding [stage] [num set]...\n"
" per-descriptor-set shift values\n"
" --sib synonym for --shift-image-binding\n"
" --shift-sampler-binding [stage] num\n"
" base binding number for samplers\n"
" --shift-sampler-binding [stage] [num set]...\n"
" per-descriptor-set shift values\n"
" --ssb synonym for --shift-sampler-binding\n"
" --shift-ssbo-binding [stage] num base binding number for SSBOs\n"
" --shift-ssbo-binding [stage] [num set]...\n"
" per-descriptor-set shift values\n"
" --sbb synonym for --shift-ssbo-binding\n"
" --shift-texture-binding [stage] num\n"
" base binding number for textures\n"
" --shift-texture-binding [stage] [num set]...\n"
" per-descriptor-set shift values\n"
" --stb synonym for --shift-texture-binding\n"
" --shift-uav-binding [stage] num base binding number for UAVs\n"
" --shift-uav-binding [stage] [num set]...\n"
" per-descriptor-set shift values\n"
" --suavb synonym for --shift-uav-binding\n"
" --shift-UBO-binding [stage] num base binding number for UBOs\n"
" --shift-UBO-binding [stage] [num set]...\n"
" per-descriptor-set shift values\n"
" --sub synonym for --shift-UBO-binding\n"
" --shift-cbuffer-binding | --scb synonyms for --shift-UBO-binding\n"
" --spirv-dis output standard-form disassembly; works only\n"
" when a SPIR-V generation option is also used\n"
" --spirv-val execute the SPIRV-Tools validator\n"
" --source-entrypoint <name> the given shader source function is\n"
" renamed to be the <name> given in -e\n"
" --sep synonym for --source-entrypoint\n"
" --stdin read from stdin instead of from a file;\n"
" requires providing the shader stage using -S\n"
" --target-env {vulkan1.0 | vulkan1.1 | vulkan1.2 | opengl | \n"
" spirv1.0 | spirv1.1 | spirv1.2 | spirv1.3 | spirv1.4 | spirv1.5}\n"
" Set the execution environment that the\n"
" generated code will be executed in.\n"
" Defaults to:\n"
" * vulkan1.0 under --client vulkan<ver>\n"
" * opengl under --client opengl<ver>\n"
" * spirv1.0 under --target-env vulkan1.0\n"
" * spirv1.3 under --target-env vulkan1.1\n"
" * spirv1.5 under --target-env vulkan1.2\n"
" Multiple --target-env can be specified.\n"
" --variable-name <name>\n"
" --vn <name> creates a C header file that contains a\n"
" uint32_t array named <name>\n"
" initialized with the shader binary code\n"
);
exit(EFailUsage);
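As a rough, illustrative sketch only (not part of this commit): the -V/--client/--target-env defaults documented above correspond to the setEnvInput/setEnvClient/setEnvTarget calls made in CompileAndLinkShaderUnits. A host application targeting Vulkan 1.2 through the C++ API could set up the same environment roughly as follows; the include path, stage, and input-semantics version (100) are placeholder assumptions.

#include <glslang/Public/ShaderLang.h>

// Sketch: mirror '-V --target-env vulkan1.2' from the C++ API.
void setVulkan12Environment(glslang::TShader& shader)
{
    // GLSL input for a fragment stage; 100 is a placeholder client input semantics version.
    shader.setEnvInput(glslang::EShSourceGlsl, EShLangFragment,
                       glslang::EShClientVulkan, 100);
    shader.setEnvClient(glslang::EShClientVulkan, glslang::EShTargetVulkan_1_2);
    // Vulkan 1.2 defaults to SPIR-V 1.5, matching the table in the usage text above.
    shader.setEnvTarget(glslang::EShTargetSpv, glslang::EShTargetSpv_1_5);
}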

View File

@ -227,7 +227,7 @@ namespace {
}
}
else if (arg == "--version" || arg == "-V") {
std::cout << basename(argv[0]) << " version 0.97 " << __DATE__ << " " << __TIME__ << std::endl;
std::cout << basename(argv[0]) << " version 0.97" << std::endl;
exit(0);
} else if (arg == "--input" || arg == "-i") {
// Collect input files

View File

@ -0,0 +1,37 @@
# Copyright (C) 2018 Google, Inc.
#
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
#
# Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# These are variables that are overridable by projects that include glslang.
# The path to glslang dependencies.
glslang_spirv_tools_dir = "//Externals/spirv-tools"

Externals/glslang/glslang/CMakeLists.txt vendored Executable file → Normal file
View File

@ -6,7 +6,12 @@ else(WIN32)
message("unknown platform")
endif(WIN32)
if(EMSCRIPTEN OR ENABLE_GLSLANG_WEB)
add_subdirectory(OSDependent/Web)
endif(EMSCRIPTEN OR ENABLE_GLSLANG_WEB)
set(SOURCES
MachineIndependent/glslang.m4
MachineIndependent/glslang.y
MachineIndependent/glslang_tab.cpp
MachineIndependent/attribute.cpp
@ -71,20 +76,15 @@ set(HEADERS
MachineIndependent/preprocessor/PpContext.h
MachineIndependent/preprocessor/PpTokens.h)
# This might be useful for making grammar changes:
#
# find_package(BISON)
# add_custom_command(OUTPUT ${CMAKE_CURRENT_SOURCE_DIR}/MachineIndependent/glslang_tab.cpp ${CMAKE_CURRENT_SOURCE_DIR}/MachineIndependent/glslang_tab.cpp.h
# COMMAND ${BISON_EXECUTABLE} --defines=${CMAKE_CURRENT_SOURCE_DIR}/MachineIndependent/glslang_tab.cpp.h -t ${CMAKE_CURRENT_SOURCE_DIR}/MachineIndependent/glslang.y -o ${CMAKE_CURRENT_SOURCE_DIR}/MachineIndependent/glslang_tab.cpp
# MAIN_DEPENDENCY MachineIndependent/glslang.y
# WORKING_DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR})
# set(BISON_GLSLParser_OUTPUT_SOURCE ${CMAKE_CURRENT_SOURCE_DIR}/MachineIndependent/glslang_tab.cpp)
glslang_pch(SOURCES MachineIndependent/pch.cpp)
add_library(glslang ${LIB_TYPE} ${BISON_GLSLParser_OUTPUT_SOURCE} ${SOURCES} ${HEADERS})
set_property(TARGET glslang PROPERTY FOLDER glslang)
set_property(TARGET glslang PROPERTY POSITION_INDEPENDENT_CODE ON)
target_link_libraries(glslang OGLCompiler OSDependent)
target_include_directories(glslang PUBLIC ..)
target_include_directories(glslang PUBLIC
$<BUILD_INTERFACE:${CMAKE_CURRENT_SOURCE_DIR}/..>
$<INSTALL_INTERFACE:${CMAKE_INSTALL_INCLUDEDIR}>)
if(WIN32 AND BUILD_SHARED_LIBS)
set_target_properties(glslang PROPERTIES PREFIX "")
@ -104,13 +104,15 @@ endif(WIN32)
if(ENABLE_GLSLANG_INSTALL)
if(BUILD_SHARED_LIBS)
install(TARGETS glslang
install(TARGETS glslang EXPORT glslangTargets
ARCHIVE DESTINATION ${CMAKE_INSTALL_LIBDIR}
LIBRARY DESTINATION ${CMAKE_INSTALL_LIBDIR})
LIBRARY DESTINATION ${CMAKE_INSTALL_LIBDIR}
RUNTIME DESTINATION ${CMAKE_INSTALL_BINDIR})
else()
install(TARGETS glslang
install(TARGETS glslang EXPORT glslangTargets
ARCHIVE DESTINATION ${CMAKE_INSTALL_LIBDIR})
endif()
install(EXPORT glslangTargets DESTINATION ${CMAKE_INSTALL_LIBDIR}/cmake)
endif(ENABLE_GLSLANG_INSTALL)
if(ENABLE_GLSLANG_INSTALL)

View File

@ -61,6 +61,8 @@ enum TBasicType {
EbtSampler,
EbtStruct,
EbtBlock,
EbtAccStructNV,
EbtReference,
// HLSL types that live only temporarily.
EbtString,
@ -88,6 +90,12 @@ enum TStorageQualifier {
EvqBuffer, // read/write, shared with app
EvqShared, // compute shader's read/write 'shared' qualifier
EvqPayloadNV,
EvqPayloadInNV,
EvqHitAttrNV,
EvqCallableDataNV,
EvqCallableDataInNV,
// parameters
EvqIn, // also, for 'in' in the grammar before we know if it's a pipeline input or an 'in' parameter
EvqOut, // also, for 'out' in the grammar before we know if it's a pipeline output or an 'out' parameter
@ -207,7 +215,6 @@ enum TBuiltInVariable {
EbvSampleMask,
EbvHelperInvocation,
#ifdef AMD_EXTENSIONS
EbvBaryCoordNoPersp,
EbvBaryCoordNoPerspCentroid,
EbvBaryCoordNoPerspSample,
@ -215,19 +222,54 @@ enum TBuiltInVariable {
EbvBaryCoordSmoothCentroid,
EbvBaryCoordSmoothSample,
EbvBaryCoordPullModel,
#endif
EbvViewIndex,
EbvDeviceIndex,
#ifdef NV_EXTENSIONS
EbvFragSizeEXT,
EbvFragInvocationCountEXT,
EbvViewportMaskNV,
EbvSecondaryPositionNV,
EbvSecondaryViewportMaskNV,
EbvPositionPerViewNV,
EbvViewportMaskPerViewNV,
EbvFragFullyCoveredNV,
#endif
EbvFragmentSizeNV,
EbvInvocationsPerPixelNV,
// ray tracing
EbvLaunchIdNV,
EbvLaunchSizeNV,
EbvInstanceCustomIndexNV,
EbvWorldRayOriginNV,
EbvWorldRayDirectionNV,
EbvObjectRayOriginNV,
EbvObjectRayDirectionNV,
EbvRayTminNV,
EbvRayTmaxNV,
EbvHitTNV,
EbvHitKindNV,
EbvObjectToWorldNV,
EbvWorldToObjectNV,
EbvIncomingRayFlagsNV,
// barycentrics
EbvBaryCoordNV,
EbvBaryCoordNoPerspNV,
// mesh shaders
EbvTaskCountNV,
EbvPrimitiveCountNV,
EbvPrimitiveIndicesNV,
EbvClipDistancePerViewNV,
EbvCullDistancePerViewNV,
EbvLayerPerViewNV,
EbvMeshViewCountNV,
EbvMeshViewIndicesNV,
// sm builtins
EbvWarpsPerSM,
EbvSMCount,
EbvWarpID,
EbvSMID,
// HLSL built-ins that live only temporarily, until they get remapped
// to one of the above.
@ -247,6 +289,19 @@ enum TBuiltInVariable {
EbvLast
};
// In this enum, order matters; users can assume higher precision is a bigger value
// and EpqNone is 0.
enum TPrecisionQualifier {
EpqNone = 0,
EpqLow,
EpqMedium,
EpqHigh
};
#ifdef GLSLANG_WEB
__inline const char* GetStorageQualifierString(TStorageQualifier q) { return ""; }
__inline const char* GetPrecisionQualifierString(TPrecisionQualifier p) { return ""; }
#else
// These will show up in error messages
__inline const char* GetStorageQualifierString(TStorageQualifier q)
{
@ -273,6 +328,11 @@ __inline const char* GetStorageQualifierString(TStorageQualifier q)
case EvqPointCoord: return "gl_PointCoord"; break;
case EvqFragColor: return "fragColor"; break;
case EvqFragDepth: return "gl_FragDepth"; break;
case EvqPayloadNV: return "rayPayloadNV"; break;
case EvqPayloadInNV: return "rayPayloadInNV"; break;
case EvqHitAttrNV: return "hitAttributeNV"; break;
case EvqCallableDataNV: return "callableDataNV"; break;
case EvqCallableDataInNV: return "callableDataInNV"; break;
default: return "unknown qualifier";
}
}
@ -287,6 +347,8 @@ __inline const char* GetBuiltInVariableString(TBuiltInVariable v)
case EbvLocalInvocationId: return "LocalInvocationID";
case EbvGlobalInvocationId: return "GlobalInvocationID";
case EbvLocalInvocationIndex: return "LocalInvocationIndex";
case EbvNumSubgroups: return "NumSubgroups";
case EbvSubgroupID: return "SubgroupID";
case EbvSubGroupSize: return "SubGroupSize";
case EbvSubGroupInvocation: return "SubGroupInvocation";
case EbvSubGroupEqMask: return "SubGroupEqMask";
@ -294,6 +356,13 @@ __inline const char* GetBuiltInVariableString(TBuiltInVariable v)
case EbvSubGroupGtMask: return "SubGroupGtMask";
case EbvSubGroupLeMask: return "SubGroupLeMask";
case EbvSubGroupLtMask: return "SubGroupLtMask";
case EbvSubgroupSize2: return "SubgroupSize";
case EbvSubgroupInvocation2: return "SubgroupInvocationID";
case EbvSubgroupEqMask2: return "SubgroupEqMask";
case EbvSubgroupGeMask2: return "SubgroupGeMask";
case EbvSubgroupGtMask2: return "SubgroupGtMask";
case EbvSubgroupLeMask2: return "SubgroupLeMask";
case EbvSubgroupLtMask2: return "SubgroupLtMask";
case EbvVertexId: return "VertexId";
case EbvInstanceId: return "InstanceId";
case EbvVertexIndex: return "VertexIndex";
@ -345,7 +414,6 @@ __inline const char* GetBuiltInVariableString(TBuiltInVariable v)
case EbvSampleMask: return "SampleMaskIn";
case EbvHelperInvocation: return "HelperInvocation";
#ifdef AMD_EXTENSIONS
case EbvBaryCoordNoPersp: return "BaryCoordNoPersp";
case EbvBaryCoordNoPerspCentroid: return "BaryCoordNoPerspCentroid";
case EbvBaryCoordNoPerspSample: return "BaryCoordNoPerspSample";
@ -353,32 +421,57 @@ __inline const char* GetBuiltInVariableString(TBuiltInVariable v)
case EbvBaryCoordSmoothCentroid: return "BaryCoordSmoothCentroid";
case EbvBaryCoordSmoothSample: return "BaryCoordSmoothSample";
case EbvBaryCoordPullModel: return "BaryCoordPullModel";
#endif
case EbvViewIndex: return "ViewIndex";
case EbvDeviceIndex: return "DeviceIndex";
#ifdef NV_EXTENSIONS
case EbvFragSizeEXT: return "FragSizeEXT";
case EbvFragInvocationCountEXT: return "FragInvocationCountEXT";
case EbvViewportMaskNV: return "ViewportMaskNV";
case EbvSecondaryPositionNV: return "SecondaryPositionNV";
case EbvSecondaryViewportMaskNV: return "SecondaryViewportMaskNV";
case EbvPositionPerViewNV: return "PositionPerViewNV";
case EbvViewportMaskPerViewNV: return "ViewportMaskPerViewNV";
case EbvFragFullyCoveredNV: return "FragFullyCoveredNV";
#endif
case EbvFragmentSizeNV: return "FragmentSizeNV";
case EbvInvocationsPerPixelNV: return "InvocationsPerPixelNV";
case EbvLaunchIdNV: return "LaunchIdNV";
case EbvLaunchSizeNV: return "LaunchSizeNV";
case EbvInstanceCustomIndexNV: return "InstanceCustomIndexNV";
case EbvWorldRayOriginNV: return "WorldRayOriginNV";
case EbvWorldRayDirectionNV: return "WorldRayDirectionNV";
case EbvObjectRayOriginNV: return "ObjectRayOriginNV";
case EbvObjectRayDirectionNV: return "ObjectRayDirectionNV";
case EbvRayTminNV: return "ObjectRayTminNV";
case EbvRayTmaxNV: return "ObjectRayTmaxNV";
case EbvHitTNV: return "HitTNV";
case EbvHitKindNV: return "HitKindNV";
case EbvIncomingRayFlagsNV: return "IncomingRayFlagsNV";
case EbvObjectToWorldNV: return "ObjectToWorldNV";
case EbvWorldToObjectNV: return "WorldToObjectNV";
case EbvBaryCoordNV: return "BaryCoordNV";
case EbvBaryCoordNoPerspNV: return "BaryCoordNoPerspNV";
case EbvTaskCountNV: return "TaskCountNV";
case EbvPrimitiveCountNV: return "PrimitiveCountNV";
case EbvPrimitiveIndicesNV: return "PrimitiveIndicesNV";
case EbvClipDistancePerViewNV: return "ClipDistancePerViewNV";
case EbvCullDistancePerViewNV: return "CullDistancePerViewNV";
case EbvLayerPerViewNV: return "LayerPerViewNV";
case EbvMeshViewCountNV: return "MeshViewCountNV";
case EbvMeshViewIndicesNV: return "MeshViewIndicesNV";
case EbvWarpsPerSM: return "WarpsPerSMNV";
case EbvSMCount: return "SMCountNV";
case EbvWarpID: return "WarpIDNV";
case EbvSMID: return "SMIDNV";
default: return "unknown built-in variable";
}
}
// In this enum, order matters; users can assume higher precision is a bigger value
// and EpqNone is 0.
enum TPrecisionQualifier {
EpqNone = 0,
EpqLow,
EpqMedium,
EpqHigh
};
__inline const char* GetPrecisionQualifierString(TPrecisionQualifier p)
{
switch (p) {
@ -389,6 +482,7 @@ __inline const char* GetPrecisionQualifierString(TPrecisionQualifier p)
default: return "unknown precision qualifier";
}
}
#endif
__inline bool isTypeSignedInt(TBasicType type)
{
@ -433,7 +527,8 @@ __inline bool isTypeFloat(TBasicType type)
}
}
__inline int getTypeRank(TBasicType type) {
__inline int getTypeRank(TBasicType type)
{
int res = -1;
switch(type) {
case EbtInt8:

View File

@ -38,7 +38,7 @@
#define _COMMON_INCLUDED_
#if defined(__ANDROID__) || _MSC_VER < 1700
#if defined(__ANDROID__) || (defined(_MSC_VER) && _MSC_VER < 1700)
#include <sstream>
namespace std {
template<typename T>
@ -102,6 +102,7 @@ std::string to_string(const T& val) {
#include <algorithm>
#include <string>
#include <cstdio>
#include <cstdlib>
#include <cassert>
#include "PoolAlloc.h"
@ -229,16 +230,29 @@ inline const TString String(const int i, const int /*base*/ = 10)
#endif
struct TSourceLoc {
void init() { name = nullptr; string = 0; line = 0; column = 0; }
void init()
{
name = nullptr; string = 0; line = 0; column = 0;
}
void init(int stringNum) { init(); string = stringNum; }
// Returns the name if it exists. Otherwise, returns the string number.
std::string getStringNameOrNum(bool quoteStringName = true) const
{
if (name != nullptr)
return quoteStringName ? ("\"" + std::string(name) + "\"") : name;
if (name != nullptr) {
TString qstr = quoteStringName ? ("\"" + *name + "\"") : *name;
std::string ret_str(qstr.c_str());
return ret_str;
}
return std::to_string((long long)string);
}
const char* name; // descriptive name for this string
const char* getFilename() const
{
if (name == nullptr)
return nullptr;
return name->c_str();
}
const char* getFilenameStr() const { return name == nullptr ? "" : name->c_str(); }
TString* name; // descriptive name for this string, when a textual name is available, otherwise nullptr
int string;
int line;
int column;

View File

@ -213,6 +213,28 @@ public:
return false;
switch (type) {
case EbtInt:
if (constant.iConst == iConst)
return true;
break;
case EbtUint:
if (constant.uConst == uConst)
return true;
break;
case EbtBool:
if (constant.bConst == bConst)
return true;
break;
case EbtDouble:
if (constant.dConst == dConst)
return true;
break;
#ifndef GLSLANG_WEB
case EbtInt16:
if (constant.i16Const == i16Const)
return true;
@ -232,16 +254,6 @@ public:
if (constant.u8Const == u8Const)
return true;
break;
case EbtInt:
if (constant.iConst == iConst)
return true;
break;
case EbtUint:
if (constant.uConst == uConst)
return true;
break;
case EbtInt64:
if (constant.i64Const == i64Const)
@ -253,16 +265,7 @@ public:
return true;
break;
case EbtDouble:
if (constant.dConst == dConst)
return true;
break;
case EbtBool:
if (constant.bConst == bConst)
return true;
break;
#endif
default:
assert(false && "Default missing");
}
@ -329,6 +332,22 @@ public:
{
assert(type == constant.type);
switch (type) {
case EbtInt:
if (iConst > constant.iConst)
return true;
return false;
case EbtUint:
if (uConst > constant.uConst)
return true;
return false;
case EbtDouble:
if (dConst > constant.dConst)
return true;
return false;
#ifndef GLSLANG_WEB
case EbtInt8:
if (i8Const > constant.i8Const)
return true;
@ -348,16 +367,6 @@ public:
if (u16Const > constant.u16Const)
return true;
return false;
case EbtInt:
if (iConst > constant.iConst)
return true;
return false;
case EbtUint:
if (uConst > constant.uConst)
return true;
return false;
case EbtInt64:
if (i64Const > constant.i64Const)
@ -369,11 +378,7 @@ public:
return true;
return false;
case EbtDouble:
if (dConst > constant.dConst)
return true;
return false;
#endif
default:
assert(false && "Default missing");
return false;
@ -384,6 +389,7 @@ public:
{
assert(type == constant.type);
switch (type) {
#ifndef GLSLANG_WEB
case EbtInt8:
if (i8Const < constant.i8Const)
return true;
@ -394,7 +400,7 @@ public:
return true;
return false;
case EbtInt16:
case EbtInt16:
if (i16Const < constant.i16Const)
return true;
@ -402,17 +408,6 @@ public:
case EbtUint16:
if (u16Const < constant.u16Const)
return true;
return false;
case EbtInt:
if (iConst < constant.iConst)
return true;
return false;
case EbtUint:
if (uConst < constant.uConst)
return true;
return false;
case EbtInt64:
if (i64Const < constant.i64Const)
@ -424,10 +419,21 @@ public:
return true;
return false;
#endif
case EbtDouble:
if (dConst < constant.dConst)
return true;
return false;
case EbtInt:
if (iConst < constant.iConst)
return true;
return false;
case EbtUint:
if (uConst < constant.uConst)
return true;
return false;
default:
assert(false && "Default missing");
@ -440,15 +446,17 @@ public:
TConstUnion returnValue;
assert(type == constant.type);
switch (type) {
case EbtInt: returnValue.setIConst(iConst + constant.iConst); break;
case EbtUint: returnValue.setUConst(uConst + constant.uConst); break;
case EbtDouble: returnValue.setDConst(dConst + constant.dConst); break;
#ifndef GLSLANG_WEB
case EbtInt8: returnValue.setI8Const(i8Const + constant.i8Const); break;
case EbtInt16: returnValue.setI16Const(i16Const + constant.i16Const); break;
case EbtInt: returnValue.setIConst(iConst + constant.iConst); break;
case EbtInt64: returnValue.setI64Const(i64Const + constant.i64Const); break;
case EbtUint8: returnValue.setU8Const(u8Const + constant.u8Const); break;
case EbtUint16: returnValue.setU16Const(u16Const + constant.u16Const); break;
case EbtUint: returnValue.setUConst(uConst + constant.uConst); break;
case EbtUint64: returnValue.setU64Const(u64Const + constant.u64Const); break;
case EbtDouble: returnValue.setDConst(dConst + constant.dConst); break;
#endif
default: assert(false && "Default missing");
}
@ -460,15 +468,17 @@ public:
TConstUnion returnValue;
assert(type == constant.type);
switch (type) {
case EbtInt: returnValue.setIConst(iConst - constant.iConst); break;
case EbtUint: returnValue.setUConst(uConst - constant.uConst); break;
case EbtDouble: returnValue.setDConst(dConst - constant.dConst); break;
#ifndef GLSLANG_WEB
case EbtInt8: returnValue.setI8Const(i8Const - constant.i8Const); break;
case EbtInt16: returnValue.setI16Const(i16Const - constant.i16Const); break;
case EbtInt: returnValue.setIConst(iConst - constant.iConst); break;
case EbtInt64: returnValue.setI64Const(i64Const - constant.i64Const); break;
case EbtUint8: returnValue.setU8Const(u8Const - constant.u8Const); break;
case EbtUint16: returnValue.setU16Const(u16Const - constant.u16Const); break;
case EbtUint: returnValue.setUConst(uConst - constant.uConst); break;
case EbtUint64: returnValue.setU64Const(u64Const - constant.u64Const); break;
case EbtDouble: returnValue.setDConst(dConst - constant.dConst); break;
#endif
default: assert(false && "Default missing");
}
@ -480,15 +490,17 @@ public:
TConstUnion returnValue;
assert(type == constant.type);
switch (type) {
case EbtInt: returnValue.setIConst(iConst * constant.iConst); break;
case EbtUint: returnValue.setUConst(uConst * constant.uConst); break;
case EbtDouble: returnValue.setDConst(dConst * constant.dConst); break;
#ifndef GLSLANG_WEB
case EbtInt8: returnValue.setI8Const(i8Const * constant.i8Const); break;
case EbtInt16: returnValue.setI16Const(i16Const * constant.i16Const); break;
case EbtInt: returnValue.setIConst(iConst * constant.iConst); break;
case EbtInt64: returnValue.setI64Const(i64Const * constant.i64Const); break;
case EbtUint8: returnValue.setU8Const(u8Const * constant.u8Const); break;
case EbtUint16: returnValue.setU16Const(u16Const * constant.u16Const); break;
case EbtUint: returnValue.setUConst(uConst * constant.uConst); break;
case EbtUint64: returnValue.setU64Const(u64Const * constant.u64Const); break;
case EbtDouble: returnValue.setDConst(dConst * constant.dConst); break;
#endif
default: assert(false && "Default missing");
}
@ -500,14 +512,16 @@ public:
TConstUnion returnValue;
assert(type == constant.type);
switch (type) {
case EbtInt: returnValue.setIConst(iConst % constant.iConst); break;
case EbtUint: returnValue.setUConst(uConst % constant.uConst); break;
#ifndef GLSLANG_WEB
case EbtInt8: returnValue.setI8Const(i8Const % constant.i8Const); break;
case EbtInt16: returnValue.setI16Const(i16Const % constant.i16Const); break;
case EbtInt: returnValue.setIConst(iConst % constant.iConst); break;
case EbtInt64: returnValue.setI64Const(i64Const % constant.i64Const); break;
case EbtUint8: returnValue.setU8Const(u8Const % constant.u8Const); break;
case EbtUint16: returnValue.setU16Const(u16Const % constant.u16Const); break;
case EbtUint: returnValue.setUConst(uConst % constant.uConst); break;
case EbtUint64: returnValue.setU64Const(u64Const % constant.u64Const); break;
#endif
default: assert(false && "Default missing");
}
@ -518,6 +532,7 @@ public:
{
TConstUnion returnValue;
switch (type) {
#ifndef GLSLANG_WEB
case EbtInt8:
switch (constant.type) {
case EbtInt8: returnValue.setI8Const(i8Const >> constant.i8Const); break;
@ -570,32 +585,38 @@ public:
default: assert(false && "Default missing");
}
break;
#endif
case EbtInt:
switch (constant.type) {
case EbtInt: returnValue.setIConst(iConst >> constant.iConst); break;
case EbtUint: returnValue.setIConst(iConst >> constant.uConst); break;
#ifndef GLSLANG_WEB
case EbtInt8: returnValue.setIConst(iConst >> constant.i8Const); break;
case EbtUint8: returnValue.setIConst(iConst >> constant.u8Const); break;
case EbtInt16: returnValue.setIConst(iConst >> constant.i16Const); break;
case EbtUint16: returnValue.setIConst(iConst >> constant.u16Const); break;
case EbtInt: returnValue.setIConst(iConst >> constant.iConst); break;
case EbtUint: returnValue.setIConst(iConst >> constant.uConst); break;
case EbtInt64: returnValue.setIConst(iConst >> constant.i64Const); break;
case EbtUint64: returnValue.setIConst(iConst >> constant.u64Const); break;
#endif
default: assert(false && "Default missing");
}
break;
case EbtUint:
switch (constant.type) {
case EbtInt: returnValue.setUConst(uConst >> constant.iConst); break;
case EbtUint: returnValue.setUConst(uConst >> constant.uConst); break;
#ifndef GLSLANG_WEB
case EbtInt8: returnValue.setUConst(uConst >> constant.i8Const); break;
case EbtUint8: returnValue.setUConst(uConst >> constant.u8Const); break;
case EbtInt16: returnValue.setUConst(uConst >> constant.i16Const); break;
case EbtUint16: returnValue.setUConst(uConst >> constant.u16Const); break;
case EbtInt: returnValue.setUConst(uConst >> constant.iConst); break;
case EbtUint: returnValue.setUConst(uConst >> constant.uConst); break;
case EbtInt64: returnValue.setUConst(uConst >> constant.i64Const); break;
case EbtUint64: returnValue.setUConst(uConst >> constant.u64Const); break;
#endif
default: assert(false && "Default missing");
}
break;
#ifndef GLSLANG_WEB
case EbtInt64:
switch (constant.type) {
case EbtInt8: returnValue.setI64Const(i64Const >> constant.i8Const); break;
@ -622,6 +643,7 @@ public:
default: assert(false && "Default missing");
}
break;
#endif
default: assert(false && "Default missing");
}
@ -632,6 +654,7 @@ public:
{
TConstUnion returnValue;
switch (type) {
#ifndef GLSLANG_WEB
case EbtInt8:
switch (constant.type) {
case EbtInt8: returnValue.setI8Const(i8Const << constant.i8Const); break;
@ -684,32 +707,6 @@ public:
default: assert(false && "Default missing");
}
break;
case EbtInt:
switch (constant.type) {
case EbtInt8: returnValue.setIConst(iConst << constant.i8Const); break;
case EbtUint8: returnValue.setIConst(iConst << constant.u8Const); break;
case EbtInt16: returnValue.setIConst(iConst << constant.i16Const); break;
case EbtUint16: returnValue.setIConst(iConst << constant.u16Const); break;
case EbtInt: returnValue.setIConst(iConst << constant.iConst); break;
case EbtUint: returnValue.setIConst(iConst << constant.uConst); break;
case EbtInt64: returnValue.setIConst(iConst << constant.i64Const); break;
case EbtUint64: returnValue.setIConst(iConst << constant.u64Const); break;
default: assert(false && "Default missing");
}
break;
case EbtUint:
switch (constant.type) {
case EbtInt8: returnValue.setUConst(uConst << constant.i8Const); break;
case EbtUint8: returnValue.setUConst(uConst << constant.u8Const); break;
case EbtInt16: returnValue.setUConst(uConst << constant.i16Const); break;
case EbtUint16: returnValue.setUConst(uConst << constant.u16Const); break;
case EbtInt: returnValue.setUConst(uConst << constant.iConst); break;
case EbtUint: returnValue.setUConst(uConst << constant.uConst); break;
case EbtInt64: returnValue.setUConst(uConst << constant.i64Const); break;
case EbtUint64: returnValue.setUConst(uConst << constant.u64Const); break;
default: assert(false && "Default missing");
}
break;
case EbtInt64:
switch (constant.type) {
case EbtInt8: returnValue.setI64Const(i64Const << constant.i8Const); break;
@ -736,6 +733,37 @@ public:
default: assert(false && "Default missing");
}
break;
#endif
case EbtInt:
switch (constant.type) {
case EbtInt: returnValue.setIConst(iConst << constant.iConst); break;
case EbtUint: returnValue.setIConst(iConst << constant.uConst); break;
#ifndef GLSLANG_WEB
case EbtInt8: returnValue.setIConst(iConst << constant.i8Const); break;
case EbtUint8: returnValue.setIConst(iConst << constant.u8Const); break;
case EbtInt16: returnValue.setIConst(iConst << constant.i16Const); break;
case EbtUint16: returnValue.setIConst(iConst << constant.u16Const); break;
case EbtInt64: returnValue.setIConst(iConst << constant.i64Const); break;
case EbtUint64: returnValue.setIConst(iConst << constant.u64Const); break;
#endif
default: assert(false && "Default missing");
}
break;
case EbtUint:
switch (constant.type) {
case EbtInt: returnValue.setUConst(uConst << constant.iConst); break;
case EbtUint: returnValue.setUConst(uConst << constant.uConst); break;
#ifndef GLSLANG_WEB
case EbtInt8: returnValue.setUConst(uConst << constant.i8Const); break;
case EbtUint8: returnValue.setUConst(uConst << constant.u8Const); break;
case EbtInt16: returnValue.setUConst(uConst << constant.i16Const); break;
case EbtUint16: returnValue.setUConst(uConst << constant.u16Const); break;
case EbtInt64: returnValue.setUConst(uConst << constant.i64Const); break;
case EbtUint64: returnValue.setUConst(uConst << constant.u64Const); break;
#endif
default: assert(false && "Default missing");
}
break;
default: assert(false && "Default missing");
}
@ -747,14 +775,16 @@ public:
TConstUnion returnValue;
assert(type == constant.type);
switch (type) {
case EbtInt: returnValue.setIConst(iConst & constant.iConst); break;
case EbtUint: returnValue.setUConst(uConst & constant.uConst); break;
#ifndef GLSLANG_WEB
case EbtInt8: returnValue.setI8Const(i8Const & constant.i8Const); break;
case EbtUint8: returnValue.setU8Const(u8Const & constant.u8Const); break;
case EbtInt16: returnValue.setI16Const(i16Const & constant.i16Const); break;
case EbtUint16: returnValue.setU16Const(u16Const & constant.u16Const); break;
case EbtInt: returnValue.setIConst(iConst & constant.iConst); break;
case EbtUint: returnValue.setUConst(uConst & constant.uConst); break;
case EbtInt64: returnValue.setI64Const(i64Const & constant.i64Const); break;
case EbtUint64: returnValue.setU64Const(u64Const & constant.u64Const); break;
#endif
default: assert(false && "Default missing");
}
@ -766,14 +796,16 @@ public:
TConstUnion returnValue;
assert(type == constant.type);
switch (type) {
case EbtInt: returnValue.setIConst(iConst | constant.iConst); break;
case EbtUint: returnValue.setUConst(uConst | constant.uConst); break;
#ifndef GLSLANG_WEB
case EbtInt8: returnValue.setI8Const(i8Const | constant.i8Const); break;
case EbtUint8: returnValue.setU8Const(u8Const | constant.u8Const); break;
case EbtInt16: returnValue.setI16Const(i16Const | constant.i16Const); break;
case EbtUint16: returnValue.setU16Const(u16Const | constant.u16Const); break;
case EbtInt: returnValue.setIConst(iConst | constant.iConst); break;
case EbtUint: returnValue.setUConst(uConst | constant.uConst); break;
case EbtInt64: returnValue.setI64Const(i64Const | constant.i64Const); break;
case EbtUint64: returnValue.setU64Const(u64Const | constant.u64Const); break;
#endif
default: assert(false && "Default missing");
}
@ -785,14 +817,16 @@ public:
TConstUnion returnValue;
assert(type == constant.type);
switch (type) {
case EbtInt: returnValue.setIConst(iConst ^ constant.iConst); break;
case EbtUint: returnValue.setUConst(uConst ^ constant.uConst); break;
#ifndef GLSLANG_WEB
case EbtInt8: returnValue.setI8Const(i8Const ^ constant.i8Const); break;
case EbtUint8: returnValue.setU8Const(u8Const ^ constant.u8Const); break;
case EbtInt16: returnValue.setI16Const(i16Const ^ constant.i16Const); break;
case EbtUint16: returnValue.setU16Const(u16Const ^ constant.u16Const); break;
case EbtInt: returnValue.setIConst(iConst ^ constant.iConst); break;
case EbtUint: returnValue.setUConst(uConst ^ constant.uConst); break;
case EbtInt64: returnValue.setI64Const(i64Const ^ constant.i64Const); break;
case EbtUint64: returnValue.setU64Const(u64Const ^ constant.u64Const); break;
#endif
default: assert(false && "Default missing");
}
@ -803,14 +837,16 @@ public:
{
TConstUnion returnValue;
switch (type) {
case EbtInt: returnValue.setIConst(~iConst); break;
case EbtUint: returnValue.setUConst(~uConst); break;
#ifndef GLSLANG_WEB
case EbtInt8: returnValue.setI8Const(~i8Const); break;
case EbtUint8: returnValue.setU8Const(~u8Const); break;
case EbtInt16: returnValue.setI16Const(~i16Const); break;
case EbtUint16: returnValue.setU16Const(~u16Const); break;
case EbtInt: returnValue.setIConst(~iConst); break;
case EbtUint: returnValue.setUConst(~uConst); break;
case EbtInt64: returnValue.setI64Const(~i64Const); break;
case EbtUint64: returnValue.setU64Const(~u64Const); break;
#endif
default: assert(false && "Default missing");
}

View File

@ -304,7 +304,6 @@ public:
size_type max_size() const { return static_cast<size_type>(-1) / sizeof(T); }
size_type max_size(int size) const { return static_cast<size_type>(-1) / size; }
void setAllocator(TPoolAllocator* a) { allocator = *a; }
TPoolAllocator& getAllocator() const { return allocator; }
protected:

View File

@ -133,6 +133,15 @@ struct TBuiltInResource {
int maxCullDistances;
int maxCombinedClipAndCullDistances;
int maxSamples;
int maxMeshOutputVerticesNV;
int maxMeshOutputPrimitivesNV;
int maxMeshWorkGroupSizeX_NV;
int maxMeshWorkGroupSizeY_NV;
int maxMeshWorkGroupSizeZ_NV;
int maxTaskWorkGroupSizeX_NV;
int maxTaskWorkGroupSizeY_NV;
int maxTaskWorkGroupSizeZ_NV;
int maxMeshViewCountNV;
TLimits limits;
};
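A minimal sketch (not from this commit) of how an embedder might fill in the nine NV mesh/task limits added to TBuiltInResource above when building its own resource table; the header path and numeric values are placeholder assumptions, not mandated defaults.

#include <glslang/Include/ResourceLimits.h>

// Sketch: zero-initialize the aggregate, then set only the new NV mesh/task limits.
TBuiltInResource makeMeshCapableResources()
{
    TBuiltInResource resources = {};          // all other limits left at zero for brevity
    resources.maxMeshOutputVerticesNV   = 256;
    resources.maxMeshOutputPrimitivesNV = 512;
    resources.maxMeshWorkGroupSizeX_NV  = 32;
    resources.maxMeshWorkGroupSizeY_NV  = 1;
    resources.maxMeshWorkGroupSizeZ_NV  = 1;
    resources.maxTaskWorkGroupSizeX_NV  = 32;
    resources.maxTaskWorkGroupSizeY_NV  = 1;
    resources.maxTaskWorkGroupSizeZ_NV  = 1;
    resources.maxMeshViewCountNV        = 4;
    return resources;
}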

File diff suppressed because it is too large

View File

@ -254,7 +254,9 @@ struct TArraySizes {
void addInnerSize() { addInnerSize((unsigned)UnsizedArraySize); }
void addInnerSize(int s) { addInnerSize((unsigned)s, nullptr); }
void addInnerSize(int s, TIntermTyped* n) { sizes.push_back((unsigned)s, n); }
void addInnerSize(TArraySize pair) { sizes.push_back(pair.size, pair.node); }
void addInnerSize(TArraySize pair) {
sizes.push_back(pair.size, pair.node);
}
void addInnerSizes(const TArraySizes& s) { sizes.push_back(s.sizes); }
void changeOuterSize(int s) { sizes.changeFront((unsigned)s); }
int getImplicitSize() const { return implicitArraySize; }
@ -318,8 +320,8 @@ struct TArraySizes {
void setVariablyIndexed() { variablyIndexed = true; }
bool isVariablyIndexed() const { return variablyIndexed; }
bool operator==(const TArraySizes& rhs) { return sizes == rhs.sizes; }
bool operator!=(const TArraySizes& rhs) { return sizes != rhs.sizes; }
bool operator==(const TArraySizes& rhs) const { return sizes == rhs.sizes; }
bool operator!=(const TArraySizes& rhs) const { return sizes != rhs.sizes; }
protected:
TSmallArrayVector sizes;

Externals/glslang/glslang/Include/intermediate.h vendored Executable file → Normal file
View File

@ -85,6 +85,8 @@ enum TOperator {
EOpPreIncrement,
EOpPreDecrement,
EOpCopyObject,
// (u)int* -> bool
EOpConvInt8ToBool,
EOpConvUint8ToBool,
@ -269,6 +271,14 @@ enum TOperator {
EOpConvDoubleToFloat16,
EOpConvDoubleToFloat,
// uint64_t <-> pointer
EOpConvUint64ToPtr,
EOpConvPtrToUint64,
// uvec2 <-> pointer
EOpConvUvec2ToPtr,
EOpConvPtrToUvec2,
//
// binary operations
//
@ -416,11 +426,9 @@ enum TOperator {
EOpReflect,
EOpRefract,
#ifdef AMD_EXTENSIONS
EOpMin3,
EOpMax3,
EOpMid3,
#endif
EOpDPdx, // Fragment only
EOpDPdy, // Fragment only
@ -435,10 +443,7 @@ enum TOperator {
EOpInterpolateAtCentroid, // Fragment only
EOpInterpolateAtSample, // Fragment only
EOpInterpolateAtOffset, // Fragment only
#ifdef AMD_EXTENSIONS
EOpInterpolateAtVertex,
#endif
EOpMatrixTimesMatrix,
EOpOuterProduct,
@ -528,7 +533,6 @@ enum TOperator {
EOpSubgroupQuadSwapVertical,
EOpSubgroupQuadSwapDiagonal,
#ifdef NV_EXTENSIONS
EOpSubgroupPartition,
EOpSubgroupPartitionedAdd,
EOpSubgroupPartitionedMul,
@ -551,11 +555,9 @@ enum TOperator {
EOpSubgroupPartitionedExclusiveAnd,
EOpSubgroupPartitionedExclusiveOr,
EOpSubgroupPartitionedExclusiveXor,
#endif
EOpSubgroupGuardStop,
#ifdef AMD_EXTENSIONS
EOpMinInvocations,
EOpMaxInvocations,
EOpAddInvocations,
@ -582,7 +584,6 @@ enum TOperator {
EOpCubeFaceIndex,
EOpCubeFaceCoord,
EOpTime,
#endif
EOpAtomicAdd,
EOpAtomicMin,
@ -592,6 +593,8 @@ enum TOperator {
EOpAtomicXor,
EOpAtomicExchange,
EOpAtomicCompSwap,
EOpAtomicLoad,
EOpAtomicStore,
EOpAtomicCounterIncrement, // results in pre-increment value
EOpAtomicCounterDecrement, // results in post-decrement value
@ -609,6 +612,15 @@ enum TOperator {
EOpAny,
EOpAll,
EOpCooperativeMatrixLoad,
EOpCooperativeMatrixStore,
EOpCooperativeMatrixMulAdd,
EOpBeginInvocationInterlock, // Fragment only
EOpEndInvocationInterlock, // Fragment only
EOpIsHelperInvocation,
//
// Branch
//
@ -619,6 +631,7 @@ enum TOperator {
EOpContinue,
EOpCase,
EOpDefault,
EOpDemote, // Fragment only
//
// Constructors
@ -636,9 +649,21 @@ enum TOperator {
EOpConstructBool,
EOpConstructFloat,
EOpConstructDouble,
// Keep vector and matrix constructors in a consistent relative order for
// TParseContext::constructBuiltIn, which converts between 8/16/32 bit
// vector constructors
EOpConstructVec2,
EOpConstructVec3,
EOpConstructVec4,
EOpConstructMat2x2,
EOpConstructMat2x3,
EOpConstructMat2x4,
EOpConstructMat3x2,
EOpConstructMat3x3,
EOpConstructMat3x4,
EOpConstructMat4x2,
EOpConstructMat4x3,
EOpConstructMat4x4,
EOpConstructDVec2,
EOpConstructDVec3,
EOpConstructDVec4,
@ -669,15 +694,6 @@ enum TOperator {
EOpConstructU64Vec2,
EOpConstructU64Vec3,
EOpConstructU64Vec4,
EOpConstructMat2x2,
EOpConstructMat2x3,
EOpConstructMat2x4,
EOpConstructMat3x2,
EOpConstructMat3x3,
EOpConstructMat3x4,
EOpConstructMat4x2,
EOpConstructMat4x3,
EOpConstructMat4x4,
EOpConstructDMat2x2,
EOpConstructDMat2x3,
EOpConstructDMat2x4,
@ -730,6 +746,8 @@ enum TOperator {
EOpConstructStruct,
EOpConstructTextureSampler,
EOpConstructNonuniform, // expected to be transformed away, not present in final AST
EOpConstructReference,
EOpConstructCooperativeMatrix,
EOpConstructGuardEnd,
//
@ -772,10 +790,8 @@ enum TOperator {
EOpImageQuerySamples,
EOpImageLoad,
EOpImageStore,
#ifdef AMD_EXTENSIONS
EOpImageLoadLod,
EOpImageStoreLod,
#endif
EOpImageAtomicAdd,
EOpImageAtomicMin,
EOpImageAtomicMax,
@ -784,13 +800,13 @@ enum TOperator {
EOpImageAtomicXor,
EOpImageAtomicExchange,
EOpImageAtomicCompSwap,
EOpImageAtomicLoad,
EOpImageAtomicStore,
EOpSubpassLoad,
EOpSubpassLoadMS,
EOpSparseImageLoad,
#ifdef AMD_EXTENSIONS
EOpSparseImageLoadLod,
#endif
EOpImageGuardEnd,
@ -828,13 +844,11 @@ enum TOperator {
EOpTextureOffsetClamp,
EOpTextureGradClamp,
EOpTextureGradOffsetClamp,
#ifdef AMD_EXTENSIONS
EOpTextureGatherLod,
EOpTextureGatherLodOffset,
EOpTextureGatherLodOffsets,
EOpFragmentMaskFetch,
EOpFragmentFetch,
#endif
EOpSparseTextureGuardBegin,
@ -854,13 +868,19 @@ enum TOperator {
EOpSparseTextureOffsetClamp,
EOpSparseTextureGradClamp,
EOpSparseTextureGradOffsetClamp,
#ifdef AMD_EXTENSIONS
EOpSparseTextureGatherLod,
EOpSparseTextureGatherLodOffset,
EOpSparseTextureGatherLodOffsets,
#endif
EOpSparseTextureGuardEnd,
EOpImageFootprintGuardBegin,
EOpImageSampleFootprintNV,
EOpImageSampleFootprintClampNV,
EOpImageSampleFootprintLodNV,
EOpImageSampleFootprintGradNV,
EOpImageSampleFootprintGradClampNV,
EOpImageFootprintGuardEnd,
EOpSamplingGuardEnd,
EOpTextureGuardEnd,
@ -879,6 +899,21 @@ enum TOperator {
EOpFindLSB,
EOpFindMSB,
EOpCountLeadingZeros,
EOpCountTrailingZeros,
EOpAbsDifference,
EOpAddSaturate,
EOpSubSaturate,
EOpAverage,
EOpAverageRounded,
EOpMul32x16,
EOpTraceNV,
EOpReportIntersectionNV,
EOpIgnoreIntersectionNV,
EOpTerminateRayNV,
EOpExecuteCallableNV,
EOpWritePackedPrimitiveIndices4x8NV,
//
// HLSL operations
//
@ -962,6 +997,10 @@ enum TOperator {
EOpWaveGetLaneIndex, // Will decompose to gl_SubgroupInvocationID.
EOpWaveActiveCountBits, // Will decompose to subgroupBallotBitCount(subgroupBallot()).
EOpWavePrefixCountBits, // Will decompose to subgroupBallotInclusiveBitCount(subgroupBallot()).
// Shader Clock Ops
EOpReadClockSubgroupKHR,
EOpReadClockDeviceKHR,
};
class TIntermTraverser;
@ -1063,6 +1102,8 @@ public:
virtual bool isStruct() const { return type.isStruct(); }
virtual bool isFloatingDomain() const { return type.isFloatingDomain(); }
virtual bool isIntegerDomain() const { return type.isIntegerDomain(); }
bool isAtomic() const { return type.isAtomic(); }
bool isReference() const { return type.isReference(); }
TString getCompleteString() const { return type.getCompleteString(); }
protected:
@ -1082,7 +1123,12 @@ public:
first(testFirst),
unroll(false),
dontUnroll(false),
dependency(0)
dependency(0),
minIterations(0),
maxIterations(iterationsInfinite),
iterationMultiple(1),
peelCount(0),
partialCount(0)
{ }
virtual TIntermLoop* getAsLoopNode() { return this; }
@ -1094,14 +1140,36 @@ public:
bool testFirst() const { return first; }
void setUnroll() { unroll = true; }
void setDontUnroll() { dontUnroll = true; }
void setDontUnroll() {
dontUnroll = true;
peelCount = 0;
partialCount = 0;
}
bool getUnroll() const { return unroll; }
bool getDontUnroll() const { return dontUnroll; }
static const unsigned int dependencyInfinite = 0xFFFFFFFF;
static const unsigned int iterationsInfinite = 0xFFFFFFFF;
void setLoopDependency(int d) { dependency = d; }
int getLoopDependency() const { return dependency; }
void setMinIterations(unsigned int v) { minIterations = v; }
unsigned int getMinIterations() const { return minIterations; }
void setMaxIterations(unsigned int v) { maxIterations = v; }
unsigned int getMaxIterations() const { return maxIterations; }
void setIterationMultiple(unsigned int v) { iterationMultiple = v; }
unsigned int getIterationMultiple() const { return iterationMultiple; }
void setPeelCount(unsigned int v) {
peelCount = v;
dontUnroll = false;
}
unsigned int getPeelCount() const { return peelCount; }
void setPartialCount(unsigned int v) {
partialCount = v;
dontUnroll = false;
}
unsigned int getPartialCount() const { return partialCount; }
protected:
TIntermNode* body; // code to loop over
TIntermTyped* test; // exit condition associated with loop, could be 0 for 'for' loops
@ -1110,6 +1178,11 @@ protected:
bool unroll; // true if unroll requested
bool dontUnroll; // true if request to not unroll
unsigned int dependency; // loop dependency hint; 0 means not set or unknown
unsigned int minIterations; // as per the SPIR-V specification
unsigned int maxIterations; // as per the SPIR-V specification
unsigned int iterationMultiple; // as per the SPIR-V specification
unsigned int peelCount; // as per the SPIR-V specification
unsigned int partialCount; // as per the SPIR-V specification
};
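For reference, a minimal sketch of how a front end might drive the new loop-control hints declared above. The setter names come from this diff; the wrapper function, the include path, and the values are hypothetical:

    #include "glslang/Include/intermediate.h"

    // Record SPIR-V loop-control hints on a loop node (values chosen arbitrarily).
    static void applyLoopHints(glslang::TIntermLoop& loop)
    {
        loop.setMinIterations(4);      // SPIR-V MinIterations
        loop.setMaxIterations(64);     // SPIR-V MaxIterations
        loop.setIterationMultiple(4);  // SPIR-V IterationMultiple
        loop.setPeelCount(1);          // note: setPeelCount() clears a pending dontUnroll request
        loop.setPartialCount(2);       // so does setPartialCount()
        // Calling loop.setDontUnroll() afterwards would zero peelCount and partialCount again.
    }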
//
@ -1125,6 +1198,7 @@ public:
virtual void traverse(TIntermTraverser*);
TOperator getFlowOp() const { return flowOp; }
TIntermTyped* getExpression() const { return expression; }
void setExpression(TIntermTyped* pExpression) { expression = pExpression; }
protected:
TOperator flowOp;
TIntermTyped* expression;
@ -1158,12 +1232,13 @@ public:
// it is essential to use "symbol = sym" to assign to symbol
TIntermSymbol(int i, const TString& n, const TType& t)
: TIntermTyped(t), id(i),
#ifdef ENABLE_HLSL
#ifndef GLSLANG_WEB
flattenSubset(-1),
#endif
constSubtree(nullptr)
{ name = n; }
virtual int getId() const { return id; }
virtual void changeId(int i) { id = i; }
virtual const TString& getName() const { return name; }
virtual void traverse(TIntermTraverser*);
virtual TIntermSymbol* getAsSymbolNode() { return this; }
@ -1172,7 +1247,7 @@ public:
const TConstUnionArray& getConstArray() const { return constArray; }
void setConstSubtree(TIntermTyped* subtree) { constSubtree = subtree; }
TIntermTyped* getConstSubtree() const { return constSubtree; }
#ifdef ENABLE_HLSL
#ifndef GLSLANG_WEB
void setFlattenSubset(int subset) { flattenSubset = subset; }
int getFlattenSubset() const { return flattenSubset; } // -1 means full object
#endif
@ -1183,7 +1258,7 @@ public:
protected:
int id; // the unique id of the symbol this node represents
#ifdef ENABLE_HLSL
#ifndef GLSLANG_WEB
int flattenSubset; // how deeply the flattened object rooted at id has been dereferenced
#endif
TString name; // the name of the symbol this node represents
@ -1223,9 +1298,7 @@ struct TCrackedTextureOp {
bool grad;
bool subpass;
bool lodClamp;
#ifdef AMD_EXTENSIONS
bool fragMask;
#endif
};
//
@ -1241,9 +1314,19 @@ public:
bool isConstructor() const;
bool isTexture() const { return op > EOpTextureGuardBegin && op < EOpTextureGuardEnd; }
bool isSampling() const { return op > EOpSamplingGuardBegin && op < EOpSamplingGuardEnd; }
#ifdef GLSLANG_WEB
bool isImage() const { return false; }
bool isSparseTexture() const { return false; }
bool isImageFootprint() const { return false; }
bool isSparseImage() const { return false; }
bool isSubgroup() const { return false; }
#else
bool isImage() const { return op > EOpImageGuardBegin && op < EOpImageGuardEnd; }
bool isSparseTexture() const { return op > EOpSparseTextureGuardBegin && op < EOpSparseTextureGuardEnd; }
bool isImageFootprint() const { return op > EOpImageFootprintGuardBegin && op < EOpImageFootprintGuardEnd; }
bool isSparseImage() const { return op == EOpSparseImageLoad; }
bool isSubgroup() const { return op > EOpSubgroupGuardStart && op < EOpSubgroupGuardStop; }
#endif
void setOperationPrecision(TPrecisionQualifier p) { operationPrecision = p; }
TPrecisionQualifier getOperationPrecision() const { return operationPrecision != EpqNone ?
@ -1273,9 +1356,7 @@ public:
cracked.grad = false;
cracked.subpass = false;
cracked.lodClamp = false;
#ifdef AMD_EXTENSIONS
cracked.fragMask = false;
#endif
switch (op) {
case EOpImageQuerySize:
@ -1290,10 +1371,6 @@ public:
case EOpTexture:
case EOpSparseTexture:
break;
case EOpTextureClamp:
case EOpSparseTextureClamp:
cracked.lodClamp = true;
break;
case EOpTextureProj:
cracked.proj = true;
break;
@ -1305,22 +1382,17 @@ public:
case EOpSparseTextureOffset:
cracked.offset = true;
break;
case EOpTextureOffsetClamp:
case EOpSparseTextureOffsetClamp:
cracked.offset = true;
cracked.lodClamp = true;
break;
case EOpTextureFetch:
case EOpSparseTextureFetch:
cracked.fetch = true;
if (sampler.dim == Esd1D || (sampler.dim == Esd2D && ! sampler.ms) || sampler.dim == Esd3D)
if (sampler.is1D() || (sampler.dim == Esd2D && ! sampler.isMultiSample()) || sampler.dim == Esd3D)
cracked.lod = true;
break;
case EOpTextureFetchOffset:
case EOpSparseTextureFetchOffset:
cracked.fetch = true;
cracked.offset = true;
if (sampler.dim == Esd1D || (sampler.dim == Esd2D && ! sampler.ms) || sampler.dim == Esd3D)
if (sampler.is1D() || (sampler.dim == Esd2D && ! sampler.isMultiSample()) || sampler.dim == Esd3D)
cracked.lod = true;
break;
case EOpTextureProjOffset:
@ -1345,11 +1417,6 @@ public:
case EOpSparseTextureGrad:
cracked.grad = true;
break;
case EOpTextureGradClamp:
case EOpSparseTextureGradClamp:
cracked.grad = true;
cracked.lodClamp = true;
break;
case EOpTextureGradOffset:
case EOpSparseTextureGradOffset:
cracked.grad = true;
@ -1364,6 +1431,21 @@ public:
cracked.offset = true;
cracked.proj = true;
break;
#ifndef GLSLANG_WEB
case EOpTextureClamp:
case EOpSparseTextureClamp:
cracked.lodClamp = true;
break;
case EOpTextureOffsetClamp:
case EOpSparseTextureOffsetClamp:
cracked.offset = true;
cracked.lodClamp = true;
break;
case EOpTextureGradClamp:
case EOpSparseTextureGradClamp:
cracked.grad = true;
cracked.lodClamp = true;
break;
case EOpTextureGradOffsetClamp:
case EOpSparseTextureGradOffsetClamp:
cracked.grad = true;
@ -1384,7 +1466,6 @@ public:
cracked.gather = true;
cracked.offsets = true;
break;
#ifdef AMD_EXTENSIONS
case EOpTextureGatherLod:
case EOpSparseTextureGatherLod:
cracked.gather = true;
@ -1415,11 +1496,26 @@ public:
cracked.subpass = sampler.dim == EsdSubpass;
cracked.fragMask = true;
break;
#endif
case EOpImageSampleFootprintNV:
break;
case EOpImageSampleFootprintClampNV:
cracked.lodClamp = true;
break;
case EOpImageSampleFootprintLodNV:
cracked.lod = true;
break;
case EOpImageSampleFootprintGradNV:
cracked.grad = true;
break;
case EOpImageSampleFootprintGradClampNV:
cracked.lodClamp = true;
cracked.grad = true;
break;
case EOpSubpassLoad:
case EOpSubpassLoadMS:
cracked.subpass = true;
break;
#endif
default:
break;
}
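A hedged usage sketch for the texture-op cracking shown above; the flag and type names are taken from this diff, while the wrapper function itself is hypothetical:

    #include "glslang/Include/intermediate.h"

    // Decompose a texturing opcode into its modifier flags and test a combination.
    static bool isExplicitLodFetch(const glslang::TIntermOperator& node, glslang::TSampler sampler)
    {
        glslang::TCrackedTextureOp cracked;
        node.crackTexture(sampler, cracked);  // fills query/proj/lod/fetch/offset/... as in the switch above
        return cracked.fetch && cracked.lod;  // e.g. texelFetch on a non-multisample 1D/2D/3D sampler
    }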


@ -1,3 +1,3 @@
// This header is generated by the make-revision script.
#define GLSLANG_PATCH_LEVEL 2743
#define GLSLANG_PATCH_LEVEL 3559


@ -2,6 +2,7 @@
// Copyright (C) 2002-2005 3Dlabs Inc. Ltd.
// Copyright (C) 2012-2013 LunarG, Inc.
// Copyright (C) 2017 ARM Limited.
// Copyright (C) 2018 Google, Inc.
//
// All rights reserved.
//
@ -179,73 +180,83 @@ TIntermTyped* TIntermConstantUnion::fold(TOperator op, const TIntermTyped* right
case EbtDouble:
case EbtFloat:
case EbtFloat16:
newConstArray[i].setDConst(leftUnionArray[i].getDConst() / rightUnionArray[i].getDConst());
break;
case EbtInt8:
if (rightUnionArray[i] == 0)
newConstArray[i].setI8Const(0x7F);
else if (rightUnionArray[i].getI8Const() == -1 && leftUnionArray[i].getI8Const() == (signed char)0x80)
newConstArray[i].setI8Const((signed char)0x80);
if (rightUnionArray[i].getDConst() != 0.0)
newConstArray[i].setDConst(leftUnionArray[i].getDConst() / rightUnionArray[i].getDConst());
else if (leftUnionArray[i].getDConst() > 0.0)
newConstArray[i].setDConst((double)INFINITY);
else if (leftUnionArray[i].getDConst() < 0.0)
newConstArray[i].setDConst(-(double)INFINITY);
else
newConstArray[i].setI8Const(leftUnionArray[i].getI8Const() / rightUnionArray[i].getI8Const());
break;
case EbtUint8:
if (rightUnionArray[i] == 0) {
newConstArray[i].setU8Const(0xFF);
} else
newConstArray[i].setU8Const(leftUnionArray[i].getU8Const() / rightUnionArray[i].getU8Const());
break;
case EbtInt16:
if (rightUnionArray[i] == 0)
newConstArray[i].setI16Const(0x7FFF);
else if (rightUnionArray[i].getI16Const() == -1 && leftUnionArray[i].getI16Const() == (signed short)0x8000)
newConstArray[i].setI16Const(short(0x8000));
else
newConstArray[i].setI16Const(leftUnionArray[i].getI16Const() / rightUnionArray[i].getI16Const());
break;
case EbtUint16:
if (rightUnionArray[i] == 0) {
newConstArray[i].setU16Const(0xFFFF);
} else
newConstArray[i].setU16Const(leftUnionArray[i].getU16Const() / rightUnionArray[i].getU16Const());
newConstArray[i].setDConst((double)NAN);
break;
case EbtInt:
if (rightUnionArray[i] == 0)
newConstArray[i].setIConst(0x7FFFFFFF);
else if (rightUnionArray[i].getIConst() == -1 && leftUnionArray[i].getIConst() == (int)0x80000000)
newConstArray[i].setIConst(0x80000000);
else if (rightUnionArray[i].getIConst() == -1 && leftUnionArray[i].getIConst() == (int)-0x80000000ll)
newConstArray[i].setIConst((int)-0x80000000ll);
else
newConstArray[i].setIConst(leftUnionArray[i].getIConst() / rightUnionArray[i].getIConst());
break;
case EbtUint:
if (rightUnionArray[i] == 0) {
if (rightUnionArray[i] == 0u)
newConstArray[i].setUConst(0xFFFFFFFFu);
} else
else
newConstArray[i].setUConst(leftUnionArray[i].getUConst() / rightUnionArray[i].getUConst());
break;
#ifndef GLSLANG_WEB
case EbtInt8:
if (rightUnionArray[i] == (signed char)0)
newConstArray[i].setI8Const((signed char)0x7F);
else if (rightUnionArray[i].getI8Const() == (signed char)-1 && leftUnionArray[i].getI8Const() == (signed char)-0x80)
newConstArray[i].setI8Const((signed char)-0x80);
else
newConstArray[i].setI8Const(leftUnionArray[i].getI8Const() / rightUnionArray[i].getI8Const());
break;
case EbtUint8:
if (rightUnionArray[i] == (unsigned char)0u)
newConstArray[i].setU8Const((unsigned char)0xFFu);
else
newConstArray[i].setU8Const(leftUnionArray[i].getU8Const() / rightUnionArray[i].getU8Const());
break;
case EbtInt16:
if (rightUnionArray[i] == (signed short)0)
newConstArray[i].setI16Const((signed short)0x7FFF);
else if (rightUnionArray[i].getI16Const() == (signed short)-1 && leftUnionArray[i].getI16Const() == (signed short)-0x8000)
newConstArray[i].setI16Const((signed short)-0x8000);
else
newConstArray[i].setI16Const(leftUnionArray[i].getI16Const() / rightUnionArray[i].getI16Const());
break;
case EbtUint16:
if (rightUnionArray[i] == (unsigned short)0u)
newConstArray[i].setU16Const((unsigned short)0xFFFFu);
else
newConstArray[i].setU16Const(leftUnionArray[i].getU16Const() / rightUnionArray[i].getU16Const());
break;
case EbtInt64:
if (rightUnionArray[i] == 0)
if (rightUnionArray[i] == 0ll)
newConstArray[i].setI64Const(0x7FFFFFFFFFFFFFFFll);
else if (rightUnionArray[i].getI64Const() == -1 && leftUnionArray[i].getI64Const() == (long long)0x8000000000000000)
newConstArray[i].setI64Const(0x8000000000000000);
else if (rightUnionArray[i].getI64Const() == -1 && leftUnionArray[i].getI64Const() == (long long)-0x8000000000000000ll)
newConstArray[i].setI64Const((long long)-0x8000000000000000ll);
else
newConstArray[i].setI64Const(leftUnionArray[i].getI64Const() / rightUnionArray[i].getI64Const());
break;
case EbtUint64:
if (rightUnionArray[i] == 0) {
if (rightUnionArray[i] == 0ull)
newConstArray[i].setU64Const(0xFFFFFFFFFFFFFFFFull);
} else
else
newConstArray[i].setU64Const(leftUnionArray[i].getU64Const() / rightUnionArray[i].getU64Const());
break;
default:
return 0;
#endif
}
}
break;
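The division cases above saturate or produce IEEE specials rather than trapping at compile time. A standalone sketch of the same rules, not glslang code, for quick reference:

    #include <cmath>
    #include <climits>
    #include <cstdio>

    double foldDiv(double l, double r)
    {
        if (r != 0.0) return l / r;
        if (l > 0.0)  return (double)INFINITY;   // x/0 with x > 0  ->  +inf
        if (l < 0.0)  return -(double)INFINITY;  // x/0 with x < 0  ->  -inf
        return (double)NAN;                      // 0/0             ->  NaN
    }

    int foldDiv(int l, int r)
    {
        if (r == 0)                  return INT_MAX;  // divide by zero saturates to the type's max
        if (r == -1 && l == INT_MIN) return INT_MIN;  // avoid signed overflow on INT_MIN / -1
        return l / r;
    }

    int main()
    {
        std::printf("%g %d\n", foldDiv(1.0, 0.0), foldDiv(INT_MIN, -1));  // prints: inf -2147483648
    }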
@ -284,13 +295,12 @@ TIntermTyped* TIntermConstantUnion::fold(TOperator op, const TIntermTyped* right
newConstArray[i].setIConst(0);
break;
} else goto modulo_default;
#ifndef GLSLANG_WEB
case EbtInt64:
if (rightUnionArray[i].getI64Const() == -1 && leftUnionArray[i].getI64Const() == LLONG_MIN) {
newConstArray[i].setI64Const(0);
break;
} else goto modulo_default;
#ifdef AMD_EXTENSIONS
case EbtInt16:
if (rightUnionArray[i].getIConst() == -1 && leftUnionArray[i].getIConst() == SHRT_MIN) {
newConstArray[i].setIConst(0);
@ -407,8 +417,8 @@ TIntermTyped* TIntermConstantUnion::fold(TOperator op, const TType& returnType)
case EOpEmitStreamVertex:
case EOpEndStreamPrimitive:
// These don't actually fold
return 0;
// These don't fold
return nullptr;
case EOpPackSnorm2x16:
case EOpPackUnorm2x16:
@ -483,8 +493,6 @@ TIntermTyped* TIntermConstantUnion::fold(TOperator op, const TType& returnType)
break;
}
// TODO: 3.0 Functionality: unary constant folding: the rest of the ops have to be fleshed out
case EOpPackSnorm2x16:
case EOpPackUnorm2x16:
case EOpPackHalf2x16:
@ -502,7 +510,7 @@ TIntermTyped* TIntermConstantUnion::fold(TOperator op, const TType& returnType)
case EOpDeterminant:
case EOpMatrixInverse:
case EOpTranspose:
return 0;
return nullptr;
default:
assert(componentWise);
@ -521,16 +529,18 @@ TIntermTyped* TIntermConstantUnion::fold(TOperator op, const TType& returnType)
case EbtDouble:
case EbtFloat16:
case EbtFloat: newConstArray[i].setDConst(-unionArray[i].getDConst()); break;
case EbtInt: newConstArray[i].setIConst(-unionArray[i].getIConst()); break;
case EbtUint: newConstArray[i].setUConst(static_cast<unsigned int>(-static_cast<int>(unionArray[i].getUConst()))); break;
#ifndef GLSLANG_WEB
case EbtInt8: newConstArray[i].setI8Const(-unionArray[i].getI8Const()); break;
case EbtUint8: newConstArray[i].setU8Const(static_cast<unsigned int>(-static_cast<signed int>(unionArray[i].getU8Const()))); break;
case EbtInt16: newConstArray[i].setI16Const(-unionArray[i].getI16Const()); break;
case EbtUint16:newConstArray[i].setU16Const(static_cast<unsigned int>(-static_cast<signed int>(unionArray[i].getU16Const()))); break;
case EbtInt: newConstArray[i].setIConst(-unionArray[i].getIConst()); break;
case EbtUint: newConstArray[i].setUConst(static_cast<unsigned int>(-static_cast<int>(unionArray[i].getUConst()))); break;
case EbtInt64: newConstArray[i].setI64Const(-unionArray[i].getI64Const()); break;
case EbtUint64: newConstArray[i].setU64Const(static_cast<unsigned long long>(-static_cast<long long>(unionArray[i].getU64Const()))); break;
#endif
default:
return 0;
return nullptr;
}
break;
case EOpLogicalNot:
@ -538,7 +548,7 @@ TIntermTyped* TIntermConstantUnion::fold(TOperator op, const TType& returnType)
switch (getType().getBasicType()) {
case EbtBool: newConstArray[i].setBConst(!unionArray[i].getBConst()); break;
default:
return 0;
return nullptr;
}
break;
case EOpBitwiseNot:
@ -663,6 +673,284 @@ TIntermTyped* TIntermConstantUnion::fold(TOperator op, const TType& returnType)
break;
}
case EOpConvIntToBool:
newConstArray[i].setBConst(unionArray[i].getIConst() != 0); break;
case EOpConvUintToBool:
newConstArray[i].setBConst(unionArray[i].getUConst() != 0); break;
case EOpConvBoolToInt:
newConstArray[i].setIConst(unionArray[i].getBConst()); break;
case EOpConvBoolToUint:
newConstArray[i].setUConst(unionArray[i].getBConst()); break;
case EOpConvIntToUint:
newConstArray[i].setUConst(unionArray[i].getIConst()); break;
case EOpConvUintToInt:
newConstArray[i].setIConst(unionArray[i].getUConst()); break;
case EOpConvFloatToBool:
case EOpConvDoubleToBool:
newConstArray[i].setBConst(unionArray[i].getDConst() != 0); break;
case EOpConvBoolToFloat:
case EOpConvBoolToDouble:
newConstArray[i].setDConst(unionArray[i].getBConst()); break;
case EOpConvIntToFloat:
case EOpConvIntToDouble:
newConstArray[i].setDConst(unionArray[i].getIConst()); break;
case EOpConvUintToFloat:
case EOpConvUintToDouble:
newConstArray[i].setDConst(unionArray[i].getUConst()); break;
case EOpConvDoubleToFloat:
case EOpConvFloatToDouble:
newConstArray[i].setDConst(unionArray[i].getDConst()); break;
case EOpConvFloatToUint:
case EOpConvDoubleToUint:
newConstArray[i].setUConst(static_cast<unsigned int>(unionArray[i].getDConst())); break;
case EOpConvFloatToInt:
case EOpConvDoubleToInt:
newConstArray[i].setIConst(static_cast<int>(unionArray[i].getDConst())); break;
#ifndef GLSLANG_WEB
case EOpConvInt8ToBool:
newConstArray[i].setBConst(unionArray[i].getI8Const() != 0); break;
case EOpConvUint8ToBool:
newConstArray[i].setBConst(unionArray[i].getU8Const() != 0); break;
case EOpConvInt16ToBool:
newConstArray[i].setBConst(unionArray[i].getI16Const() != 0); break;
case EOpConvUint16ToBool:
newConstArray[i].setBConst(unionArray[i].getU16Const() != 0); break;
case EOpConvInt64ToBool:
newConstArray[i].setBConst(unionArray[i].getI64Const() != 0); break;
case EOpConvUint64ToBool:
newConstArray[i].setBConst(unionArray[i].getI64Const() != 0); break;
case EOpConvFloat16ToBool:
newConstArray[i].setBConst(unionArray[i].getDConst() != 0); break;
case EOpConvBoolToInt8:
newConstArray[i].setI8Const(unionArray[i].getBConst()); break;
case EOpConvBoolToUint8:
newConstArray[i].setU8Const(unionArray[i].getBConst()); break;
case EOpConvBoolToInt16:
newConstArray[i].setI16Const(unionArray[i].getBConst()); break;
case EOpConvBoolToUint16:
newConstArray[i].setU16Const(unionArray[i].getBConst()); break;
case EOpConvBoolToInt64:
newConstArray[i].setI64Const(unionArray[i].getBConst()); break;
case EOpConvBoolToUint64:
newConstArray[i].setU64Const(unionArray[i].getBConst()); break;
case EOpConvBoolToFloat16:
newConstArray[i].setDConst(unionArray[i].getBConst()); break;
case EOpConvInt8ToInt16:
newConstArray[i].setI16Const(unionArray[i].getI8Const()); break;
case EOpConvInt8ToInt:
newConstArray[i].setIConst(unionArray[i].getI8Const()); break;
case EOpConvInt8ToInt64:
newConstArray[i].setI64Const(unionArray[i].getI8Const()); break;
case EOpConvInt8ToUint8:
newConstArray[i].setU8Const(unionArray[i].getI8Const()); break;
case EOpConvInt8ToUint16:
newConstArray[i].setU16Const(unionArray[i].getI8Const()); break;
case EOpConvInt8ToUint:
newConstArray[i].setUConst(unionArray[i].getI8Const()); break;
case EOpConvInt8ToUint64:
newConstArray[i].setU64Const(unionArray[i].getI8Const()); break;
case EOpConvUint8ToInt8:
newConstArray[i].setI8Const(unionArray[i].getU8Const()); break;
case EOpConvUint8ToInt16:
newConstArray[i].setI16Const(unionArray[i].getU8Const()); break;
case EOpConvUint8ToInt:
newConstArray[i].setIConst(unionArray[i].getU8Const()); break;
case EOpConvUint8ToInt64:
newConstArray[i].setI64Const(unionArray[i].getU8Const()); break;
case EOpConvUint8ToUint16:
newConstArray[i].setU16Const(unionArray[i].getU8Const()); break;
case EOpConvUint8ToUint:
newConstArray[i].setUConst(unionArray[i].getU8Const()); break;
case EOpConvUint8ToUint64:
newConstArray[i].setU64Const(unionArray[i].getU8Const()); break;
case EOpConvInt8ToFloat16:
newConstArray[i].setDConst(unionArray[i].getI8Const()); break;
case EOpConvInt8ToFloat:
newConstArray[i].setDConst(unionArray[i].getI8Const()); break;
case EOpConvInt8ToDouble:
newConstArray[i].setDConst(unionArray[i].getI8Const()); break;
case EOpConvUint8ToFloat16:
newConstArray[i].setDConst(unionArray[i].getU8Const()); break;
case EOpConvUint8ToFloat:
newConstArray[i].setDConst(unionArray[i].getU8Const()); break;
case EOpConvUint8ToDouble:
newConstArray[i].setDConst(unionArray[i].getU8Const()); break;
case EOpConvInt16ToInt8:
newConstArray[i].setI8Const(static_cast<signed char>(unionArray[i].getI16Const())); break;
case EOpConvInt16ToInt:
newConstArray[i].setIConst(unionArray[i].getI16Const()); break;
case EOpConvInt16ToInt64:
newConstArray[i].setI64Const(unionArray[i].getI16Const()); break;
case EOpConvInt16ToUint8:
newConstArray[i].setU8Const(static_cast<unsigned char>(unionArray[i].getI16Const())); break;
case EOpConvInt16ToUint16:
newConstArray[i].setU16Const(unionArray[i].getI16Const()); break;
case EOpConvInt16ToUint:
newConstArray[i].setUConst(unionArray[i].getI16Const()); break;
case EOpConvInt16ToUint64:
newConstArray[i].setU64Const(unionArray[i].getI16Const()); break;
case EOpConvUint16ToInt8:
newConstArray[i].setI8Const(static_cast<signed char>(unionArray[i].getU16Const())); break;
case EOpConvUint16ToInt16:
newConstArray[i].setI16Const(unionArray[i].getU16Const()); break;
case EOpConvUint16ToInt:
newConstArray[i].setIConst(unionArray[i].getU16Const()); break;
case EOpConvUint16ToInt64:
newConstArray[i].setI64Const(unionArray[i].getU16Const()); break;
case EOpConvUint16ToUint8:
newConstArray[i].setU8Const(static_cast<unsigned char>(unionArray[i].getU16Const())); break;
case EOpConvUint16ToUint:
newConstArray[i].setUConst(unionArray[i].getU16Const()); break;
case EOpConvUint16ToUint64:
newConstArray[i].setU64Const(unionArray[i].getU16Const()); break;
case EOpConvInt16ToFloat16:
newConstArray[i].setDConst(unionArray[i].getI16Const()); break;
case EOpConvInt16ToFloat:
newConstArray[i].setDConst(unionArray[i].getI16Const()); break;
case EOpConvInt16ToDouble:
newConstArray[i].setDConst(unionArray[i].getI16Const()); break;
case EOpConvUint16ToFloat16:
newConstArray[i].setDConst(unionArray[i].getU16Const()); break;
case EOpConvUint16ToFloat:
newConstArray[i].setDConst(unionArray[i].getU16Const()); break;
case EOpConvUint16ToDouble:
newConstArray[i].setDConst(unionArray[i].getU16Const()); break;
case EOpConvIntToInt8:
newConstArray[i].setI8Const((signed char)unionArray[i].getIConst()); break;
case EOpConvIntToInt16:
newConstArray[i].setI16Const((signed short)unionArray[i].getIConst()); break;
case EOpConvIntToInt64:
newConstArray[i].setI64Const(unionArray[i].getIConst()); break;
case EOpConvIntToUint8:
newConstArray[i].setU8Const((unsigned char)unionArray[i].getIConst()); break;
case EOpConvIntToUint16:
newConstArray[i].setU16Const((unsigned char)unionArray[i].getIConst()); break;
case EOpConvIntToUint64:
newConstArray[i].setU64Const(unionArray[i].getIConst()); break;
case EOpConvUintToInt8:
newConstArray[i].setI8Const((signed char)unionArray[i].getUConst()); break;
case EOpConvUintToInt16:
newConstArray[i].setI16Const((signed short)unionArray[i].getUConst()); break;
case EOpConvUintToInt64:
newConstArray[i].setI64Const(unionArray[i].getUConst()); break;
case EOpConvUintToUint8:
newConstArray[i].setU8Const((unsigned char)unionArray[i].getUConst()); break;
case EOpConvUintToUint16:
newConstArray[i].setU16Const((unsigned short)unionArray[i].getUConst()); break;
case EOpConvUintToUint64:
newConstArray[i].setU64Const(unionArray[i].getUConst()); break;
case EOpConvIntToFloat16:
newConstArray[i].setDConst(unionArray[i].getIConst()); break;
case EOpConvUintToFloat16:
newConstArray[i].setDConst(unionArray[i].getUConst()); break;
case EOpConvInt64ToInt8:
newConstArray[i].setI8Const(static_cast<signed char>(unionArray[i].getI64Const())); break;
case EOpConvInt64ToInt16:
newConstArray[i].setI16Const(static_cast<signed short>(unionArray[i].getI64Const())); break;
case EOpConvInt64ToInt:
newConstArray[i].setIConst(static_cast<int>(unionArray[i].getI64Const())); break;
case EOpConvInt64ToUint8:
newConstArray[i].setU8Const(static_cast<unsigned char>(unionArray[i].getI64Const())); break;
case EOpConvInt64ToUint16:
newConstArray[i].setU16Const(static_cast<unsigned short>(unionArray[i].getI64Const())); break;
case EOpConvInt64ToUint:
newConstArray[i].setUConst(static_cast<unsigned int>(unionArray[i].getI64Const())); break;
case EOpConvInt64ToUint64:
newConstArray[i].setU64Const(unionArray[i].getI64Const()); break;
case EOpConvUint64ToInt8:
newConstArray[i].setI8Const(static_cast<signed char>(unionArray[i].getU64Const())); break;
case EOpConvUint64ToInt16:
newConstArray[i].setI16Const(static_cast<signed short>(unionArray[i].getU64Const())); break;
case EOpConvUint64ToInt:
newConstArray[i].setIConst(static_cast<int>(unionArray[i].getU64Const())); break;
case EOpConvUint64ToInt64:
newConstArray[i].setI64Const(unionArray[i].getU64Const()); break;
case EOpConvUint64ToUint8:
newConstArray[i].setU8Const(static_cast<unsigned char>(unionArray[i].getU64Const())); break;
case EOpConvUint64ToUint16:
newConstArray[i].setU16Const(static_cast<unsigned short>(unionArray[i].getU64Const())); break;
case EOpConvUint64ToUint:
newConstArray[i].setUConst(static_cast<unsigned int>(unionArray[i].getU64Const())); break;
case EOpConvInt64ToFloat16:
newConstArray[i].setDConst(static_cast<double>(unionArray[i].getI64Const())); break;
case EOpConvInt64ToFloat:
newConstArray[i].setDConst(static_cast<double>(unionArray[i].getI64Const())); break;
case EOpConvInt64ToDouble:
newConstArray[i].setDConst(static_cast<double>(unionArray[i].getI64Const())); break;
case EOpConvUint64ToFloat16:
newConstArray[i].setDConst(static_cast<double>(unionArray[i].getU64Const())); break;
case EOpConvUint64ToFloat:
newConstArray[i].setDConst(static_cast<double>(unionArray[i].getU64Const())); break;
case EOpConvUint64ToDouble:
newConstArray[i].setDConst(static_cast<double>(unionArray[i].getU64Const())); break;
case EOpConvFloat16ToInt8:
newConstArray[i].setI8Const(static_cast<signed char>(unionArray[i].getDConst())); break;
case EOpConvFloat16ToInt16:
newConstArray[i].setI16Const(static_cast<signed short>(unionArray[i].getDConst())); break;
case EOpConvFloat16ToInt:
newConstArray[i].setIConst(static_cast<int>(unionArray[i].getDConst())); break;
case EOpConvFloat16ToInt64:
newConstArray[i].setI64Const(static_cast<long long>(unionArray[i].getDConst())); break;
case EOpConvFloat16ToUint8:
newConstArray[i].setU8Const(static_cast<unsigned char>(unionArray[i].getDConst())); break;
case EOpConvFloat16ToUint16:
newConstArray[i].setU16Const(static_cast<unsigned short>(unionArray[i].getDConst())); break;
case EOpConvFloat16ToUint:
newConstArray[i].setUConst(static_cast<unsigned int>(unionArray[i].getDConst())); break;
case EOpConvFloat16ToUint64:
newConstArray[i].setU64Const(static_cast<unsigned long long>(unionArray[i].getDConst())); break;
case EOpConvFloat16ToFloat:
newConstArray[i].setDConst(unionArray[i].getDConst()); break;
case EOpConvFloat16ToDouble:
newConstArray[i].setDConst(unionArray[i].getDConst()); break;
case EOpConvFloatToInt8:
newConstArray[i].setI8Const(static_cast<signed char>(unionArray[i].getDConst())); break;
case EOpConvFloatToInt16:
newConstArray[i].setI16Const(static_cast<signed short>(unionArray[i].getDConst())); break;
case EOpConvFloatToInt64:
newConstArray[i].setI64Const(static_cast<long long>(unionArray[i].getDConst())); break;
case EOpConvFloatToUint8:
newConstArray[i].setU8Const(static_cast<unsigned char>(unionArray[i].getDConst())); break;
case EOpConvFloatToUint16:
newConstArray[i].setU16Const(static_cast<unsigned short>(unionArray[i].getDConst())); break;
case EOpConvFloatToUint64:
newConstArray[i].setU64Const(static_cast<unsigned long long>(unionArray[i].getDConst())); break;
case EOpConvFloatToFloat16:
newConstArray[i].setDConst(unionArray[i].getDConst()); break;
case EOpConvDoubleToInt8:
newConstArray[i].setI8Const(static_cast<signed char>(unionArray[i].getDConst())); break;
case EOpConvDoubleToInt16:
newConstArray[i].setI16Const(static_cast<signed short>(unionArray[i].getDConst())); break;
case EOpConvDoubleToInt64:
newConstArray[i].setI64Const(static_cast<long long>(unionArray[i].getDConst())); break;
case EOpConvDoubleToUint8:
newConstArray[i].setU8Const(static_cast<unsigned char>(unionArray[i].getDConst())); break;
case EOpConvDoubleToUint16:
newConstArray[i].setU16Const(static_cast<unsigned short>(unionArray[i].getDConst())); break;
case EOpConvDoubleToUint64:
newConstArray[i].setU64Const(static_cast<unsigned long long>(unionArray[i].getDConst())); break;
case EOpConvDoubleToFloat16:
newConstArray[i].setDConst(unionArray[i].getDConst()); break;
case EOpConvPtrToUint64:
case EOpConvUint64ToPtr:
case EOpConstructReference:
newConstArray[i].setU64Const(unionArray[i].getU64Const()); break;
#endif
// TODO: 3.0 Functionality: unary constant folding: the rest of the ops have to be fleshed out
case EOpSinh:
@ -685,7 +973,7 @@ TIntermTyped* TIntermConstantUnion::fold(TOperator op, const TType& returnType)
case EOpInt16BitsToFloat16:
case EOpUint16BitsToFloat16:
default:
return 0;
return nullptr;
}
}
@ -793,6 +1081,13 @@ TIntermTyped* TIntermediate::fold(TIntermAggregate* aggrNode)
case EbtDouble:
newConstArray[comp].setDConst(std::min(childConstUnions[0][arg0comp].getDConst(), childConstUnions[1][arg1comp].getDConst()));
break;
case EbtInt:
newConstArray[comp].setIConst(std::min(childConstUnions[0][arg0comp].getIConst(), childConstUnions[1][arg1comp].getIConst()));
break;
case EbtUint:
newConstArray[comp].setUConst(std::min(childConstUnions[0][arg0comp].getUConst(), childConstUnions[1][arg1comp].getUConst()));
break;
#ifndef GLSLANG_WEB
case EbtInt8:
newConstArray[comp].setI8Const(std::min(childConstUnions[0][arg0comp].getI8Const(), childConstUnions[1][arg1comp].getI8Const()));
break;
@ -805,18 +1100,13 @@ TIntermTyped* TIntermediate::fold(TIntermAggregate* aggrNode)
case EbtUint16:
newConstArray[comp].setU16Const(std::min(childConstUnions[0][arg0comp].getU16Const(), childConstUnions[1][arg1comp].getU16Const()));
break;
case EbtInt:
newConstArray[comp].setIConst(std::min(childConstUnions[0][arg0comp].getIConst(), childConstUnions[1][arg1comp].getIConst()));
break;
case EbtUint:
newConstArray[comp].setUConst(std::min(childConstUnions[0][arg0comp].getUConst(), childConstUnions[1][arg1comp].getUConst()));
break;
case EbtInt64:
newConstArray[comp].setI64Const(std::min(childConstUnions[0][arg0comp].getI64Const(), childConstUnions[1][arg1comp].getI64Const()));
break;
case EbtUint64:
newConstArray[comp].setU64Const(std::min(childConstUnions[0][arg0comp].getU64Const(), childConstUnions[1][arg1comp].getU64Const()));
break;
#endif
default: assert(false && "Default missing");
}
break;
@ -827,6 +1117,13 @@ TIntermTyped* TIntermediate::fold(TIntermAggregate* aggrNode)
case EbtDouble:
newConstArray[comp].setDConst(std::max(childConstUnions[0][arg0comp].getDConst(), childConstUnions[1][arg1comp].getDConst()));
break;
case EbtInt:
newConstArray[comp].setIConst(std::max(childConstUnions[0][arg0comp].getIConst(), childConstUnions[1][arg1comp].getIConst()));
break;
case EbtUint:
newConstArray[comp].setUConst(std::max(childConstUnions[0][arg0comp].getUConst(), childConstUnions[1][arg1comp].getUConst()));
break;
#ifndef GLSLANG_WEB
case EbtInt8:
newConstArray[comp].setI8Const(std::max(childConstUnions[0][arg0comp].getI8Const(), childConstUnions[1][arg1comp].getI8Const()));
break;
@ -839,18 +1136,13 @@ TIntermTyped* TIntermediate::fold(TIntermAggregate* aggrNode)
case EbtUint16:
newConstArray[comp].setU16Const(std::max(childConstUnions[0][arg0comp].getU16Const(), childConstUnions[1][arg1comp].getU16Const()));
break;
case EbtInt:
newConstArray[comp].setIConst(std::max(childConstUnions[0][arg0comp].getIConst(), childConstUnions[1][arg1comp].getIConst()));
break;
case EbtUint:
newConstArray[comp].setUConst(std::max(childConstUnions[0][arg0comp].getUConst(), childConstUnions[1][arg1comp].getUConst()));
break;
case EbtInt64:
newConstArray[comp].setI64Const(std::max(childConstUnions[0][arg0comp].getI64Const(), childConstUnions[1][arg1comp].getI64Const()));
break;
case EbtUint64:
newConstArray[comp].setU64Const(std::max(childConstUnions[0][arg0comp].getU64Const(), childConstUnions[1][arg1comp].getU64Const()));
break;
#endif
default: assert(false && "Default missing");
}
break;
@ -862,6 +1154,11 @@ TIntermTyped* TIntermediate::fold(TIntermAggregate* aggrNode)
newConstArray[comp].setDConst(std::min(std::max(childConstUnions[0][arg0comp].getDConst(), childConstUnions[1][arg1comp].getDConst()),
childConstUnions[2][arg2comp].getDConst()));
break;
case EbtUint:
newConstArray[comp].setUConst(std::min(std::max(childConstUnions[0][arg0comp].getUConst(), childConstUnions[1][arg1comp].getUConst()),
childConstUnions[2][arg2comp].getUConst()));
break;
#ifndef GLSLANG_WEB
case EbtInt8:
newConstArray[comp].setI8Const(std::min(std::max(childConstUnions[0][arg0comp].getI8Const(), childConstUnions[1][arg1comp].getI8Const()),
childConstUnions[2][arg2comp].getI8Const()));
@ -882,10 +1179,6 @@ TIntermTyped* TIntermediate::fold(TIntermAggregate* aggrNode)
newConstArray[comp].setIConst(std::min(std::max(childConstUnions[0][arg0comp].getIConst(), childConstUnions[1][arg1comp].getIConst()),
childConstUnions[2][arg2comp].getIConst()));
break;
case EbtUint:
newConstArray[comp].setUConst(std::min(std::max(childConstUnions[0][arg0comp].getUConst(), childConstUnions[1][arg1comp].getUConst()),
childConstUnions[2][arg2comp].getUConst()));
break;
case EbtInt64:
newConstArray[comp].setI64Const(std::min(std::max(childConstUnions[0][arg0comp].getI64Const(), childConstUnions[1][arg1comp].getI64Const()),
childConstUnions[2][arg2comp].getI64Const()));
@ -894,6 +1187,7 @@ TIntermTyped* TIntermediate::fold(TIntermAggregate* aggrNode)
newConstArray[comp].setU64Const(std::min(std::max(childConstUnions[0][arg0comp].getU64Const(), childConstUnions[1][arg1comp].getU64Const()),
childConstUnions[2][arg2comp].getU64Const()));
break;
#endif
default: assert(false && "Default missing");
}
break;
@ -916,12 +1210,17 @@ TIntermTyped* TIntermediate::fold(TIntermAggregate* aggrNode)
newConstArray[comp].setBConst(childConstUnions[0][arg0comp] != childConstUnions[1][arg1comp]);
break;
case EOpMix:
if (children[2]->getAsTyped()->getBasicType() == EbtBool)
newConstArray[comp].setDConst(childConstUnions[2][arg2comp].getBConst() ? childConstUnions[1][arg1comp].getDConst() :
childConstUnions[0][arg0comp].getDConst());
else
newConstArray[comp].setDConst(childConstUnions[0][arg0comp].getDConst() * (1.0 - childConstUnions[2][arg2comp].getDConst()) +
childConstUnions[1][arg1comp].getDConst() * childConstUnions[2][arg2comp].getDConst());
if (!children[0]->getAsTyped()->isFloatingDomain())
return aggrNode;
if (children[2]->getAsTyped()->getBasicType() == EbtBool) {
newConstArray[comp].setDConst(childConstUnions[2][arg2comp].getBConst()
? childConstUnions[1][arg1comp].getDConst()
: childConstUnions[0][arg0comp].getDConst());
} else {
newConstArray[comp].setDConst(
childConstUnions[0][arg0comp].getDConst() * (1.0 - childConstUnions[2][arg2comp].getDConst()) +
childConstUnions[1][arg1comp].getDConst() * childConstUnions[2][arg2comp].getDConst());
}
break;
case EOpStep:
newConstArray[comp].setDConst(childConstUnions[1][arg1comp].getDConst() < childConstUnions[0][arg0comp].getDConst() ? 0.0 : 1.0);
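For reference, the mix() folding above is the usual linear blend when the selector is floating-point and a component-wise select when it is boolean; non-floating-point arguments are now left unfolded. A standalone arithmetic sketch, not glslang code:

    #include <cassert>

    double foldMix(double x, double y, double a) { return x * (1.0 - a) + y * a; }
    double foldMix(double x, double y, bool a)   { return a ? y : x; }

    int main()
    {
        assert(foldMix(2.0, 10.0, 0.25) == 4.0);   // 2*0.75 + 10*0.25
        assert(foldMix(2.0, 10.0, true) == 10.0);  // boolean selector picks the second operand
    }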
@ -1073,7 +1372,9 @@ TIntermTyped* TIntermediate::foldDereference(TIntermTyped* node, int index, cons
// arrays, vectors, matrices, all use simple multiplicative math
// while structures need to add up heterogeneous members
int start;
if (node->isArray() || ! node->isStruct())
if (node->getType().isCoopMat())
start = 0;
else if (node->isArray() || ! node->isStruct())
start = size * index;
else {
// it is a structure

Externals/glslang/glslang/MachineIndependent/Initialize.cpp (vendored, Executable file → Normal file, 4604 lines changed)

File diff suppressed because it is too large.


@ -91,6 +91,8 @@ public:
void identifyBuiltIns(int version, EProfile profile, const SpvVersion& spvVersion, EShLanguage language, TSymbolTable& symbolTable, const TBuiltInResource &resources);
protected:
void addTabledBuiltins(int version, EProfile profile, const SpvVersion& spvVersion);
void relateTabledBuiltins(int version, EProfile profile, const SpvVersion& spvVersion, EShLanguage, TSymbolTable&);
void add2ndGenerationSamplingImaging(int version, EProfile profile, const SpvVersion& spvVersion);
void addSubpassSampling(TSampler, const TString& typeName, int version, EProfile profile);
void addQueryFunctions(TSampler, const TString& typeName, int version, EProfile profile);

Externals/glslang/glslang/MachineIndependent/Intermediate.cpp (vendored, Normal file → Executable file, 1162 lines changed)

File diff suppressed because it is too large.


@ -67,6 +67,8 @@ void TParseContextBase::outputMessage(const TSourceLoc& loc, const char* szReaso
}
}
#if !defined(GLSLANG_WEB) || defined(GLSLANG_WEB_DEVEL)
void C_DECL TParseContextBase::error(const TSourceLoc& loc, const char* szReason, const char* szToken,
const char* szExtraInfoFormat, ...)
{
@ -113,6 +115,8 @@ void C_DECL TParseContextBase::ppWarn(const TSourceLoc& loc, const char* szReaso
va_end(args);
}
#endif
//
// Both test and if necessary, spit out an error, to see if the node is really
// an l-value that can be operated on this way.
@ -149,10 +153,18 @@ bool TParseContextBase::lValueErrorCheck(const TSourceLoc& loc, const char* op,
case EvqConst: message = "can't modify a const"; break;
case EvqConstReadOnly: message = "can't modify a const"; break;
case EvqUniform: message = "can't modify a uniform"; break;
#ifndef GLSLANG_WEB
case EvqBuffer:
if (node->getQualifier().readonly)
if (node->getQualifier().isReadOnly())
message = "can't modify a readonly buffer";
if (node->getQualifier().isShaderRecordNV())
message = "can't modify a shaderrecordnv qualified buffer";
break;
case EvqHitAttrNV:
if (language != EShLangIntersectNV)
message = "cannot modify hitAttributeNV in this stage";
break;
#endif
default:
//
@ -162,12 +174,17 @@ bool TParseContextBase::lValueErrorCheck(const TSourceLoc& loc, const char* op,
case EbtSampler:
message = "can't modify a sampler";
break;
case EbtAtomicUint:
message = "can't modify an atomic_uint";
break;
case EbtVoid:
message = "can't modify void";
break;
#ifndef GLSLANG_WEB
case EbtAtomicUint:
message = "can't modify an atomic_uint";
break;
case EbtAccStructNV:
message = "can't modify accelerationStructureNV";
break;
#endif
default:
break;
}
@ -219,7 +236,7 @@ void TParseContextBase::rValueErrorCheck(const TSourceLoc& loc, const char* op,
}
TIntermSymbol* symNode = node->getAsSymbolNode();
if (symNode && symNode->getQualifier().writeonly)
if (symNode && symNode->getQualifier().isWriteOnly())
error(loc, "can't read from writeonly object: ", op, symNode->getName().c_str());
}
@ -239,11 +256,17 @@ void TParseContextBase::trackLinkage(TSymbol& symbol)
// Give an error if not.
void TParseContextBase::checkIndex(const TSourceLoc& loc, const TType& type, int& index)
{
const auto sizeIsSpecializationExpression = [&type]() {
return type.containsSpecializationSize() &&
type.getArraySizes()->getOuterNode() != nullptr &&
type.getArraySizes()->getOuterNode()->getAsSymbolNode() == nullptr; };
if (index < 0) {
error(loc, "", "[", "index out of range '%d'", index);
index = 0;
} else if (type.isArray()) {
if (type.isSizedArray() && index >= type.getOuterArraySize()) {
if (type.isSizedArray() && !sizeIsSpecializationExpression() &&
index >= type.getOuterArraySize()) {
error(loc, "", "[", "array index out of range '%d'", index);
index = type.getOuterArraySize() - 1;
}
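The index check above now skips the upper-bound error when the outer array size is a specialization-constant expression, since the actual size is only fixed at pipeline creation. A standalone sketch of that decision, not glslang code, with illustrative field names:

    // Simplified model of TParseContextBase::checkIndex().
    struct ArrayInfo {
        bool sized;              // a compile-time (possibly default) size is known
        bool specSizeExpression; // size is a spec-constant expression rather than a plain symbol
        int  outerSize;
    };

    static int checkIndex(const ArrayInfo& a, int index, bool& error)
    {
        error = false;
        if (index < 0) { error = true; return 0; }
        if (a.sized && !a.specSizeExpression && index >= a.outerSize) {
            error = true;
            return a.outerSize - 1;  // clamp, matching the parser's recovery
        }
        return index;                // spec-expression-sized arrays are not range-checked here
    }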
@ -553,6 +576,7 @@ void TParseContextBase::parseSwizzleSelector(const TSourceLoc& loc, const TStrin
selector.push_back(0);
}
#ifdef ENABLE_HLSL
//
// Make the passed-in variable information become a member of the
// global uniform block. If this doesn't exist yet, make it.
@ -597,6 +621,7 @@ void TParseContextBase::growGlobalUniformBlock(const TSourceLoc& loc, TType& mem
++firstNewMember;
}
#endif
void TParseContextBase::finish()
{

Externals/glslang/glslang/MachineIndependent/ParseHelper.cpp (vendored, Executable file → Normal file, 2646 lines changed)

File diff suppressed because it is too large.


@ -1,6 +1,7 @@
//
// Copyright (C) 2002-2005 3Dlabs Inc. Ltd.
// Copyright (C) 2012-2013 LunarG, Inc.
// Copyright (C) 2015-2018 Google, Inc.
//
// All rights reserved.
//
@ -84,6 +85,7 @@ public:
statementNestingLevel(0), loopNestingLevel(0), structNestingLevel(0), controlFlowNestingLevel(0),
postEntryPointReturn(false),
contextPragma(true, false),
beginInvocationInterlockCount(0), endInvocationInterlockCount(0),
parsingBuiltins(parsingBuiltins), scanContext(nullptr), ppContext(nullptr),
limits(resources.limits),
globalUniformBlock(nullptr),
@ -95,6 +97,7 @@ public:
}
virtual ~TParseContextBase() { }
#if !defined(GLSLANG_WEB) || defined(GLSLANG_WEB_DEVEL)
virtual void C_DECL error(const TSourceLoc&, const char* szReason, const char* szToken,
const char* szExtraInfoFormat, ...);
virtual void C_DECL warn(const TSourceLoc&, const char* szReason, const char* szToken,
@ -103,6 +106,7 @@ public:
const char* szExtraInfoFormat, ...);
virtual void C_DECL ppWarn(const TSourceLoc&, const char* szReason, const char* szToken,
const char* szExtraInfoFormat, ...);
#endif
virtual void setLimits(const TBuiltInResource&) = 0;
@ -148,8 +152,10 @@ public:
extensionCallback(line, extension, behavior);
}
#ifdef ENABLE_HLSL
// Manage the global uniform block (default uniforms in GLSL, $Global in HLSL)
virtual void growGlobalUniformBlock(const TSourceLoc&, TType&, const TString& memberName, TTypeList* typeList = nullptr);
#endif
// Potentially rename shader entry point function
void renameShaderFunction(TString*& name) const
@ -181,6 +187,8 @@ public:
// the statementNestingLevel the current switch statement is at, which must match the level of its case statements
TList<int> switchLevel;
struct TPragma contextPragma;
int beginInvocationInterlockCount;
int endInvocationInterlockCount;
protected:
TParseContextBase(TParseContextBase&);
@ -275,7 +283,7 @@ public:
const TString* entryPoint = nullptr);
virtual ~TParseContext();
bool obeyPrecisionQualifiers() const { return precisionManager.respectingPrecisionQualifiers(); };
bool obeyPrecisionQualifiers() const { return precisionManager.respectingPrecisionQualifiers(); }
void setPrecisionDefaults();
void setLimits(const TBuiltInResource&) override;
@ -293,19 +301,21 @@ public:
TIntermTyped* handleBracketDereference(const TSourceLoc&, TIntermTyped* base, TIntermTyped* index);
void handleIndexLimits(const TSourceLoc&, TIntermTyped* base, TIntermTyped* index);
#ifndef GLSLANG_WEB
void makeEditable(TSymbol*&) override;
void ioArrayCheck(const TSourceLoc&, const TType&, const TString& identifier);
#endif
bool isIoResizeArray(const TType&) const;
void fixIoArraySize(const TSourceLoc&, TType&);
void ioArrayCheck(const TSourceLoc&, const TType&, const TString& identifier);
void handleIoResizeArrayAccess(const TSourceLoc&, TIntermTyped* base);
void checkIoArraysConsistency(const TSourceLoc&, bool tailOnly = false);
int getIoArrayImplicitSize() const;
int getIoArrayImplicitSize(const TQualifier&, TString* featureString = nullptr) const;
void checkIoArrayConsistency(const TSourceLoc&, int requiredSize, const char* feature, TType&, const TString&);
TIntermTyped* handleBinaryMath(const TSourceLoc&, const char* str, TOperator op, TIntermTyped* left, TIntermTyped* right);
TIntermTyped* handleUnaryMath(const TSourceLoc&, const char* str, TOperator op, TIntermTyped* childNode);
TIntermTyped* handleDotDereference(const TSourceLoc&, TIntermTyped* base, const TString& field);
void blockMemberExtensionCheck(const TSourceLoc&, const TIntermTyped* base, const TString& field);
void blockMemberExtensionCheck(const TSourceLoc&, const TIntermTyped* base, int member, const TString& memberName);
TFunction* handleFunctionDeclarator(const TSourceLoc&, TFunction& function, bool prototype);
TIntermAggregate* handleFunctionDefinition(const TSourceLoc&, TFunction&);
TIntermTyped* handleFunctionCall(const TSourceLoc&, TFunction*, TIntermNode*);
@ -323,6 +333,7 @@ public:
TFunction* handleConstructorCall(const TSourceLoc&, const TPublicType&);
void handlePrecisionQualifier(const TSourceLoc&, TQualifier&, TPrecisionQualifier);
void checkPrecisionQualifier(const TSourceLoc&, TPrecisionQualifier);
void memorySemanticsCheck(const TSourceLoc&, const TFunction&, const TIntermOperator& callNode);
void assignError(const TSourceLoc&, const char* op, TString left, TString right);
void unaryOpError(const TSourceLoc&, const char* op, TString operand);
@ -335,7 +346,7 @@ public:
void globalCheck(const TSourceLoc&, const char* token);
bool constructorError(const TSourceLoc&, TIntermNode*, TFunction&, TOperator, TType&);
bool constructorTextureSamplerError(const TSourceLoc&, const TFunction&);
void arraySizeCheck(const TSourceLoc&, TIntermTyped* expr, TArraySize&);
void arraySizeCheck(const TSourceLoc&, TIntermTyped* expr, TArraySize&, const char *sizeType);
bool arrayQualifierError(const TSourceLoc&, const TQualifier&);
bool arrayError(const TSourceLoc&, const TType&);
void arraySizeRequiredCheck(const TSourceLoc&, const TArraySizes&);
@ -347,6 +358,7 @@ public:
void boolCheck(const TSourceLoc&, const TPublicType&);
void samplerCheck(const TSourceLoc&, const TType&, const TString& identifier, TIntermTyped* initializer);
void atomicUintCheck(const TSourceLoc&, const TType&, const TString& identifier);
void accStructNVCheck(const TSourceLoc & loc, const TType & type, const TString & identifier);
void transparentOpaqueCheck(const TSourceLoc&, const TType&, const TString& identifier);
void memberQualifierCheck(glslang::TPublicType&);
void globalQualifierFixCheck(const TSourceLoc&, TQualifier&);
@ -367,6 +379,8 @@ public:
void nestedStructCheck(const TSourceLoc&);
void arrayObjectCheck(const TSourceLoc&, const TType&, const char* op);
void opaqueCheck(const TSourceLoc&, const TType&, const char* op);
void referenceCheck(const TSourceLoc&, const TType&, const char* op);
void storage16BitAssignmentCheck(const TSourceLoc&, const TType&, const char* op);
void specializationCheck(const TSourceLoc&, const TType&, const char* op);
void structTypeCheck(const TSourceLoc&, TPublicType&);
void inductiveLoopCheck(const TSourceLoc&, TIntermNode* init, TIntermLoop* loop);
@ -396,11 +410,12 @@ public:
TIntermTyped* addConstructor(const TSourceLoc&, TIntermNode*, const TType&);
TIntermTyped* constructAggregate(TIntermNode*, const TType&, int, const TSourceLoc&);
TIntermTyped* constructBuiltIn(const TType&, TOperator, TIntermTyped*, const TSourceLoc&, bool subset);
void inheritMemoryQualifiers(const TQualifier& from, TQualifier& to);
void declareBlock(const TSourceLoc&, TTypeList& typeList, const TString* instanceName = 0, TArraySizes* arraySizes = 0);
void blockStageIoCheck(const TSourceLoc&, const TQualifier&);
void blockQualifierCheck(const TSourceLoc&, const TQualifier&, bool instanceName);
void fixBlockLocations(const TSourceLoc&, TQualifier&, TTypeList&, bool memberWithLocation, bool memberWithoutLocation);
void fixBlockXfbOffsets(TQualifier&, TTypeList&);
void fixXfbOffsets(TQualifier&, TTypeList&);
void fixBlockUniformOffsets(TQualifier&, TTypeList&);
void addQualifierToExisting(const TSourceLoc&, TQualifier, const TString& identifier);
void addQualifierToExisting(const TSourceLoc&, TQualifier, TIdentifierList&);
@ -409,6 +424,7 @@ public:
void wrapupSwitchSubsequence(TIntermAggregate* statements, TIntermNode* branchNode);
TIntermNode* addSwitch(const TSourceLoc&, TIntermTyped* expression, TIntermAggregate* body);
#ifndef GLSLANG_WEB
TAttributeType attributeFromName(const TString& name) const;
TAttributes* makeAttributes(const TString& identifier) const;
TAttributes* makeAttributes(const TString& identifier, TIntermNode* node) const;
@ -417,9 +433,11 @@ public:
// Determine selection control from attributes
void handleSelectionAttributes(const TAttributes& attributes, TIntermNode*);
void handleSwitchAttributes(const TAttributes& attributes, TIntermNode*);
// Determine loop control from attributes
void handleLoopAttributes(const TAttributes& attributes, TIntermNode*);
#endif
void checkAndResizeMeshViewDim(const TSourceLoc&, TType&, bool isBlockMember);
protected:
void nonInitConstCheck(const TSourceLoc&, TString& identifier, TType& type);
@ -431,7 +449,9 @@ protected:
bool isRuntimeLength(const TIntermTyped&) const;
TIntermNode* executeInitializer(const TSourceLoc&, TIntermTyped* initializer, TVariable* variable);
TIntermTyped* convertInitializerList(const TSourceLoc&, const TType&, TIntermTyped* initializer);
#ifndef GLSLANG_WEB
void finish() override;
#endif
public:
//
@ -457,10 +477,11 @@ protected:
TQualifier globalUniformDefaults;
TQualifier globalInputDefaults;
TQualifier globalOutputDefaults;
int* atomicUintOffsets; // to become an array of the right size to hold an offset per binding point
TString currentCaller; // name of last function body entered (not valid when at global scope)
TIdSetType inductiveLoopIds;
#ifndef GLSLANG_WEB
int* atomicUintOffsets; // to become an array of the right size to hold an offset per binding point
bool anyIndexLimits;
TIdSetType inductiveLoopIds;
TVector<TIntermTyped*> needsIndexLimitationChecking;
//
@ -496,6 +517,7 @@ protected:
// array-sizing declarations
//
TVector<TSymbol*> ioArraySymbolResizeList;
#endif
};
} // end namespace glslang

File diff suppressed because it is too large.


@ -65,7 +65,7 @@ public:
}
if (names != nullptr) {
for (int i = 0; i < numSources; ++i)
loc[i].name = names[i];
loc[i].name = names[i] != nullptr ? NewPoolTString(names[i]) : nullptr;
}
loc[currentSource].line = 1;
logicalSourceLoc.init(1);
@ -170,16 +170,18 @@ public:
// for #line override in filename based parsing
void setFile(const char* filename)
{
logicalSourceLoc.name = filename;
loc[getLastValidSourceIndex()].name = filename;
TString* fn_tstr = NewPoolTString(filename);
logicalSourceLoc.name = fn_tstr;
loc[getLastValidSourceIndex()].name = fn_tstr;
}
void setFile(const char* filename, int i)
{
TString* fn_tstr = NewPoolTString(filename);
if (i == getLastValidSourceIndex()) {
logicalSourceLoc.name = filename;
logicalSourceLoc.name = fn_tstr;
}
loc[i].name = filename;
loc[i].name = fn_tstr;
}
void setString(int newString)


@ -50,7 +50,10 @@ class TParserToken;
class TScanContext {
public:
explicit TScanContext(TParseContextBase& pc) : parseContext(pc), afterType(false), field(false) { }
explicit TScanContext(TParseContextBase& pc) :
parseContext(pc),
afterType(false), afterStruct(false),
field(false), afterBuffer(false) { }
virtual ~TScanContext() { }
static void fillInKeywordMap();
@ -76,7 +79,9 @@ protected:
TParseContextBase& parseContext;
bool afterType; // true if we've recognized a type, so can only be looking for an identifier
bool afterStruct; // true if we've recognized the STRUCT keyword, so can only be looking for an identifier
bool field; // true if we're on a field, right after a '.'
bool afterBuffer; // true if we've recognized the BUFFER keyword
TSourceLoc loc;
TParserToken* parserToken;
TPpToken* ppToken;


@ -1,7 +1,7 @@
//
// Copyright (C) 2002-2005 3Dlabs Inc. Ltd.
// Copyright (C) 2013-2016 LunarG, Inc.
// Copyright (C) 2015-2017 Google, Inc.
// Copyright (C) 2015-2018 Google, Inc.
//
// All rights reserved.
//
@ -67,6 +67,11 @@
#include "iomapper.h"
#include "Initialize.h"
// TODO: this really shouldn't be here, it is only because of the trial addition
// of printing pre-processed tokens, which requires knowing the string literal
// token to print ", but none of that seems appropriate for this file.
#include "preprocessor/PpTokens.h"
namespace { // anonymous namespace for file-local functions and symbols
// Total number of successful initializers of glslang: a refcount
@ -283,6 +288,11 @@ void InitializeStageSymbolTable(TBuiltInParseables& builtInParseables, int versi
EShLanguage language, EShSource source, TInfoSink& infoSink, TSymbolTable** commonTable,
TSymbolTable** symbolTables)
{
#ifdef GLSLANG_WEB
profile = EEsProfile;
version = 310;
#endif
(*symbolTables[language]).adoptLevels(*commonTable[CommonIndex(profile, language)]);
InitializeSymbolTable(builtInParseables.getStageString(language), version, profile, spvVersion, language, source,
infoSink, *symbolTables[language]);
@ -299,6 +309,11 @@ void InitializeStageSymbolTable(TBuiltInParseables& builtInParseables, int versi
//
bool InitializeSymbolTables(TInfoSink& infoSink, TSymbolTable** commonTable, TSymbolTable** symbolTables, int version, EProfile profile, const SpvVersion& spvVersion, EShSource source)
{
#ifdef GLSLANG_WEB
profile = EEsProfile;
version = 310;
#endif
std::unique_ptr<TBuiltInParseables> builtInParseables(CreateBuiltInParseables(infoSink, source));
if (builtInParseables == nullptr)
@ -321,6 +336,7 @@ bool InitializeSymbolTables(TInfoSink& infoSink, TSymbolTable** commonTable, TS
InitializeStageSymbolTable(*builtInParseables, version, profile, spvVersion, EShLangFragment, source,
infoSink, commonTable, symbolTables);
#ifndef GLSLANG_WEB
// check for tessellation
if ((profile != EEsProfile && version >= 150) ||
(profile == EEsProfile && version >= 310)) {
@ -335,6 +351,7 @@ bool InitializeSymbolTables(TInfoSink& infoSink, TSymbolTable** commonTable, TS
(profile == EEsProfile && version >= 310))
InitializeStageSymbolTable(*builtInParseables, version, profile, spvVersion, EShLangGeometry, source,
infoSink, commonTable, symbolTables);
#endif
// check for compute
if ((profile != EEsProfile && version >= 420) ||
@ -342,6 +359,34 @@ bool InitializeSymbolTables(TInfoSink& infoSink, TSymbolTable** commonTable, TS
InitializeStageSymbolTable(*builtInParseables, version, profile, spvVersion, EShLangCompute, source,
infoSink, commonTable, symbolTables);
// check for ray tracing stages
if (profile != EEsProfile && version >= 450) {
InitializeStageSymbolTable(*builtInParseables, version, profile, spvVersion, EShLangRayGenNV, source,
infoSink, commonTable, symbolTables);
InitializeStageSymbolTable(*builtInParseables, version, profile, spvVersion, EShLangIntersectNV, source,
infoSink, commonTable, symbolTables);
InitializeStageSymbolTable(*builtInParseables, version, profile, spvVersion, EShLangAnyHitNV, source,
infoSink, commonTable, symbolTables);
InitializeStageSymbolTable(*builtInParseables, version, profile, spvVersion, EShLangClosestHitNV, source,
infoSink, commonTable, symbolTables);
InitializeStageSymbolTable(*builtInParseables, version, profile, spvVersion, EShLangMissNV, source,
infoSink, commonTable, symbolTables);
InitializeStageSymbolTable(*builtInParseables, version, profile, spvVersion, EShLangCallableNV, source,
infoSink, commonTable, symbolTables);
}
// check for mesh
if ((profile != EEsProfile && version >= 450) ||
(profile == EEsProfile && version >= 320))
InitializeStageSymbolTable(*builtInParseables, version, profile, spvVersion, EShLangMeshNV, source,
infoSink, commonTable, symbolTables);
// check for task
if ((profile != EEsProfile && version >= 450) ||
(profile == EEsProfile && version >= 320))
InitializeStageSymbolTable(*builtInParseables, version, profile, spvVersion, EShLangTaskNV, source,
infoSink, commonTable, symbolTables);
return true;
}
@ -439,6 +484,18 @@ void SetupBuiltinSymbolTable(int version, EProfile profile, const SpvVersion& sp
glslang::ReleaseGlobalLock();
}
// Function to Print all builtins
void DumpBuiltinSymbolTable(TInfoSink& infoSink, const TSymbolTable& symbolTable)
{
#ifndef GLSLANG_WEB
infoSink.debug << "BuiltinSymbolTable {\n";
symbolTable.dump(infoSink, true);
infoSink.debug << "}\n";
#endif
}
// Return true if the shader was correctly specified for version/profile/stage.
bool DeduceVersionProfile(TInfoSink& infoSink, EShLanguage stage, bool versionNotFirst, int defaultVersion,
EShSource source, int& version, EProfile& profile, const SpvVersion& spvVersion)
@ -534,6 +591,7 @@ bool DeduceVersionProfile(TInfoSink& infoSink, EShLanguage stage, bool versionNo
break;
}
#ifndef GLSLANG_WEB
// Correct for stage type...
switch (stage) {
case EShLangGeometry:
@ -565,6 +623,26 @@ bool DeduceVersionProfile(TInfoSink& infoSink, EShLanguage stage, bool versionNo
version = profile == EEsProfile ? 310 : 420;
}
break;
case EShLangRayGenNV:
case EShLangIntersectNV:
case EShLangAnyHitNV:
case EShLangClosestHitNV:
case EShLangMissNV:
case EShLangCallableNV:
if (profile == EEsProfile || version < 460) {
correct = false;
infoSink.info.message(EPrefixError, "#version: ray tracing shaders require non-es profile with version 460 or above");
version = 460;
}
break;
case EShLangMeshNV:
case EShLangTaskNV:
if ((profile == EEsProfile && version < 320) ||
(profile != EEsProfile && version < 450)) {
correct = false;
infoSink.info.message(EPrefixError, "#version: mesh/task shaders require es profile with version 320 or above, or non-es profile with version 450 or above");
version = profile == EEsProfile ? 320 : 450;
}
default:
break;
}
@ -577,15 +655,10 @@ bool DeduceVersionProfile(TInfoSink& infoSink, EShLanguage stage, bool versionNo
// Check for SPIR-V compatibility
if (spvVersion.spv != 0) {
switch (profile) {
case EEsProfile:
if (spvVersion.vulkan > 0 && version < 310) {
case EEsProfile:
if (version < 310) {
correct = false;
infoSink.info.message(EPrefixError, "#version: ES shaders for Vulkan SPIR-V require version 310 or higher");
version = 310;
}
if (spvVersion.openGl >= 100) {
correct = false;
infoSink.info.message(EPrefixError, "#version: ES shaders for OpenGL SPIR-V are not supported");
infoSink.info.message(EPrefixError, "#version: ES shaders for SPIR-V require version 310 or higher");
version = 310;
}
break;
@ -606,6 +679,7 @@ bool DeduceVersionProfile(TInfoSink& infoSink, EShLanguage stage, bool versionNo
break;
}
}
#endif
return correct;
}
@ -764,13 +838,17 @@ bool ProcessDeferred(
// Get all the stages, languages, clients, and other environment
// stuff sorted out.
EShSource source = (messages & EShMsgReadHlsl) != 0 ? EShSourceHlsl : EShSourceGlsl;
EShSource sourceGuess = (messages & EShMsgReadHlsl) != 0 ? EShSourceHlsl : EShSourceGlsl;
SpvVersion spvVersion;
EShLanguage stage = compiler->getLanguage();
TranslateEnvironment(environment, messages, source, stage, spvVersion);
TranslateEnvironment(environment, messages, sourceGuess, stage, spvVersion);
#ifdef ENABLE_HLSL
EShSource source = sourceGuess;
if (environment != nullptr && environment->target.hlslFunctionality1)
intermediate.setHlslFunctionality1();
#else
const EShSource source = EShSourceGlsl;
#endif
// First, without using the preprocessor or parser, find the #version, so we know what
// symbol tables, processing rules, etc. to set up. This does not need the extra strings
// outlined above, just the user shader, after the system and user preambles.
@ -783,6 +861,7 @@ bool ProcessDeferred(
: userInput.scanVersion(version, profile, versionNotFirstToken);
bool versionNotFound = version == 0;
if (forceDefaultVersionAndProfile && source == EShSourceGlsl) {
#ifndef GLSLANG_WEB
if (! (messages & EShMsgSuppressWarnings) && ! versionNotFound &&
(version != defaultVersion || profile != defaultProfile)) {
compiler->infoSink.info << "Warning, (version, profile) forced to be ("
@ -790,7 +869,7 @@ bool ProcessDeferred(
<< "), while in source code it is ("
<< version << ", " << ProfileName(profile) << ")\n";
}
#endif
if (versionNotFound) {
versionNotFirstToken = false;
versionNotFirst = false;
@ -802,7 +881,13 @@ bool ProcessDeferred(
bool goodVersion = DeduceVersionProfile(compiler->infoSink, stage,
versionNotFirst, defaultVersion, source, version, profile, spvVersion);
#ifdef GLSLANG_WEB
profile = EEsProfile;
version = 310;
#endif
bool versionWillBeError = (versionNotFound || (profile == EEsProfile && version >= 300 && versionNotFirst));
#ifndef GLSLANG_WEB
bool warnVersionNotFirst = false;
if (! versionWillBeError && versionNotFirstToken) {
if (messages & EShMsgRelaxedErrors)
@ -810,6 +895,7 @@ bool ProcessDeferred(
else
versionWillBeError = true;
}
#endif
intermediate.setSource(source);
intermediate.setVersion(version);
@ -818,12 +904,17 @@ bool ProcessDeferred(
RecordProcesses(intermediate, messages, sourceEntryPointName);
if (spvVersion.vulkan > 0)
intermediate.setOriginUpperLeft();
#ifdef ENABLE_HLSL
if ((messages & EShMsgHlslOffsets) || source == EShSourceHlsl)
intermediate.setHlslOffsets();
#endif
if (messages & EShMsgDebugInfo) {
intermediate.setSourceFile(names[numPre]);
for (int s = 0; s < numStrings; ++s)
intermediate.addSourceText(strings[numPre + s]);
for (int s = 0; s < numStrings; ++s) {
// The string may not be null-terminated, so make sure we provide
// the length along with the string.
intermediate.addSourceText(strings[numPre + s], lengths[numPre + s]);
}
}
SetupBuiltinSymbolTable(version, profile, spvVersion, source);
@ -845,6 +936,9 @@ bool ProcessDeferred(
return false;
}
if (messages & EShMsgBuiltinSymbolTable)
DumpBuiltinSymbolTable(compiler->infoSink, *symbolTable);
//
// Now we can process the full shader under proper symbols and rules.
//
@ -863,11 +957,13 @@ bool ProcessDeferred(
parseContext->setLimits(*resources);
if (! goodVersion)
parseContext->addError();
#ifndef GLSLANG_WEB
if (warnVersionNotFirst) {
TSourceLoc loc;
loc.init();
parseContext->warn(loc, "Illegal to have non-comment, non-whitespace tokens before #version", "#version", "");
}
#endif
parseContext->initializeExtensionBehavior();
@ -898,6 +994,8 @@ bool ProcessDeferred(
return success;
}
#ifndef GLSLANG_WEB
// Responsible for keeping track of the most recent source string and line in
// the preprocessor and outputting newlines appropriately if the source string
// or line changes.
@ -965,6 +1063,8 @@ private:
// DoPreprocessing is a valid ProcessingContext template argument,
// which only performs the preprocessing step of compilation.
// It places the result in the "string" argument to its constructor.
//
// This is not an officially supported or fully working path.
struct DoPreprocessing {
explicit DoPreprocessing(std::string* string): outputString(string) {}
bool operator()(TParseContextBase& parseContext, TPpContext& ppContext,
@ -1072,7 +1172,11 @@ struct DoPreprocessing {
outputBuffer += ' ';
}
lastToken = token;
if (token == PpAtomConstString)
outputBuffer += "\"";
outputBuffer += ppToken.name;
if (token == PpAtomConstString)
outputBuffer += "\"";
} while (true);
outputBuffer += '\n';
*outputString = std::move(outputBuffer);
@ -1088,6 +1192,8 @@ struct DoPreprocessing {
std::string* outputString;
};
#endif
// DoFullParse is a valid ProcessingConext template argument for fully
// parsing the shader. It populates the "intermediate" with the AST.
struct DoFullParse{
@ -1118,10 +1224,14 @@ struct DoFullParse{
}
};
#ifndef GLSLANG_WEB
// Take a single compilation unit, and run the preprocessor on it.
// Return: True if there were no issues found in preprocessing,
// False if during preprocessing any unknown version, pragmas or
// extensions were found.
//
// NOTE: Doing just preprocessing to obtain a correct preprocessed shader string
// is not an officially supported or fully working path.
bool PreprocessDeferred(
TCompiler* compiler,
const char* const shaderStrings[],
@ -1147,6 +1257,7 @@ bool PreprocessDeferred(
forwardCompatible, messages, intermediate, parser,
false, includer);
}
#endif
//
// do a partial compile on the given strings for a single compilation unit
@ -1266,7 +1377,7 @@ void ShDestruct(ShHandle handle)
//
// Cleanup symbol tables
//
int __fastcall ShFinalize()
int ShFinalize()
{
glslang::GetGlobalLock();
--NumberOfClients;
@ -1665,6 +1776,11 @@ void TShader::addProcesses(const std::vector<std::string>& p)
intermediate->addProcesses(p);
}
void TShader::setInvertY(bool invert) { intermediate->setInvertY(invert); }
void TShader::setNanMinMaxClamp(bool useNonNan) { intermediate->setNanMinMaxClamp(useNonNan); }
#ifndef GLSLANG_WEB
// Set binding base for given resource type
void TShader::setShiftBinding(TResourceType res, unsigned int base) {
intermediate->setShiftBinding(res, base);
@ -1692,15 +1808,27 @@ void TShader::setShiftSsboBinding(unsigned int base) { setShiftBinding(EResSs
// Enables binding automapping using TIoMapper
void TShader::setAutoMapBindings(bool map) { intermediate->setAutoMapBindings(map); }
// Enables position.Y output negation in vertex shader
void TShader::setInvertY(bool invert) { intermediate->setInvertY(invert); }
// Fragile: currently within one stage: simple auto-assignment of location
void TShader::setAutoMapLocations(bool map) { intermediate->setAutoMapLocations(map); }
// See comment above TDefaultHlslIoMapper in iomapper.cpp:
void TShader::setHlslIoMapping(bool hlslIoMap) { intermediate->setHlslIoMapping(hlslIoMap); }
void TShader::setFlattenUniformArrays(bool flatten) { intermediate->setFlattenUniformArrays(flatten); }
void TShader::addUniformLocationOverride(const char* name, int loc)
{
intermediate->addUniformLocationOverride(name, loc);
}
void TShader::setUniformLocationBase(int base)
{
intermediate->setUniformLocationBase(base);
}
void TShader::setNoStorageFormat(bool useUnknownFormat) { intermediate->setNoStorageFormat(useUnknownFormat); }
void TShader::setResourceSetBinding(const std::vector<std::string>& base) { intermediate->setResourceSetBinding(base); }
void TShader::setTextureSamplerTransformMode(EShTextureSamplerTransformMode mode) { intermediate->setTextureSamplerTransformMode(mode); }
#endif
#ifdef ENABLE_HLSL
// See comment above TDefaultHlslIoMapper in iomapper.cpp:
void TShader::setHlslIoMapping(bool hlslIoMap) { intermediate->setHlslIoMapping(hlslIoMap); }
void TShader::setFlattenUniformArrays(bool flatten) { intermediate->setFlattenUniformArrays(flatten); }
#endif
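
Since the setters above are the whole client-facing surface for binding remapping, a short usage sketch may help; it assumes `using namespace glslang`, a shader being configured before parse(), and arbitrary shift bases:

// Sketch: HLSL-style binding shifts plus auto-mapping (base values are arbitrary).
TShader shader(EShLangFragment);
shader.setShiftBinding(EResTexture, 20);   // t-registers
shader.setShiftBinding(EResSampler, 40);   // s-registers
shader.setShiftBinding(EResUbo,     60);   // b-registers
shader.setAutoMapBindings(true);           // let the IO mapper assign anything left over
shader.setAutoMapLocations(true);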
//
// Turn the shader strings into a parse tree in the TIntermediate.
@ -1724,8 +1852,12 @@ bool TShader::parse(const TBuiltInResource* builtInResources, int defaultVersion
&environment);
}
#ifndef GLSLANG_WEB
// Fill in a string with the result of preprocessing ShaderStrings
// Returns true if all extensions, pragmas and version strings were valid.
//
// NOTE: Doing just preprocessing to obtain a correct preprocessed shader string
// is not an officially supported or fully working path.
bool TShader::preprocess(const TBuiltInResource* builtInResources,
int defaultVersion, EProfile defaultProfile,
bool forceDefaultVersionAndProfile,
@ -1745,6 +1877,7 @@ bool TShader::preprocess(const TBuiltInResource* builtInResources,
defaultProfile, forceDefaultVersionAndProfile,
forwardCompatible, message, includer, *intermediate, output_string);
}
#endif
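
A caller-side sketch of this preprocess-only path, with the same caveat as the comment above; `shader` and `resources` are assumed to be a configured TShader and caller-provided limits, and ForbidIncluder is taken to be the stock no-#include policy from ShaderLang.h:

// Sketch: preprocess only, emitting the expanded source to stdout.
std::string preprocessed;
glslang::TShader::ForbidIncluder noIncludes;       // reject any #include
if (shader.preprocess(resources, 110, ENoProfile, false, false,
                      EShMsgOnlyPreprocessor, &preprocessed, noIncludes))
    fputs(preprocessed.c_str(), stdout);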
const char* TShader::getInfoLog()
{
@ -1756,7 +1889,11 @@ const char* TShader::getInfoDebugLog()
return infoSink->debug.c_str();
}
TProgram::TProgram() : reflection(0), ioMapper(nullptr), linked(false)
TProgram::TProgram() :
#ifndef GLSLANG_WEB
reflection(0),
#endif
linked(false)
{
pool = new TPoolAllocator;
infoSink = new TInfoSink;
@ -1768,9 +1905,10 @@ TProgram::TProgram() : reflection(0), ioMapper(nullptr), linked(false)
TProgram::~TProgram()
{
delete ioMapper;
delete infoSink;
#ifndef GLSLANG_WEB
delete reflection;
#endif
for (int s = 0; s < EShLangCount; ++s)
if (newedIntermediate[s])
@ -1815,6 +1953,7 @@ bool TProgram::linkStage(EShLanguage stage, EShMessages messages)
if (stages[stage].size() == 0)
return true;
#ifndef GLSLANG_WEB
int numEsShaders = 0, numNonEsShaders = 0;
for (auto it = stages[stage].begin(); it != stages[stage].end(); ++it) {
if ((*it)->intermediate->getProfile() == EEsProfile) {
@ -1863,7 +2002,9 @@ bool TProgram::linkStage(EShLanguage stage, EShMessages messages)
for (it = stages[stage].begin(); it != stages[stage].end(); ++it)
intermediate[stage]->merge(*infoSink, *(*it)->intermediate);
}
#else
intermediate[stage] = stages[stage].front()->intermediate;
#endif
intermediate[stage]->finalCheck(*infoSink, (messages & EShMsgKeepUncalled) != 0);
if (messages & EShMsgAST)
@ -1882,16 +2023,33 @@ const char* TProgram::getInfoDebugLog()
return infoSink->debug.c_str();
}
#ifndef GLSLANG_WEB
//
// Reflection implementation.
//
bool TProgram::buildReflection()
bool TProgram::buildReflection(int opts)
{
if (! linked || reflection)
if (! linked || reflection != nullptr)
return false;
reflection = new TReflection;
int firstStage = EShLangVertex, lastStage = EShLangFragment;
if (opts & EShReflectionIntermediateIO) {
// if we're reflecting intermediate I/O, determine the first and last stage linked and use those as the
// boundaries for which stages generate pipeline inputs/outputs
firstStage = EShLangCount;
lastStage = 0;
for (int s = 0; s < EShLangCount; ++s) {
if (intermediate[s]) {
firstStage = std::min(firstStage, s);
lastStage = std::max(lastStage, s);
}
}
}
reflection = new TReflection((EShReflectionOptions)opts, (EShLanguage)firstStage, (EShLanguage)lastStage);
for (int s = 0; s < EShLangCount; ++s) {
if (intermediate[s]) {
@ -1903,47 +2061,50 @@ bool TProgram::buildReflection()
return true;
}
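
To show what the new options parameter buys, a minimal sketch of reflecting pipeline I/O across whatever stages were linked; it assumes a successfully linked glslang::TProgram named `program` and the EShReflectionIntermediateIO flag from the public reflection options:

// Sketch: build reflection with intermediate I/O enabled, then walk the pipe interfaces.
if (program.buildReflection(EShReflectionDefault | EShReflectionIntermediateIO)) {
    for (int i = 0; i < program.getNumPipeInputs(); ++i)
        printf("in  %s\n", program.getPipeInput(i).name.c_str());
    for (int i = 0; i < program.getNumPipeOutputs(); ++i)
        printf("out %s\n", program.getPipeOutput(i).name.c_str());
}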
int TProgram::getNumLiveUniformVariables() const { return reflection->getNumUniforms(); }
int TProgram::getNumLiveUniformBlocks() const { return reflection->getNumUniformBlocks(); }
const char* TProgram::getUniformName(int index) const { return reflection->getUniform(index).name.c_str(); }
const char* TProgram::getUniformBlockName(int index) const { return reflection->getUniformBlock(index).name.c_str(); }
int TProgram::getUniformBlockSize(int index) const { return reflection->getUniformBlock(index).size; }
int TProgram::getUniformIndex(const char* name) const { return reflection->getIndex(name); }
int TProgram::getUniformBinding(int index) const { return reflection->getUniform(index).getBinding(); }
int TProgram::getUniformBlockBinding(int index) const { return reflection->getUniformBlock(index).getBinding(); }
int TProgram::getUniformBlockIndex(int index) const { return reflection->getUniform(index).index; }
int TProgram::getUniformBlockCounterIndex(int index) const { return reflection->getUniformBlock(index).counterIndex; }
int TProgram::getUniformType(int index) const { return reflection->getUniform(index).glDefineType; }
int TProgram::getUniformBufferOffset(int index) const { return reflection->getUniform(index).offset; }
int TProgram::getUniformArraySize(int index) const { return reflection->getUniform(index).size; }
int TProgram::getNumLiveAttributes() const { return reflection->getNumAttributes(); }
const char* TProgram::getAttributeName(int index) const { return reflection->getAttribute(index).name.c_str(); }
int TProgram::getAttributeType(int index) const { return reflection->getAttribute(index).glDefineType; }
const TType* TProgram::getAttributeTType(int index) const { return reflection->getAttribute(index).getType(); }
const TType* TProgram::getUniformTType(int index) const { return reflection->getUniform(index).getType(); }
const TType* TProgram::getUniformBlockTType(int index) const { return reflection->getUniformBlock(index).getType(); }
unsigned TProgram::getLocalSize(int dim) const { return reflection->getLocalSize(dim); }
unsigned TProgram::getLocalSize(int dim) const { return reflection->getLocalSize(dim); }
int TProgram::getReflectionIndex(const char* name) const { return reflection->getIndex(name); }
int TProgram::getReflectionPipeIOIndex(const char* name, const bool inOrOut) const
{ return reflection->getPipeIOIndex(name, inOrOut); }
void TProgram::dumpReflection() { reflection->dump(); }
int TProgram::getNumUniformVariables() const { return reflection->getNumUniforms(); }
const TObjectReflection& TProgram::getUniform(int index) const { return reflection->getUniform(index); }
int TProgram::getNumUniformBlocks() const { return reflection->getNumUniformBlocks(); }
const TObjectReflection& TProgram::getUniformBlock(int index) const { return reflection->getUniformBlock(index); }
int TProgram::getNumPipeInputs() const { return reflection->getNumPipeInputs(); }
const TObjectReflection& TProgram::getPipeInput(int index) const { return reflection->getPipeInput(index); }
int TProgram::getNumPipeOutputs() const { return reflection->getNumPipeOutputs(); }
const TObjectReflection& TProgram::getPipeOutput(int index) const { return reflection->getPipeOutput(index); }
int TProgram::getNumBufferVariables() const { return reflection->getNumBufferVariables(); }
const TObjectReflection& TProgram::getBufferVariable(int index) const { return reflection->getBufferVariable(index); }
int TProgram::getNumBufferBlocks() const { return reflection->getNumStorageBuffers(); }
const TObjectReflection& TProgram::getBufferBlock(int index) const { return reflection->getStorageBufferBlock(index); }
int TProgram::getNumAtomicCounters() const { return reflection->getNumAtomicCounters(); }
const TObjectReflection& TProgram::getAtomicCounter(int index) const { return reflection->getAtomicCounter(index); }
void TProgram::dumpReflection() { if (reflection != nullptr) reflection->dump(); }
//
// I/O mapping implementation.
//
bool TProgram::mapIO(TIoMapResolver* resolver)
bool TProgram::mapIO(TIoMapResolver* pResolver, TIoMapper* pIoMapper)
{
if (! linked || ioMapper)
if (! linked)
return false;
ioMapper = new TIoMapper;
TIoMapper* ioMapper = nullptr;
TIoMapper defaultIOMapper;
if (pIoMapper == nullptr)
ioMapper = &defaultIOMapper;
else
ioMapper = pIoMapper;
for (int s = 0; s < EShLangCount; ++s) {
if (intermediate[s]) {
if (! ioMapper->addStage((EShLanguage)s, *intermediate[s], *infoSink, resolver))
if (! ioMapper->addStage((EShLanguage)s, *intermediate[s], *infoSink, pResolver))
return false;
}
}
return true;
return ioMapper->doMap(pResolver, *infoSink);
}
#endif // GLSLANG_WEB
} // end namespace glslang
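
A caller-side sketch of the reworked mapIO() entry point above; called with no arguments it should behave as before (built-in TIoMapper, no resolver), assuming both parameters are defaulted to nullptr in the header, and a caller may now pass its own resolver or mapper instead:

// Sketch: default I/O mapping after a successful link.
if (program.link(EShMsgDefault) && program.mapIO())
    ; // bindings and locations are now resolved into the per-stage intermediates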

Externals/glslang/glslang/MachineIndependent/SymbolTable.cpp

@ -2,6 +2,7 @@
// Copyright (C) 2002-2005 3Dlabs Inc. Ltd.
// Copyright (C) 2012-2013 LunarG, Inc.
// Copyright (C) 2017 ARM Limited.
// Copyright (C) 2015-2018 Google, Inc.
//
// All rights reserved.
//
@ -60,58 +61,66 @@ void TType::buildMangledName(TString& mangledName) const
switch (basicType) {
case EbtFloat: mangledName += 'f'; break;
case EbtDouble: mangledName += 'd'; break;
case EbtFloat16: mangledName += "f16"; break;
case EbtInt: mangledName += 'i'; break;
case EbtUint: mangledName += 'u'; break;
case EbtBool: mangledName += 'b'; break;
#ifndef GLSLANG_WEB
case EbtDouble: mangledName += 'd'; break;
case EbtFloat16: mangledName += "f16"; break;
case EbtInt8: mangledName += "i8"; break;
case EbtUint8: mangledName += "u8"; break;
case EbtInt16: mangledName += "i16"; break;
case EbtUint16: mangledName += "u16"; break;
case EbtInt64: mangledName += "i64"; break;
case EbtUint64: mangledName += "u64"; break;
case EbtBool: mangledName += 'b'; break;
case EbtAtomicUint: mangledName += "au"; break;
case EbtAccStructNV: mangledName += "asnv"; break;
#endif
case EbtSampler:
switch (sampler.type) {
#ifdef AMD_EXTENSIONS
#ifndef GLSLANG_WEB
case EbtFloat16: mangledName += "f16"; break;
#endif
case EbtInt: mangledName += "i"; break;
case EbtUint: mangledName += "u"; break;
default: break; // some compilers want this
}
if (sampler.image)
mangledName += "I"; // a normal image
else if (sampler.sampler)
if (sampler.isImageClass())
mangledName += "I"; // a normal image or subpass
else if (sampler.isPureSampler())
mangledName += "p"; // a "pure" sampler
else if (!sampler.combined)
else if (!sampler.isCombined())
mangledName += "t"; // a "pure" texture
else
mangledName += "s"; // traditional combined sampler
if (sampler.arrayed)
if (sampler.isArrayed())
mangledName += "A";
if (sampler.shadow)
if (sampler.isShadow())
mangledName += "S";
if (sampler.external)
if (sampler.isExternal())
mangledName += "E";
if (sampler.isYuv())
mangledName += "Y";
switch (sampler.dim) {
case Esd1D: mangledName += "1"; break;
case Esd2D: mangledName += "2"; break;
case Esd3D: mangledName += "3"; break;
case EsdCube: mangledName += "C"; break;
#ifndef GLSLANG_WEB
case Esd1D: mangledName += "1"; break;
case EsdRect: mangledName += "R2"; break;
case EsdBuffer: mangledName += "B"; break;
case EsdSubpass: mangledName += "P"; break;
#endif
default: break; // some compilers want this
}
#ifdef ENABLE_HLSL
if (sampler.hasReturnStruct()) {
// Name mangle for sampler return struct uses struct table index.
mangledName += "-tx-struct";
char text[16]; // plenty enough space for the small integers.
snprintf(text, sizeof(text), "%d-", sampler.structReturnIndex);
snprintf(text, sizeof(text), "%d-", sampler.getStructReturnIndex());
mangledName += text;
} else {
switch (sampler.getVectorSize()) {
@ -121,8 +130,9 @@ void TType::buildMangledName(TString& mangledName) const
case 4: break; // default to prior name mangle behavior
}
}
#endif
if (sampler.ms)
if (sampler.isMultiSample())
mangledName += "M";
break;
case EbtStruct:
@ -166,44 +176,88 @@ void TType::buildMangledName(TString& mangledName) const
}
}
#ifndef GLSLANG_WEB
//
// Dump functions.
//
void TVariable::dump(TInfoSink& infoSink) const
void TSymbol::dumpExtensions(TInfoSink& infoSink) const
{
infoSink.debug << getName().c_str() << ": " << type.getStorageQualifierString() << " " << type.getBasicTypeString();
if (type.isArray()) {
infoSink.debug << "[0]";
int numExtensions = getNumExtensions();
if (numExtensions) {
infoSink.debug << " <";
for (int i = 0; i < numExtensions; i++)
infoSink.debug << getExtensions()[i] << ",";
infoSink.debug << ">";
}
}
void TVariable::dump(TInfoSink& infoSink, bool complete) const
{
if (complete) {
infoSink.debug << getName().c_str() << ": " << type.getCompleteString();
dumpExtensions(infoSink);
} else {
infoSink.debug << getName().c_str() << ": " << type.getStorageQualifierString() << " "
<< type.getBasicTypeString();
if (type.isArray())
infoSink.debug << "[0]";
}
infoSink.debug << "\n";
}
void TFunction::dump(TInfoSink& infoSink) const
void TFunction::dump(TInfoSink& infoSink, bool complete) const
{
infoSink.debug << getName().c_str() << ": " << returnType.getBasicTypeString() << " " << getMangledName().c_str() << "\n";
if (complete) {
infoSink.debug << getName().c_str() << ": " << returnType.getCompleteString() << " " << getName().c_str()
<< "(";
int numParams = getParamCount();
for (int i = 0; i < numParams; i++) {
const TParameter &param = parameters[i];
infoSink.debug << param.type->getCompleteString() << " "
<< (param.type->isStruct() ? "of " + param.type->getTypeName() + " " : "")
<< (param.name ? *param.name : "") << (i < numParams - 1 ? "," : "");
}
infoSink.debug << ")";
dumpExtensions(infoSink);
} else {
infoSink.debug << getName().c_str() << ": " << returnType.getBasicTypeString() << " "
<< getMangledName().c_str() << "n";
}
infoSink.debug << "\n";
}
void TAnonMember::dump(TInfoSink& TInfoSink) const
void TAnonMember::dump(TInfoSink& TInfoSink, bool) const
{
TInfoSink.debug << "anonymous member " << getMemberNumber() << " of " << getAnonContainer().getName().c_str() << "\n";
TInfoSink.debug << "anonymous member " << getMemberNumber() << " of " << getAnonContainer().getName().c_str()
<< "\n";
}
void TSymbolTableLevel::dump(TInfoSink &infoSink) const
void TSymbolTableLevel::dump(TInfoSink& infoSink, bool complete) const
{
tLevel::const_iterator it;
for (it = level.begin(); it != level.end(); ++it)
(*it).second->dump(infoSink);
(*it).second->dump(infoSink, complete);
}
void TSymbolTable::dump(TInfoSink &infoSink) const
void TSymbolTable::dump(TInfoSink& infoSink, bool complete) const
{
for (int level = currentLevel(); level >= 0; --level) {
infoSink.debug << "LEVEL " << level << "\n";
table[level]->dump(infoSink);
table[level]->dump(infoSink, complete);
}
}
#endif
//
// Functions have buried pointers to delete.
//
@ -283,19 +337,25 @@ TVariable::TVariable(const TVariable& copyOf) : TSymbol(copyOf)
{
type.deepCopy(copyOf.type);
userType = copyOf.userType;
numExtensions = 0;
extensions = 0;
if (copyOf.numExtensions != 0)
setExtensions(copyOf.numExtensions, copyOf.extensions);
// we don't support specialization-constant subtrees in cloned tables, only extensions
constSubtree = nullptr;
extensions = nullptr;
memberExtensions = nullptr;
if (copyOf.getNumExtensions() > 0)
setExtensions(copyOf.getNumExtensions(), copyOf.getExtensions());
if (copyOf.hasMemberExtensions()) {
for (int m = 0; m < (int)copyOf.type.getStruct()->size(); ++m) {
if (copyOf.getNumMemberExtensions(m) > 0)
setMemberExtensions(m, copyOf.getNumMemberExtensions(m), copyOf.getMemberExtensions(m));
}
}
if (! copyOf.constArray.empty()) {
assert(! copyOf.type.isStruct());
TConstUnionArray newArray(copyOf.constArray, 0, copyOf.constArray.size());
constArray = newArray;
}
// don't support specialization-constant subtrees in cloned tables
constSubtree = nullptr;
}
TVariable* TVariable::clone() const
@ -313,10 +373,9 @@ TFunction::TFunction(const TFunction& copyOf) : TSymbol(copyOf)
parameters.back().copyParam(copyOf.parameters[i]);
}
numExtensions = 0;
extensions = 0;
if (copyOf.extensions != 0)
setExtensions(copyOf.numExtensions, copyOf.extensions);
extensions = nullptr;
if (copyOf.getNumExtensions() > 0)
setExtensions(copyOf.getNumExtensions(), copyOf.getExtensions());
returnType.deepCopy(copyOf.returnType);
mangledName = copyOf.mangledName;
op = copyOf.op;
@ -355,12 +414,12 @@ TSymbolTableLevel* TSymbolTableLevel::clone() const
const TAnonMember* anon = iter->second->getAsAnonMember();
if (anon) {
// Insert all the anonymous members of this same container at once,
// avoid inserting the other members in the future, once this has been done,
// avoid inserting the remaining members in the future, once this has been done,
// allowing them to all be part of the same new container.
if (! containerCopied[anon->getAnonId()]) {
TVariable* container = anon->getAnonContainer().clone();
container->changeName(NewPoolTString(""));
// insert the whole container
// insert the container and all its members
symTableLevel->insert(*container, false);
containerCopied[anon->getAnonId()] = true;
}

Externals/glslang/glslang/MachineIndependent/SymbolTable.h

@ -1,6 +1,7 @@
//
// Copyright (C) 2002-2005 3Dlabs Inc. Ltd.
// Copyright (C) 2013 LunarG, Inc.
// Copyright (C) 2015-2018 Google, Inc.
//
// All rights reserved.
//
@ -78,10 +79,12 @@ class TVariable;
class TFunction;
class TAnonMember;
typedef TVector<const char*> TExtensionList;
class TSymbol {
public:
POOL_ALLOCATOR_NEW_DELETE(GetThreadPoolAllocator())
explicit TSymbol(const TString *n) : name(n), numExtensions(0), extensions(0), writable(true) { }
explicit TSymbol(const TString *n) : name(n), extensions(0), writable(true) { }
virtual TSymbol* clone() const = 0;
virtual ~TSymbol() { } // rely on all symbol owned memory coming from the pool
@ -103,18 +106,21 @@ public:
virtual TType& getWritableType() = 0;
virtual void setUniqueId(int id) { uniqueId = id; }
virtual int getUniqueId() const { return uniqueId; }
virtual void setExtensions(int num, const char* const exts[])
virtual void setExtensions(int numExts, const char* const exts[])
{
assert(extensions == 0);
assert(num > 0);
numExtensions = num;
extensions = NewPoolObject(exts[0], num);
for (int e = 0; e < num; ++e)
extensions[e] = exts[e];
assert(numExts > 0);
extensions = NewPoolObject(extensions);
for (int e = 0; e < numExts; ++e)
extensions->push_back(exts[e]);
}
virtual int getNumExtensions() const { return numExtensions; }
virtual const char** getExtensions() const { return extensions; }
virtual void dump(TInfoSink &infoSink) const = 0;
virtual int getNumExtensions() const { return extensions == nullptr ? 0 : (int)extensions->size(); }
virtual const char** getExtensions() const { return extensions->data(); }
#ifndef GLSLANG_WEB
virtual void dump(TInfoSink& infoSink, bool complete = false) const = 0;
void dumpExtensions(TInfoSink& infoSink) const;
#endif
virtual bool isReadOnly() const { return ! writable; }
virtual void makeReadOnly() { writable = false; }
@ -128,8 +134,7 @@ protected:
// For tracking what extensions must be present
// (don't use if correct version/profile is present).
int numExtensions;
const char** extensions; // an array of pointers to existing constant char strings
TExtensionList* extensions; // an array of pointers to existing constant char strings
//
// N.B.: Non-const functions that will be generally used should assert on this,
@ -154,7 +159,9 @@ public:
: TSymbol(name),
userType(uT),
constSubtree(nullptr),
anonId(-1) { type.shallowCopy(t); }
memberExtensions(nullptr),
anonId(-1)
{ type.shallowCopy(t); }
virtual TVariable* clone() const;
virtual ~TVariable() { }
@ -171,7 +178,27 @@ public:
virtual void setAnonId(int i) { anonId = i; }
virtual int getAnonId() const { return anonId; }
virtual void dump(TInfoSink &infoSink) const;
virtual void setMemberExtensions(int member, int numExts, const char* const exts[])
{
assert(type.isStruct());
assert(numExts > 0);
if (memberExtensions == nullptr) {
memberExtensions = NewPoolObject(memberExtensions);
memberExtensions->resize(type.getStruct()->size());
}
for (int e = 0; e < numExts; ++e)
(*memberExtensions)[member].push_back(exts[e]);
}
virtual bool hasMemberExtensions() const { return memberExtensions != nullptr; }
virtual int getNumMemberExtensions(int member) const
{
return memberExtensions == nullptr ? 0 : (int)(*memberExtensions)[member].size();
}
virtual const char** getMemberExtensions(int member) const { return (*memberExtensions)[member].data(); }
#ifndef GLSLANG_WEB
virtual void dump(TInfoSink& infoSink, bool complete = false) const;
#endif
protected:
explicit TVariable(const TVariable&);
@ -179,15 +206,14 @@ protected:
TType type;
bool userType;
// we are assuming that Pool Allocator will free the memory allocated to unionArray
// when this object is destroyed
// TODO: these two should be a union
// A variable could be a compile-time constant, or a specialization
// constant, or neither, but never both.
TConstUnionArray constArray; // for compile-time constant value
TIntermTyped* constSubtree; // for specialization constant computation
int anonId; // the ID used for anonymous blocks: TODO: see if uniqueId could serve a dual purpose
TConstUnionArray constArray; // for compile-time constant value
TIntermTyped* constSubtree; // for specialization constant computation
TVector<TExtensionList>* memberExtensions; // per-member extension list, allocated only when needed
int anonId; // the ID used for anonymous blocks: TODO: see if uniqueId could serve a dual purpose
};
//
@ -293,7 +319,9 @@ public:
virtual TParameter& operator[](int i) { assert(writable); return parameters[i]; }
virtual const TParameter& operator[](int i) const { return parameters[i]; }
virtual void dump(TInfoSink &infoSink) const override;
#ifndef GLSLANG_WEB
virtual void dump(TInfoSink& infoSink, bool complete = false) const override;
#endif
protected:
explicit TFunction(const TFunction&);
@ -324,35 +352,44 @@ protected:
//
class TAnonMember : public TSymbol {
public:
TAnonMember(const TString* n, unsigned int m, const TVariable& a, int an) : TSymbol(n), anonContainer(a), memberNumber(m), anonId(an) { }
virtual TAnonMember* clone() const;
TAnonMember(const TString* n, unsigned int m, TVariable& a, int an) : TSymbol(n), anonContainer(a), memberNumber(m), anonId(an) { }
virtual TAnonMember* clone() const override;
virtual ~TAnonMember() { }
virtual const TAnonMember* getAsAnonMember() const { return this; }
virtual const TAnonMember* getAsAnonMember() const override { return this; }
virtual const TVariable& getAnonContainer() const { return anonContainer; }
virtual unsigned int getMemberNumber() const { return memberNumber; }
virtual const TType& getType() const
virtual const TType& getType() const override
{
const TTypeList& types = *anonContainer.getType().getStruct();
return *types[memberNumber].type;
}
virtual TType& getWritableType()
virtual TType& getWritableType() override
{
assert(writable);
const TTypeList& types = *anonContainer.getType().getStruct();
return *types[memberNumber].type;
}
virtual void setExtensions(int numExts, const char* const exts[]) override
{
anonContainer.setMemberExtensions(memberNumber, numExts, exts);
}
virtual int getNumExtensions() const override { return anonContainer.getNumMemberExtensions(memberNumber); }
virtual const char** getExtensions() const override { return anonContainer.getMemberExtensions(memberNumber); }
virtual int getAnonId() const { return anonId; }
virtual void dump(TInfoSink &infoSink) const;
#ifndef GLSLANG_WEB
virtual void dump(TInfoSink& infoSink, bool complete = false) const override;
#endif
protected:
explicit TAnonMember(const TAnonMember&);
TAnonMember& operator=(const TAnonMember&);
const TVariable& anonContainer;
TVariable& anonContainer;
unsigned int memberNumber;
int anonId;
};
@ -514,7 +551,9 @@ public:
void relateToOperator(const char* name, TOperator op);
void setFunctionExtensions(const char* name, int num, const char* const extensions[]);
void dump(TInfoSink &infoSink) const;
#ifndef GLSLANG_WEB
void dump(TInfoSink& infoSink, bool complete = false) const;
#endif
TSymbolTableLevel* clone() const;
void readOnly();
@ -788,15 +827,36 @@ public:
table[level]->setFunctionExtensions(name, num, extensions);
}
void setVariableExtensions(const char* name, int num, const char* const extensions[])
void setVariableExtensions(const char* name, int numExts, const char* const extensions[])
{
TSymbol* symbol = find(TString(name));
if (symbol)
symbol->setExtensions(num, extensions);
if (symbol == nullptr)
return;
symbol->setExtensions(numExts, extensions);
}
void setVariableExtensions(const char* blockName, const char* name, int numExts, const char* const extensions[])
{
TSymbol* symbol = find(TString(blockName));
if (symbol == nullptr)
return;
TVariable* variable = symbol->getAsVariable();
assert(variable != nullptr);
const TTypeList& structure = *variable->getAsVariable()->getType().getStruct();
for (int member = 0; member < (int)structure.size(); ++member) {
if (structure[member].type->getFieldName().compare(name) == 0) {
variable->setMemberExtensions(member, numExts, extensions);
return;
}
}
}
int getMaxSymbolId() { return uniqueId; }
void dump(TInfoSink &infoSink) const;
#ifndef GLSLANG_WEB
void dump(TInfoSink& infoSink, bool complete = false) const;
#endif
void copyTable(const TSymbolTable& copyOf);
void setPreviousDefaultPrecisions(TPrecisionQualifier *p) { table[currentLevel()]->setPreviousDefaultPrecisions(p); }
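
The per-member overload above lets builtin initialization attach an extension to a single member of a block. A sketch of the intended call pattern follows, assuming a TSymbolTable named `symbolTable` being populated during builtin setup; the block and member names are only illustrative:

// Sketch: guard one member of a builtin block behind an extension.
const char* ext = E_GL_NV_mesh_shader;
symbolTable.setVariableExtensions("gl_MeshPerVertexNV", "gl_PositionPerViewNV", 1, &ext);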

Externals/glslang/glslang/MachineIndependent/Versions.cpp vendored Executable file → Normal file

@ -2,6 +2,7 @@
// Copyright (C) 2002-2005 3Dlabs Inc. Ltd.
// Copyright (C) 2012-2013 LunarG, Inc.
// Copyright (C) 2017 ARM Limited.
// Copyright (C) 2015-2018 Google, Inc.
//
// All rights reserved.
//
@ -144,6 +145,8 @@
namespace glslang {
#ifndef GLSLANG_WEB
//
// Initialize all extensions, almost always to 'disable', as once their features
// are incorporated into a core version, their features are supported through allowing that
@ -156,6 +159,7 @@ void TParseVersions::initializeExtensionBehavior()
extensionBehavior[E_GL_EXT_frag_depth] = EBhDisable;
extensionBehavior[E_GL_OES_EGL_image_external] = EBhDisable;
extensionBehavior[E_GL_OES_EGL_image_external_essl3] = EBhDisable;
extensionBehavior[E_GL_EXT_YUV_target] = EBhDisable;
extensionBehavior[E_GL_EXT_shader_texture_lod] = EBhDisable;
extensionBehavior[E_GL_EXT_shadow_samplers] = EBhDisable;
extensionBehavior[E_GL_ARB_texture_rectangle] = EBhDisable;
@ -168,8 +172,10 @@ void TParseVersions::initializeExtensionBehavior()
extensionBehavior[E_GL_ARB_tessellation_shader] = EBhDisable;
extensionBehavior[E_GL_ARB_enhanced_layouts] = EBhDisable;
extensionBehavior[E_GL_ARB_texture_cube_map_array] = EBhDisable;
extensionBehavior[E_GL_ARB_texture_multisample] = EBhDisable;
extensionBehavior[E_GL_ARB_shader_texture_lod] = EBhDisable;
extensionBehavior[E_GL_ARB_explicit_attrib_location] = EBhDisable;
extensionBehavior[E_GL_ARB_explicit_uniform_location] = EBhDisable;
extensionBehavior[E_GL_ARB_shader_image_load_store] = EBhDisable;
extensionBehavior[E_GL_ARB_shader_atomic_counters] = EBhDisable;
extensionBehavior[E_GL_ARB_shader_draw_parameters] = EBhDisable;
@ -178,6 +184,7 @@ void TParseVersions::initializeExtensionBehavior()
extensionBehavior[E_GL_ARB_shader_texture_image_samples] = EBhDisable;
extensionBehavior[E_GL_ARB_viewport_array] = EBhDisable;
extensionBehavior[E_GL_ARB_gpu_shader_int64] = EBhDisable;
extensionBehavior[E_GL_ARB_gpu_shader_fp64] = EBhDisable;
extensionBehavior[E_GL_ARB_shader_ballot] = EBhDisable;
extensionBehavior[E_GL_ARB_sparse_texture2] = EBhDisable;
extensionBehavior[E_GL_ARB_sparse_texture_clamp] = EBhDisable;
@ -185,6 +192,10 @@ void TParseVersions::initializeExtensionBehavior()
// extensionBehavior[E_GL_ARB_cull_distance] = EBhDisable; // present for 4.5, but need extension control over block members
extensionBehavior[E_GL_ARB_post_depth_coverage] = EBhDisable;
extensionBehavior[E_GL_ARB_shader_viewport_layer_array] = EBhDisable;
extensionBehavior[E_GL_ARB_fragment_shader_interlock] = EBhDisable;
extensionBehavior[E_GL_ARB_shader_clock] = EBhDisable;
extensionBehavior[E_GL_ARB_uniform_buffer_object] = EBhDisable;
extensionBehavior[E_GL_ARB_sample_shading] = EBhDisable;
extensionBehavior[E_GL_KHR_shader_subgroup_basic] = EBhDisable;
extensionBehavior[E_GL_KHR_shader_subgroup_vote] = EBhDisable;
@ -194,18 +205,30 @@ void TParseVersions::initializeExtensionBehavior()
extensionBehavior[E_GL_KHR_shader_subgroup_shuffle_relative] = EBhDisable;
extensionBehavior[E_GL_KHR_shader_subgroup_clustered] = EBhDisable;
extensionBehavior[E_GL_KHR_shader_subgroup_quad] = EBhDisable;
extensionBehavior[E_GL_KHR_memory_scope_semantics] = EBhDisable;
extensionBehavior[E_GL_EXT_shader_atomic_int64] = EBhDisable;
extensionBehavior[E_GL_EXT_shader_non_constant_global_initializers] = EBhDisable;
extensionBehavior[E_GL_EXT_shader_image_load_formatted] = EBhDisable;
extensionBehavior[E_GL_EXT_post_depth_coverage] = EBhDisable;
extensionBehavior[E_GL_EXT_control_flow_attributes] = EBhDisable;
extensionBehavior[E_GL_EXT_nonuniform_qualifier] = EBhDisable;
extensionBehavior[E_GL_EXT_samplerless_texture_functions] = EBhDisable;
extensionBehavior[E_GL_EXT_scalar_block_layout] = EBhDisable;
extensionBehavior[E_GL_EXT_fragment_invocation_density] = EBhDisable;
extensionBehavior[E_GL_EXT_buffer_reference] = EBhDisable;
extensionBehavior[E_GL_EXT_buffer_reference2] = EBhDisable;
extensionBehavior[E_GL_EXT_buffer_reference_uvec2] = EBhDisable;
extensionBehavior[E_GL_EXT_demote_to_helper_invocation] = EBhDisable;
extensionBehavior[E_GL_EXT_shader_16bit_storage] = EBhDisable;
extensionBehavior[E_GL_EXT_shader_8bit_storage] = EBhDisable;
// #line and #include
extensionBehavior[E_GL_GOOGLE_cpp_style_line_directive] = EBhDisable;
extensionBehavior[E_GL_GOOGLE_include_directive] = EBhDisable;
#ifdef AMD_EXTENSIONS
extensionBehavior[E_GL_AMD_shader_ballot] = EBhDisable;
extensionBehavior[E_GL_AMD_shader_trinary_minmax] = EBhDisable;
extensionBehavior[E_GL_AMD_shader_explicit_vertex_parameter] = EBhDisable;
@ -216,9 +239,9 @@ void TParseVersions::initializeExtensionBehavior()
extensionBehavior[E_GL_AMD_shader_image_load_store_lod] = EBhDisable;
extensionBehavior[E_GL_AMD_shader_fragment_mask] = EBhDisable;
extensionBehavior[E_GL_AMD_gpu_shader_half_float_fetch] = EBhDisable;
#endif
#ifdef NV_EXTENSIONS
extensionBehavior[E_GL_INTEL_shader_integer_functions2] = EBhDisable;
extensionBehavior[E_GL_NV_sample_mask_override_coverage] = EBhDisable;
extensionBehavior[E_SPV_NV_geometry_shader_passthrough] = EBhDisable;
extensionBehavior[E_GL_NV_viewport_array2] = EBhDisable;
@ -228,7 +251,16 @@ void TParseVersions::initializeExtensionBehavior()
extensionBehavior[E_GL_NV_conservative_raster_underestimation] = EBhDisable;
extensionBehavior[E_GL_NV_shader_noperspective_interpolation] = EBhDisable;
extensionBehavior[E_GL_NV_shader_subgroup_partitioned] = EBhDisable;
#endif
extensionBehavior[E_GL_NV_shading_rate_image] = EBhDisable;
extensionBehavior[E_GL_NV_ray_tracing] = EBhDisable;
extensionBehavior[E_GL_NV_fragment_shader_barycentric] = EBhDisable;
extensionBehavior[E_GL_NV_compute_shader_derivatives] = EBhDisable;
extensionBehavior[E_GL_NV_shader_texture_footprint] = EBhDisable;
extensionBehavior[E_GL_NV_mesh_shader] = EBhDisable;
extensionBehavior[E_GL_NV_cooperative_matrix] = EBhDisable;
extensionBehavior[E_GL_NV_shader_sm_builtins] = EBhDisable;
extensionBehavior[E_GL_NV_integer_cooperative_matrix] = EBhDisable;
// AEP
extensionBehavior[E_GL_ANDROID_extension_pack_es31a] = EBhDisable;
@ -261,41 +293,52 @@ void TParseVersions::initializeExtensionBehavior()
// EXT extensions
extensionBehavior[E_GL_EXT_device_group] = EBhDisable;
extensionBehavior[E_GL_EXT_multiview] = EBhDisable;
extensionBehavior[E_GL_EXT_shader_realtime_clock] = EBhDisable;
// OVR extensions
extensionBehavior[E_GL_OVR_multiview] = EBhDisable;
extensionBehavior[E_GL_OVR_multiview2] = EBhDisable;
// explicit types
extensionBehavior[E_GL_KHX_shader_explicit_arithmetic_types] = EBhDisable;
extensionBehavior[E_GL_KHX_shader_explicit_arithmetic_types_int8] = EBhDisable;
extensionBehavior[E_GL_KHX_shader_explicit_arithmetic_types_int16] = EBhDisable;
extensionBehavior[E_GL_KHX_shader_explicit_arithmetic_types_int32] = EBhDisable;
extensionBehavior[E_GL_KHX_shader_explicit_arithmetic_types_int64] = EBhDisable;
extensionBehavior[E_GL_KHX_shader_explicit_arithmetic_types_float16] = EBhDisable;
extensionBehavior[E_GL_KHX_shader_explicit_arithmetic_types_float32] = EBhDisable;
extensionBehavior[E_GL_KHX_shader_explicit_arithmetic_types_float64] = EBhDisable;
extensionBehavior[E_GL_EXT_shader_explicit_arithmetic_types] = EBhDisable;
extensionBehavior[E_GL_EXT_shader_explicit_arithmetic_types_int8] = EBhDisable;
extensionBehavior[E_GL_EXT_shader_explicit_arithmetic_types_int16] = EBhDisable;
extensionBehavior[E_GL_EXT_shader_explicit_arithmetic_types_int32] = EBhDisable;
extensionBehavior[E_GL_EXT_shader_explicit_arithmetic_types_int64] = EBhDisable;
extensionBehavior[E_GL_EXT_shader_explicit_arithmetic_types_float16] = EBhDisable;
extensionBehavior[E_GL_EXT_shader_explicit_arithmetic_types_float32] = EBhDisable;
extensionBehavior[E_GL_EXT_shader_explicit_arithmetic_types_float64] = EBhDisable;
// subgroup extended types
extensionBehavior[E_GL_EXT_shader_subgroup_extended_types_int8] = EBhDisable;
extensionBehavior[E_GL_EXT_shader_subgroup_extended_types_int16] = EBhDisable;
extensionBehavior[E_GL_EXT_shader_subgroup_extended_types_int64] = EBhDisable;
extensionBehavior[E_GL_EXT_shader_subgroup_extended_types_float16] = EBhDisable;
}
#endif // GLSLANG_WEB
// Get code that is not part of a shared symbol table, is specific to this shader,
// or needed by the preprocessor (which does not use a shared symbol table).
void TParseVersions::getPreamble(std::string& preamble)
{
if (profile == EEsProfile) {
if (isEsProfile()) {
preamble =
"#define GL_ES 1\n"
"#define GL_FRAGMENT_PRECISION_HIGH 1\n"
#ifdef GLSLANG_WEB
;
#else
"#define GL_OES_texture_3D 1\n"
"#define GL_OES_standard_derivatives 1\n"
"#define GL_EXT_frag_depth 1\n"
"#define GL_OES_EGL_image_external 1\n"
"#define GL_OES_EGL_image_external_essl3 1\n"
"#define GL_EXT_YUV_target 1\n"
"#define GL_EXT_shader_texture_lod 1\n"
"#define GL_EXT_shadow_samplers 1\n"
// AEP
"#define GL_ANDROID_extension_pack_es31a 1\n"
"#define GL_KHR_blend_equation_advanced 1\n"
"#define GL_OES_sample_variables 1\n"
"#define GL_OES_shader_image_atomic 1\n"
"#define GL_OES_shader_multisample_interpolation 1\n"
@ -323,11 +366,9 @@ void TParseVersions::getPreamble(std::string& preamble)
"#define GL_EXT_shader_non_constant_global_initializers 1\n"
;
#ifdef NV_EXTENSIONS
if (profile == EEsProfile && version >= 300) {
if (isEsProfile() && version >= 300) {
preamble += "#define GL_NV_shader_noperspective_interpolation 1\n";
}
#endif
} else {
preamble =
@ -341,8 +382,10 @@ void TParseVersions::getPreamble(std::string& preamble)
"#define GL_ARB_tessellation_shader 1\n"
"#define GL_ARB_enhanced_layouts 1\n"
"#define GL_ARB_texture_cube_map_array 1\n"
"#define GL_ARB_texture_multisample 1\n"
"#define GL_ARB_shader_texture_lod 1\n"
"#define GL_ARB_explicit_attrib_location 1\n"
"#define GL_ARB_explicit_uniform_location 1\n"
"#define GL_ARB_shader_image_load_store 1\n"
"#define GL_ARB_shader_atomic_counters 1\n"
"#define GL_ARB_shader_draw_parameters 1\n"
@ -351,17 +394,30 @@ void TParseVersions::getPreamble(std::string& preamble)
"#define GL_ARB_shader_texture_image_samples 1\n"
"#define GL_ARB_viewport_array 1\n"
"#define GL_ARB_gpu_shader_int64 1\n"
"#define GL_ARB_gpu_shader_fp64 1\n"
"#define GL_ARB_shader_ballot 1\n"
"#define GL_ARB_sparse_texture2 1\n"
"#define GL_ARB_sparse_texture_clamp 1\n"
"#define GL_ARB_shader_stencil_export 1\n"
"#define GL_ARB_sample_shading 1\n"
// "#define GL_ARB_cull_distance 1\n" // present for 4.5, but need extension control over block members
"#define GL_ARB_post_depth_coverage 1\n"
"#define GL_ARB_fragment_shader_interlock 1\n"
"#define GL_ARB_uniform_buffer_object 1\n"
"#define GL_EXT_shader_non_constant_global_initializers 1\n"
"#define GL_EXT_shader_image_load_formatted 1\n"
"#define GL_EXT_post_depth_coverage 1\n"
"#define GL_EXT_control_flow_attributes 1\n"
"#define GL_EXT_nonuniform_qualifier 1\n"
"#define GL_EXT_shader_16bit_storage 1\n"
"#define GL_EXT_shader_8bit_storage 1\n"
"#define GL_EXT_samplerless_texture_functions 1\n"
"#define GL_EXT_scalar_block_layout 1\n"
"#define GL_EXT_fragment_invocation_density 1\n"
"#define GL_EXT_buffer_reference 1\n"
"#define GL_EXT_buffer_reference2 1\n"
"#define GL_EXT_buffer_reference_uvec2 1\n"
"#define GL_EXT_demote_to_helper_invocation 1\n"
// GL_KHR_shader_subgroup
"#define GL_KHR_shader_subgroup_basic 1\n"
@ -373,7 +429,9 @@ void TParseVersions::getPreamble(std::string& preamble)
"#define GL_KHR_shader_subgroup_clustered 1\n"
"#define GL_KHR_shader_subgroup_quad 1\n"
#ifdef AMD_EXTENSIONS
"#define E_GL_EXT_shader_atomic_int64 1\n"
"#define E_GL_EXT_shader_realtime_clock 1\n"
"#define GL_AMD_shader_ballot 1\n"
"#define GL_AMD_shader_trinary_minmax 1\n"
"#define GL_AMD_shader_explicit_vertex_parameter 1\n"
@ -384,24 +442,37 @@ void TParseVersions::getPreamble(std::string& preamble)
"#define GL_AMD_shader_image_load_store_lod 1\n"
"#define GL_AMD_shader_fragment_mask 1\n"
"#define GL_AMD_gpu_shader_half_float_fetch 1\n"
#endif
#ifdef NV_EXTENSIONS
"#define GL_INTEL_shader_integer_functions2 1\n"
"#define GL_NV_sample_mask_override_coverage 1\n"
"#define GL_NV_geometry_shader_passthrough 1\n"
"#define GL_NV_viewport_array2 1\n"
"#define GL_NV_shader_atomic_int64 1\n"
"#define GL_NV_conservative_raster_underestimation 1\n"
"#define GL_NV_shader_subgroup_partitioned 1\n"
#endif
"#define GL_KHX_shader_explicit_arithmetic_types 1\n"
"#define GL_KHX_shader_explicit_arithmetic_types_int8 1\n"
"#define GL_KHX_shader_explicit_arithmetic_types_int16 1\n"
"#define GL_KHX_shader_explicit_arithmetic_types_int32 1\n"
"#define GL_KHX_shader_explicit_arithmetic_types_int64 1\n"
"#define GL_KHX_shader_explicit_arithmetic_types_float16 1\n"
"#define GL_KHX_shader_explicit_arithmetic_types_float32 1\n"
"#define GL_KHX_shader_explicit_arithmetic_types_float64 1\n"
"#define GL_NV_shading_rate_image 1\n"
"#define GL_NV_ray_tracing 1\n"
"#define GL_NV_fragment_shader_barycentric 1\n"
"#define GL_NV_compute_shader_derivatives 1\n"
"#define GL_NV_shader_texture_footprint 1\n"
"#define GL_NV_mesh_shader 1\n"
"#define GL_NV_cooperative_matrix 1\n"
"#define GL_NV_integer_cooperative_matrix 1\n"
"#define GL_EXT_shader_explicit_arithmetic_types 1\n"
"#define GL_EXT_shader_explicit_arithmetic_types_int8 1\n"
"#define GL_EXT_shader_explicit_arithmetic_types_int16 1\n"
"#define GL_EXT_shader_explicit_arithmetic_types_int32 1\n"
"#define GL_EXT_shader_explicit_arithmetic_types_int64 1\n"
"#define GL_EXT_shader_explicit_arithmetic_types_float16 1\n"
"#define GL_EXT_shader_explicit_arithmetic_types_float32 1\n"
"#define GL_EXT_shader_explicit_arithmetic_types_float64 1\n"
"#define GL_EXT_shader_subgroup_extended_types_int8 1\n"
"#define GL_EXT_shader_subgroup_extended_types_int16 1\n"
"#define GL_EXT_shader_subgroup_extended_types_int64 1\n"
"#define GL_EXT_shader_subgroup_extended_types_float16 1\n"
;
if (version >= 150) {
@ -411,13 +482,16 @@ void TParseVersions::getPreamble(std::string& preamble)
if (profile == ECompatibilityProfile)
preamble += "#define GL_compatibility_profile 1\n";
}
#endif // GLSLANG_WEB
}
if ((profile != EEsProfile && version >= 140) ||
(profile == EEsProfile && version >= 310)) {
#ifndef GLSLANG_WEB
if ((!isEsProfile() && version >= 140) ||
(isEsProfile() && version >= 310)) {
preamble +=
"#define GL_EXT_device_group 1\n"
"#define GL_EXT_multiview 1\n"
"#define GL_NV_shader_sm_builtins 1\n"
;
}
@ -432,7 +506,9 @@ void TParseVersions::getPreamble(std::string& preamble)
preamble +=
"#define GL_GOOGLE_cpp_style_line_directive 1\n"
"#define GL_GOOGLE_include_directive 1\n"
"#define GL_KHR_blend_equation_advanced 1\n"
;
#endif
// #define VULKAN XXXX
const int numberBufSize = 12;
@ -443,6 +519,8 @@ void TParseVersions::getPreamble(std::string& preamble)
preamble += numberBuf;
preamble += "\n";
}
#ifndef GLSLANG_WEB
// #define GL_SPIRV XXXX
if (spvVersion.openGl > 0) {
preamble += "#define GL_SPIRV ";
@ -450,22 +528,7 @@ void TParseVersions::getPreamble(std::string& preamble)
preamble += numberBuf;
preamble += "\n";
}
}
//
// When to use requireProfile():
//
// Use if only some profiles support a feature. However, if within a profile the feature
// is version or extension specific, follow this call with calls to profileRequires().
//
// Operation: If the current profile is not one of the profileMask,
// give an error message.
//
void TParseVersions::requireProfile(const TSourceLoc& loc, int profileMask, const char* featureDesc)
{
if (! (profile & profileMask))
error(loc, "not supported with this profile:", featureDesc, ProfileName(profile));
#endif
}
//
@ -475,62 +538,25 @@ const char* StageName(EShLanguage stage)
{
switch(stage) {
case EShLangVertex: return "vertex";
case EShLangFragment: return "fragment";
case EShLangCompute: return "compute";
#ifndef GLSLANG_WEB
case EShLangTessControl: return "tessellation control";
case EShLangTessEvaluation: return "tessellation evaluation";
case EShLangGeometry: return "geometry";
case EShLangFragment: return "fragment";
case EShLangCompute: return "compute";
case EShLangRayGenNV: return "ray-generation";
case EShLangIntersectNV: return "intersection";
case EShLangAnyHitNV: return "any-hit";
case EShLangClosestHitNV: return "closest-hit";
case EShLangMissNV: return "miss";
case EShLangCallableNV: return "callable";
case EShLangMeshNV: return "mesh";
case EShLangTaskNV: return "task";
#endif
default: return "unknown stage";
}
}
//
// When to use profileRequires():
//
// If a set of profiles have the same requirements for what version or extensions
// are needed to support a feature.
//
// It must be called for each profile that needs protection. Use requireProfile() first
// to reduce that set of profiles.
//
// Operation: Will issue warnings/errors based on the current profile, version, and extension
// behaviors. It only checks extensions when the current profile is one of the profileMask.
//
// A minVersion of 0 means no version of the profileMask support this in core,
// the extension must be present.
//
// entry point that takes multiple extensions
void TParseVersions::profileRequires(const TSourceLoc& loc, int profileMask, int minVersion, int numExtensions, const char* const extensions[], const char* featureDesc)
{
if (profile & profileMask) {
bool okay = false;
if (minVersion > 0 && version >= minVersion)
okay = true;
for (int i = 0; i < numExtensions; ++i) {
switch (getExtensionBehavior(extensions[i])) {
case EBhWarn:
infoSink.info.message(EPrefixWarning, ("extension " + TString(extensions[i]) + " is being used for " + featureDesc).c_str(), loc);
// fall through
case EBhRequire:
case EBhEnable:
okay = true;
break;
default: break; // some compilers want this
}
}
if (! okay)
error(loc, "not supported for this version or the enabled extensions", featureDesc, "");
}
}
// entry point for the above that takes a single extension
void TParseVersions::profileRequires(const TSourceLoc& loc, int profileMask, int minVersion, const char* extension, const char* featureDesc)
{
profileRequires(loc, profileMask, minVersion, extension ? 1 : 0, &extension, featureDesc);
}
//
// When to use requireStage()
//
@ -551,6 +577,75 @@ void TParseVersions::requireStage(const TSourceLoc& loc, EShLanguage stage, cons
requireStage(loc, static_cast<EShLanguageMask>(1 << stage), featureDesc);
}
#ifndef GLSLANG_WEB
//
// When to use requireProfile():
//
// Use if only some profiles support a feature. However, if within a profile the feature
// is version or extension specific, follow this call with calls to profileRequires().
//
// Operation: If the current profile is not one of the profileMask,
// give an error message.
//
void TParseVersions::requireProfile(const TSourceLoc& loc, int profileMask, const char* featureDesc)
{
if (! (profile & profileMask))
error(loc, "not supported with this profile:", featureDesc, ProfileName(profile));
}
//
// When to use profileRequires():
//
// If a set of profiles have the same requirements for what version or extensions
// are needed to support a feature.
//
// It must be called for each profile that needs protection. Use requireProfile() first
// to reduce that set of profiles.
//
// Operation: Will issue warnings/errors based on the current profile, version, and extension
// behaviors. It only checks extensions when the current profile is one of the profileMask.
//
// A minVersion of 0 means no version of the profileMask support this in core,
// the extension must be present.
//
// entry point that takes multiple extensions
void TParseVersions::profileRequires(const TSourceLoc& loc, int profileMask, int minVersion, int numExtensions,
const char* const extensions[], const char* featureDesc)
{
if (profile & profileMask) {
bool okay = minVersion > 0 && version >= minVersion;
#ifndef GLSLANG_WEB
for (int i = 0; i < numExtensions; ++i) {
switch (getExtensionBehavior(extensions[i])) {
case EBhWarn:
infoSink.info.message(EPrefixWarning, ("extension " + TString(extensions[i]) + " is being used for " + featureDesc).c_str(), loc);
// fall through
case EBhRequire:
case EBhEnable:
okay = true;
break;
default: break; // some compilers want this
}
}
#endif
if (! okay)
error(loc, "not supported for this version or the enabled extensions", featureDesc, "");
}
}
// entry point for the above that takes a single extension
void TParseVersions::profileRequires(const TSourceLoc& loc, int profileMask, int minVersion, const char* extension,
const char* featureDesc)
{
profileRequires(loc, profileMask, minVersion, extension ? 1 : 0, &extension, featureDesc);
}
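
Putting the two together, a typical feature gate follows the order the comments above prescribe; this fragment is only a shape sketch inside a TParseVersions member (fp64 as the example feature), mirroring doubleCheck() further down:

// Narrow the profiles first, then require the version or extension within them.
requireProfile(loc, ECoreProfile | ECompatibilityProfile, "double");
profileRequires(loc, ECoreProfile | ECompatibilityProfile, 400, E_GL_ARB_gpu_shader_fp64, "double");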
void TParseVersions::unimplemented(const TSourceLoc& loc, const char* featureDesc)
{
error(loc, "feature not yet implemented", featureDesc, "");
}
//
// Within a set of profiles, see if a feature is deprecated and give an error or warning based on whether
// a future compatibility context is being use.
@ -584,11 +679,6 @@ void TParseVersions::requireNotRemoved(const TSourceLoc& loc, int profileMask, i
}
}
void TParseVersions::unimplemented(const TSourceLoc& loc, const char* featureDesc)
{
error(loc, "feature not yet implemented", featureDesc, "");
}
// Returns true if at least one of the extensions in the extensions parameter is requested. Otherwise, returns false.
// Warns appropriately if the requested behavior of an extension is "warn".
bool TParseVersions::checkExtensionsRequested(const TSourceLoc& loc, int numExtensions, const char* const extensions[], const char* featureDesc)
@ -708,6 +798,9 @@ void TParseVersions::updateExtensionBehavior(int line, const char* extension, co
return;
}
// check if extension is used with correct shader stage
checkExtensionStage(getCurrentLoc(), extension);
// update the requested extension
updateExtensionBehavior(extension, behavior);
@ -754,10 +847,22 @@ void TParseVersions::updateExtensionBehavior(int line, const char* extension, co
updateExtensionBehavior(line, "GL_KHR_shader_subgroup_basic", behaviorString);
else if (strcmp(extension, "GL_KHR_shader_subgroup_quad") == 0)
updateExtensionBehavior(line, "GL_KHR_shader_subgroup_basic", behaviorString);
#ifdef NV_EXTENSIONS
else if (strcmp(extension, "GL_NV_shader_subgroup_partitioned") == 0)
updateExtensionBehavior(line, "GL_KHR_shader_subgroup_basic", behaviorString);
#endif
else if (strcmp(extension, "GL_EXT_buffer_reference2") == 0 ||
strcmp(extension, "GL_EXT_buffer_reference_uvec2") == 0)
updateExtensionBehavior(line, "GL_EXT_buffer_reference", behaviorString);
else if (strcmp(extension, "GL_NV_integer_cooperative_matrix") == 0)
updateExtensionBehavior(line, "GL_NV_cooperative_matrix", behaviorString);
// subgroup extended types to explicit types
else if (strcmp(extension, "GL_EXT_shader_subgroup_extended_types_int8") == 0)
updateExtensionBehavior(line, "GL_EXT_shader_explicit_arithmetic_types_int8", behaviorString);
else if (strcmp(extension, "GL_EXT_shader_subgroup_extended_types_int16") == 0)
updateExtensionBehavior(line, "GL_EXT_shader_explicit_arithmetic_types_int16", behaviorString);
else if (strcmp(extension, "GL_EXT_shader_subgroup_extended_types_int64") == 0)
updateExtensionBehavior(line, "GL_EXT_shader_explicit_arithmetic_types_int64", behaviorString);
else if (strcmp(extension, "GL_EXT_shader_subgroup_extended_types_float16") == 0)
updateExtensionBehavior(line, "GL_EXT_shader_explicit_arithmetic_types_float16", behaviorString);
}
void TParseVersions::updateExtensionBehavior(const char* extension, TExtensionBehavior behavior)
@ -800,6 +905,18 @@ void TParseVersions::updateExtensionBehavior(const char* extension, TExtensionBe
}
}
// Check if extension is used with correct shader stage.
void TParseVersions::checkExtensionStage(const TSourceLoc& loc, const char * const extension)
{
// GL_NV_mesh_shader extension is only allowed in task/mesh shaders
if (strcmp(extension, "GL_NV_mesh_shader") == 0) {
requireStage(loc, (EShLanguageMask)(EShLangTaskNVMask | EShLangMeshNVMask | EShLangFragmentMask),
"#extension GL_NV_mesh_shader");
profileRequires(loc, ECoreProfile, 450, 0, "#extension GL_NV_mesh_shader");
profileRequires(loc, EEsProfile, 320, 0, "#extension GL_NV_mesh_shader");
}
}
// Call for any operation needing full GLSL integer data-type support.
void TParseVersions::fullIntegerCheck(const TSourceLoc& loc, const char* op)
{
@ -810,28 +927,98 @@ void TParseVersions::fullIntegerCheck(const TSourceLoc& loc, const char* op)
// Call for any operation needing GLSL double data-type support.
void TParseVersions::doubleCheck(const TSourceLoc& loc, const char* op)
{
requireProfile(loc, ECoreProfile | ECompatibilityProfile, op);
profileRequires(loc, ECoreProfile, 400, nullptr, op);
profileRequires(loc, ECompatibilityProfile, 400, nullptr, op);
//requireProfile(loc, ECoreProfile | ECompatibilityProfile, op);
profileRequires(loc, ECoreProfile | ECompatibilityProfile, 400, E_GL_ARB_gpu_shader_fp64, op);
}
// Call for any operation needing GLSL float16 data-type support.
void TParseVersions::float16Check(const TSourceLoc& loc, const char* op, bool builtIn)
{
if (!builtIn) {
#if AMD_EXTENSIONS
const char* const extensions[3] = {E_GL_AMD_gpu_shader_half_float,
E_GL_KHX_shader_explicit_arithmetic_types,
E_GL_KHX_shader_explicit_arithmetic_types_float16};
const char* const extensions[] = {
E_GL_AMD_gpu_shader_half_float,
E_GL_EXT_shader_explicit_arithmetic_types,
E_GL_EXT_shader_explicit_arithmetic_types_float16};
requireExtensions(loc, sizeof(extensions)/sizeof(extensions[0]), extensions, op);
}
}
#else
const char* const extensions[2] = {E_GL_KHX_shader_explicit_arithmetic_types,
E_GL_KHX_shader_explicit_arithmetic_types_float16};
#endif
requireExtensions(loc, sizeof(extensions)/sizeof(extensions[0]), extensions, "explicit types");
requireProfile(loc, ECoreProfile | ECompatibilityProfile, op);
profileRequires(loc, ECoreProfile, 450, nullptr, op);
profileRequires(loc, ECompatibilityProfile, 450, nullptr, op);
bool TParseVersions::float16Arithmetic()
{
const char* const extensions[] = {
E_GL_AMD_gpu_shader_half_float,
E_GL_EXT_shader_explicit_arithmetic_types,
E_GL_EXT_shader_explicit_arithmetic_types_float16};
return extensionsTurnedOn(sizeof(extensions)/sizeof(extensions[0]), extensions);
}
bool TParseVersions::int16Arithmetic()
{
const char* const extensions[] = {
E_GL_AMD_gpu_shader_int16,
E_GL_EXT_shader_explicit_arithmetic_types,
E_GL_EXT_shader_explicit_arithmetic_types_int16};
return extensionsTurnedOn(sizeof(extensions)/sizeof(extensions[0]), extensions);
}
bool TParseVersions::int8Arithmetic()
{
const char* const extensions[] = {
E_GL_EXT_shader_explicit_arithmetic_types,
E_GL_EXT_shader_explicit_arithmetic_types_int8};
return extensionsTurnedOn(sizeof(extensions)/sizeof(extensions[0]), extensions);
}
void TParseVersions::requireFloat16Arithmetic(const TSourceLoc& loc, const char* op, const char* featureDesc)
{
TString combined;
combined = op;
combined += ": ";
combined += featureDesc;
const char* const extensions[] = {
E_GL_AMD_gpu_shader_half_float,
E_GL_EXT_shader_explicit_arithmetic_types,
E_GL_EXT_shader_explicit_arithmetic_types_float16};
requireExtensions(loc, sizeof(extensions)/sizeof(extensions[0]), extensions, combined.c_str());
}
void TParseVersions::requireInt16Arithmetic(const TSourceLoc& loc, const char* op, const char* featureDesc)
{
TString combined;
combined = op;
combined += ": ";
combined += featureDesc;
const char* const extensions[] = {
E_GL_AMD_gpu_shader_int16,
E_GL_EXT_shader_explicit_arithmetic_types,
E_GL_EXT_shader_explicit_arithmetic_types_int16};
requireExtensions(loc, sizeof(extensions)/sizeof(extensions[0]), extensions, combined.c_str());
}
void TParseVersions::requireInt8Arithmetic(const TSourceLoc& loc, const char* op, const char* featureDesc)
{
TString combined;
combined = op;
combined += ": ";
combined += featureDesc;
const char* const extensions[] = {
E_GL_EXT_shader_explicit_arithmetic_types,
E_GL_EXT_shader_explicit_arithmetic_types_int8};
requireExtensions(loc, sizeof(extensions)/sizeof(extensions[0]), extensions, combined.c_str());
}
void TParseVersions::float16ScalarVectorCheck(const TSourceLoc& loc, const char* op, bool builtIn)
{
if (!builtIn) {
const char* const extensions[] = {
E_GL_AMD_gpu_shader_half_float,
E_GL_EXT_shader_16bit_storage,
E_GL_EXT_shader_explicit_arithmetic_types,
E_GL_EXT_shader_explicit_arithmetic_types_float16};
requireExtensions(loc, sizeof(extensions)/sizeof(extensions[0]), extensions, op);
}
}
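Note: the GL_KHX_shader_explicit_arithmetic_types* names are replaced by the GL_EXT_shader_explicit_arithmetic_types* family throughout, the generic "explicit types" feature string becomes the actual operation name, and the old requireProfile()/version-4.50 gating is dropped in favour of plain extension checks (with the AMD half-float and int16 extensions still accepted where applicable). A float16 shader that exercises these checks, kept as a C++ string constant; it could be fed through a helper like the checkParse() sketch earlier (both the helper and the shader are illustrative assumptions):

    // Hypothetical fragment shader doing float16 arithmetic via the new EXT extension name.
    static const char* const kFloat16Source = R"GLSL(
    #version 450
    #extension GL_EXT_shader_explicit_arithmetic_types_float16 : require
    layout(location = 0) out vec4 color;
    void main()
    {
        float16_t a = float16_t(0.25);
        float16_t b = a + float16_t(0.5);   // goes through requireFloat16Arithmetic()
        color = vec4(float(b));
    }
    )GLSL";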
@ -839,12 +1026,9 @@ void TParseVersions::float16Check(const TSourceLoc& loc, const char* op, bool bu
void TParseVersions::explicitFloat32Check(const TSourceLoc& loc, const char* op, bool builtIn)
{
if (!builtIn) {
const char* const extensions[2] = {E_GL_KHX_shader_explicit_arithmetic_types,
E_GL_KHX_shader_explicit_arithmetic_types_float32};
requireExtensions(loc, 2, extensions, "explicit types");
requireProfile(loc, ECoreProfile | ECompatibilityProfile, op);
profileRequires(loc, ECoreProfile, 450, nullptr, op);
profileRequires(loc, ECompatibilityProfile, 450, nullptr, op);
const char* const extensions[2] = {E_GL_EXT_shader_explicit_arithmetic_types,
E_GL_EXT_shader_explicit_arithmetic_types_float32};
requireExtensions(loc, 2, extensions, op);
}
}
@ -852,12 +1036,11 @@ void TParseVersions::explicitFloat32Check(const TSourceLoc& loc, const char* op,
void TParseVersions::explicitFloat64Check(const TSourceLoc& loc, const char* op, bool builtIn)
{
if (!builtIn) {
const char* const extensions[2] = {E_GL_KHX_shader_explicit_arithmetic_types,
E_GL_KHX_shader_explicit_arithmetic_types_float64};
requireExtensions(loc, 2, extensions, "explicit types");
const char* const extensions[2] = {E_GL_EXT_shader_explicit_arithmetic_types,
E_GL_EXT_shader_explicit_arithmetic_types_float64};
requireExtensions(loc, 2, extensions, op);
requireProfile(loc, ECoreProfile | ECompatibilityProfile, op);
profileRequires(loc, ECoreProfile, 450, nullptr, op);
profileRequires(loc, ECompatibilityProfile, 450, nullptr, op);
profileRequires(loc, ECoreProfile | ECompatibilityProfile, 400, nullptr, op);
}
}
@ -865,44 +1048,54 @@ void TParseVersions::explicitFloat64Check(const TSourceLoc& loc, const char* op,
void TParseVersions::explicitInt8Check(const TSourceLoc& loc, const char* op, bool builtIn)
{
if (! builtIn) {
const char* const extensions[2] = {E_GL_KHX_shader_explicit_arithmetic_types,
E_GL_KHX_shader_explicit_arithmetic_types_int8};
requireExtensions(loc, 2, extensions, "explicit types");
requireProfile(loc, ECoreProfile | ECompatibilityProfile, op);
profileRequires(loc, ECoreProfile, 450, nullptr, op);
profileRequires(loc, ECompatibilityProfile, 450, nullptr, op);
const char* const extensions[2] = {E_GL_EXT_shader_explicit_arithmetic_types,
E_GL_EXT_shader_explicit_arithmetic_types_int8};
requireExtensions(loc, 2, extensions, op);
}
}
#ifdef AMD_EXTENSIONS
// Call for any operation needing GLSL float16 opaque-type support
void TParseVersions::float16OpaqueCheck(const TSourceLoc& loc, const char* op, bool builtIn)
{
if (! builtIn) {
requireExtensions(loc, 1, &E_GL_AMD_gpu_shader_half_float_fetch, op);
requireProfile(loc, ECoreProfile | ECompatibilityProfile, op);
profileRequires(loc, ECoreProfile, 450, nullptr, op);
profileRequires(loc, ECompatibilityProfile, 450, nullptr, op);
profileRequires(loc, ECoreProfile | ECompatibilityProfile, 400, nullptr, op);
}
}
#endif
// Call for any operation needing GLSL explicit int16 data-type support.
void TParseVersions::explicitInt16Check(const TSourceLoc& loc, const char* op, bool builtIn)
{
if (! builtIn) {
#if AMD_EXTENSIONS
const char* const extensions[3] = {E_GL_AMD_gpu_shader_int16,
E_GL_KHX_shader_explicit_arithmetic_types,
E_GL_KHX_shader_explicit_arithmetic_types_int16};
#else
const char* const extensions[2] = {E_GL_KHX_shader_explicit_arithmetic_types,
E_GL_KHX_shader_explicit_arithmetic_types_int16};
#endif
requireExtensions(loc, sizeof(extensions)/sizeof(extensions[0]), extensions, "explicit types");
requireProfile(loc, ECoreProfile | ECompatibilityProfile, op);
profileRequires(loc, ECoreProfile, 450, nullptr, op);
profileRequires(loc, ECompatibilityProfile, 450, nullptr, op);
const char* const extensions[] = {
E_GL_AMD_gpu_shader_int16,
E_GL_EXT_shader_explicit_arithmetic_types,
E_GL_EXT_shader_explicit_arithmetic_types_int16};
requireExtensions(loc, sizeof(extensions)/sizeof(extensions[0]), extensions, op);
}
}
void TParseVersions::int16ScalarVectorCheck(const TSourceLoc& loc, const char* op, bool builtIn)
{
if (! builtIn) {
const char* const extensions[] = {
E_GL_AMD_gpu_shader_int16,
E_GL_EXT_shader_16bit_storage,
E_GL_EXT_shader_explicit_arithmetic_types,
E_GL_EXT_shader_explicit_arithmetic_types_int16};
requireExtensions(loc, sizeof(extensions)/sizeof(extensions[0]), extensions, op);
}
}
void TParseVersions::int8ScalarVectorCheck(const TSourceLoc& loc, const char* op, bool builtIn)
{
if (! builtIn) {
const char* const extensions[] = {
E_GL_EXT_shader_8bit_storage,
E_GL_EXT_shader_explicit_arithmetic_types,
E_GL_EXT_shader_explicit_arithmetic_types_int8};
requireExtensions(loc, sizeof(extensions)/sizeof(extensions[0]), extensions, op);
}
}
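Note: the new int16ScalarVectorCheck() and int8ScalarVectorCheck() (and float16ScalarVectorCheck() above) additionally accept the 16-bit/8-bit storage extensions, while explicitInt16Check()/explicitInt8Check() and the *Arithmetic() helpers do not, so a shader built only on GL_EXT_shader_16bit_storage can declare small types but arithmetic on them still needs the explicit arithmetic-type (or AMD) extensions. An illustrative sketch, not taken from the diff:

    // Hypothetical compute shader: int16_t storage with GL_EXT_shader_16bit_storage alone.
    static const char* const kInt16StorageSource = R"GLSL(
    #version 450
    #extension GL_EXT_shader_16bit_storage : require
    layout(local_size_x = 1) in;
    layout(set = 0, binding = 0) buffer Data { int16_t halfWord; int result; };
    void main()
    {
        result = int(halfWord);                // widening conversion, storage-only use
        // halfWord = halfWord + int16_t(1);   // would additionally need
        //                                     // GL_EXT_shader_explicit_arithmetic_types_int16
    }
    )GLSL";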
@ -910,12 +1103,9 @@ void TParseVersions::explicitInt16Check(const TSourceLoc& loc, const char* op, b
void TParseVersions::explicitInt32Check(const TSourceLoc& loc, const char* op, bool builtIn)
{
if (! builtIn) {
const char* const extensions[2] = {E_GL_KHX_shader_explicit_arithmetic_types,
E_GL_KHX_shader_explicit_arithmetic_types_int32};
requireExtensions(loc, 2, extensions, "explicit types");
requireProfile(loc, ECoreProfile | ECompatibilityProfile, op);
profileRequires(loc, ECoreProfile, 450, nullptr, op);
profileRequires(loc, ECompatibilityProfile, 450, nullptr, op);
const char* const extensions[2] = {E_GL_EXT_shader_explicit_arithmetic_types,
E_GL_EXT_shader_explicit_arithmetic_types_int32};
requireExtensions(loc, 2, extensions, op);
}
}
@ -924,15 +1114,30 @@ void TParseVersions::int64Check(const TSourceLoc& loc, const char* op, bool buil
{
if (! builtIn) {
const char* const extensions[3] = {E_GL_ARB_gpu_shader_int64,
E_GL_KHX_shader_explicit_arithmetic_types,
E_GL_KHX_shader_explicit_arithmetic_types_int64};
requireExtensions(loc, 3, extensions, "shader int64");
E_GL_EXT_shader_explicit_arithmetic_types,
E_GL_EXT_shader_explicit_arithmetic_types_int64};
requireExtensions(loc, 3, extensions, op);
requireProfile(loc, ECoreProfile | ECompatibilityProfile, op);
profileRequires(loc, ECoreProfile, 450, nullptr, op);
profileRequires(loc, ECompatibilityProfile, 450, nullptr, op);
profileRequires(loc, ECoreProfile | ECompatibilityProfile, 400, nullptr, op);
}
}
void TParseVersions::fcoopmatCheck(const TSourceLoc& loc, const char* op, bool builtIn)
{
if (!builtIn) {
const char* const extensions[] = {E_GL_NV_cooperative_matrix};
requireExtensions(loc, sizeof(extensions)/sizeof(extensions[0]), extensions, op);
}
}
void TParseVersions::intcoopmatCheck(const TSourceLoc& loc, const char* op, bool builtIn)
{
if (!builtIn) {
const char* const extensions[] = {E_GL_NV_integer_cooperative_matrix};
requireExtensions(loc, sizeof(extensions)/sizeof(extensions[0]), extensions, op);
}
}
#endif // GLSLANG_WEB
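Note: fcoopmatCheck() and intcoopmatCheck() above gate the NV cooperative-matrix types on GL_NV_cooperative_matrix and GL_NV_integer_cooperative_matrix respectively. A sketch of a shader that would hit fcoopmatCheck(); the fcoopmatNV declaration syntax is taken from the GL_NV_cooperative_matrix specification and is an assumption here, not something defined in this file:

    // Hypothetical compute shader touching fcoopmatNV, which triggers fcoopmatCheck().
    static const char* const kCoopMatSource = R"GLSL(
    #version 450
    #extension GL_NV_cooperative_matrix : require
    #extension GL_KHR_memory_scope_semantics : require
    layout(local_size_x = 32) in;
    void main()
    {
        fcoopmatNV<32, gl_ScopeSubgroup, 16, 8> m = fcoopmatNV<32, gl_ScopeSubgroup, 16, 8>(0.0);
    }
    )GLSL";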
// Call for any operation removed because SPIR-V is in use.
void TParseVersions::spvRemoved(const TSourceLoc& loc, const char* op)
{
@ -950,15 +1155,19 @@ void TParseVersions::vulkanRemoved(const TSourceLoc& loc, const char* op)
// Call for any operation that requires Vulkan.
void TParseVersions::requireVulkan(const TSourceLoc& loc, const char* op)
{
#ifndef GLSLANG_WEB
if (spvVersion.vulkan == 0)
error(loc, "only allowed when using GLSL for Vulkan", op, "");
#endif
}
// Call for any operation that requires SPIR-V.
void TParseVersions::requireSpv(const TSourceLoc& loc, const char* op)
{
#ifndef GLSLANG_WEB
if (spvVersion.spv == 0)
error(loc, "only allowed when generating SPIR-V", op, "");
#endif
}
} // end namespace glslang

Externals/glslang/glslang/MachineIndependent/Versions.h vendored Executable file → Normal file

@ -2,6 +2,7 @@
// Copyright (C) 2002-2005 3Dlabs Inc. Ltd.
// Copyright (C) 2012-2013 LunarG, Inc.
// Copyright (C) 2017 ARM Limited.
// Copyright (C) 2015-2018 Google, Inc.
//
// All rights reserved.
//
@ -73,7 +74,7 @@ inline const char* ProfileName(EProfile profile)
}
//
// What source rules, validation rules, target language, etc. are needed
// What source rules, validation rules, target language, etc. are needed or
// desired for SPIR-V?
//
// 0 means a target or rule set is not enabled (ignore rules from that entity).
@ -109,6 +110,7 @@ const char* const E_GL_OES_standard_derivatives = "GL_OES_standard_deriv
const char* const E_GL_EXT_frag_depth = "GL_EXT_frag_depth";
const char* const E_GL_OES_EGL_image_external = "GL_OES_EGL_image_external";
const char* const E_GL_OES_EGL_image_external_essl3 = "GL_OES_EGL_image_external_essl3";
const char* const E_GL_EXT_YUV_target = "GL_EXT_YUV_target";
const char* const E_GL_EXT_shader_texture_lod = "GL_EXT_shader_texture_lod";
const char* const E_GL_EXT_shadow_samplers = "GL_EXT_shadow_samplers";
@ -122,8 +124,10 @@ const char* const E_GL_ARB_compute_shader = "GL_ARB_compute_shader
const char* const E_GL_ARB_tessellation_shader = "GL_ARB_tessellation_shader";
const char* const E_GL_ARB_enhanced_layouts = "GL_ARB_enhanced_layouts";
const char* const E_GL_ARB_texture_cube_map_array = "GL_ARB_texture_cube_map_array";
const char* const E_GL_ARB_texture_multisample = "GL_ARB_texture_multisample";
const char* const E_GL_ARB_shader_texture_lod = "GL_ARB_shader_texture_lod";
const char* const E_GL_ARB_explicit_attrib_location = "GL_ARB_explicit_attrib_location";
const char* const E_GL_ARB_explicit_uniform_location = "GL_ARB_explicit_uniform_location";
const char* const E_GL_ARB_shader_image_load_store = "GL_ARB_shader_image_load_store";
const char* const E_GL_ARB_shader_atomic_counters = "GL_ARB_shader_atomic_counters";
const char* const E_GL_ARB_shader_draw_parameters = "GL_ARB_shader_draw_parameters";
@ -132,6 +136,7 @@ const char* const E_GL_ARB_derivative_control = "GL_ARB_derivative_con
const char* const E_GL_ARB_shader_texture_image_samples = "GL_ARB_shader_texture_image_samples";
const char* const E_GL_ARB_viewport_array = "GL_ARB_viewport_array";
const char* const E_GL_ARB_gpu_shader_int64 = "GL_ARB_gpu_shader_int64";
const char* const E_GL_ARB_gpu_shader_fp64 = "GL_ARB_gpu_shader_fp64";
const char* const E_GL_ARB_shader_ballot = "GL_ARB_shader_ballot";
const char* const E_GL_ARB_sparse_texture2 = "GL_ARB_sparse_texture2";
const char* const E_GL_ARB_sparse_texture_clamp = "GL_ARB_sparse_texture_clamp";
@ -139,6 +144,10 @@ const char* const E_GL_ARB_shader_stencil_export = "GL_ARB_shader_stencil
// const char* const E_GL_ARB_cull_distance = "GL_ARB_cull_distance"; // present for 4.5, but need extension control over block members
const char* const E_GL_ARB_post_depth_coverage = "GL_ARB_post_depth_coverage";
const char* const E_GL_ARB_shader_viewport_layer_array = "GL_ARB_shader_viewport_layer_array";
const char* const E_GL_ARB_fragment_shader_interlock = "GL_ARB_fragment_shader_interlock";
const char* const E_GL_ARB_shader_clock = "GL_ARB_shader_clock";
const char* const E_GL_ARB_uniform_buffer_object = "GL_ARB_uniform_buffer_object";
const char* const E_GL_ARB_sample_shading = "GL_ARB_sample_shading";
const char* const E_GL_KHR_shader_subgroup_basic = "GL_KHR_shader_subgroup_basic";
const char* const E_GL_KHR_shader_subgroup_vote = "GL_KHR_shader_subgroup_vote";
@ -148,16 +157,31 @@ const char* const E_GL_KHR_shader_subgroup_shuffle = "GL_KHR_shader_sub
const char* const E_GL_KHR_shader_subgroup_shuffle_relative = "GL_KHR_shader_subgroup_shuffle_relative";
const char* const E_GL_KHR_shader_subgroup_clustered = "GL_KHR_shader_subgroup_clustered";
const char* const E_GL_KHR_shader_subgroup_quad = "GL_KHR_shader_subgroup_quad";
const char* const E_GL_KHR_memory_scope_semantics = "GL_KHR_memory_scope_semantics";
const char* const E_GL_EXT_shader_atomic_int64 = "GL_EXT_shader_atomic_int64";
const char* const E_GL_EXT_shader_non_constant_global_initializers = "GL_EXT_shader_non_constant_global_initializers";
const char* const E_GL_EXT_shader_image_load_formatted = "GL_EXT_shader_image_load_formatted";
const char* const E_GL_EXT_shader_16bit_storage = "GL_EXT_shader_16bit_storage";
const char* const E_GL_EXT_shader_8bit_storage = "GL_EXT_shader_8bit_storage";
// EXT extensions
const char* const E_GL_EXT_device_group = "GL_EXT_device_group";
const char* const E_GL_EXT_multiview = "GL_EXT_multiview";
const char* const E_GL_EXT_post_depth_coverage = "GL_EXT_post_depth_coverage";
const char* const E_GL_EXT_control_flow_attributes = "GL_EXT_control_flow_attributes";
const char* const E_GL_EXT_nonuniform_qualifier = "GL_EXT_nonuniform_qualifier";
const char* const E_GL_EXT_device_group = "GL_EXT_device_group";
const char* const E_GL_EXT_multiview = "GL_EXT_multiview";
const char* const E_GL_EXT_post_depth_coverage = "GL_EXT_post_depth_coverage";
const char* const E_GL_EXT_control_flow_attributes = "GL_EXT_control_flow_attributes";
const char* const E_GL_EXT_nonuniform_qualifier = "GL_EXT_nonuniform_qualifier";
const char* const E_GL_EXT_samplerless_texture_functions = "GL_EXT_samplerless_texture_functions";
const char* const E_GL_EXT_scalar_block_layout = "GL_EXT_scalar_block_layout";
const char* const E_GL_EXT_fragment_invocation_density = "GL_EXT_fragment_invocation_density";
const char* const E_GL_EXT_buffer_reference = "GL_EXT_buffer_reference";
const char* const E_GL_EXT_buffer_reference2 = "GL_EXT_buffer_reference2";
const char* const E_GL_EXT_buffer_reference_uvec2 = "GL_EXT_buffer_reference_uvec2";
const char* const E_GL_EXT_demote_to_helper_invocation = "GL_EXT_demote_to_helper_invocation";
const char* const E_GL_EXT_shader_realtime_clock = "GL_EXT_shader_realtime_clock";
// Arrays of extensions for the above viewportEXTs duplications
@ -175,7 +199,6 @@ const int Num_OVR_multiview_EXTs = sizeof(OVR_multiview_EXTs) / sizeof(OVR_multi
const char* const E_GL_GOOGLE_cpp_style_line_directive = "GL_GOOGLE_cpp_style_line_directive";
const char* const E_GL_GOOGLE_include_directive = "GL_GOOGLE_include_directive";
#ifdef AMD_EXTENSIONS
const char* const E_GL_AMD_shader_ballot = "GL_AMD_shader_ballot";
const char* const E_GL_AMD_shader_trinary_minmax = "GL_AMD_shader_trinary_minmax";
const char* const E_GL_AMD_shader_explicit_vertex_parameter = "GL_AMD_shader_explicit_vertex_parameter";
@ -186,9 +209,8 @@ const char* const E_GL_AMD_gpu_shader_int16 = "GL_AMD_gpu_sh
const char* const E_GL_AMD_shader_image_load_store_lod = "GL_AMD_shader_image_load_store_lod";
const char* const E_GL_AMD_shader_fragment_mask = "GL_AMD_shader_fragment_mask";
const char* const E_GL_AMD_gpu_shader_half_float_fetch = "GL_AMD_gpu_shader_half_float_fetch";
#endif
#ifdef NV_EXTENSIONS
const char* const E_GL_INTEL_shader_integer_functions2 = "GL_INTEL_shader_integer_functions2";
const char* const E_GL_NV_sample_mask_override_coverage = "GL_NV_sample_mask_override_coverage";
const char* const E_SPV_NV_geometry_shader_passthrough = "GL_NV_geometry_shader_passthrough";
@ -199,12 +221,21 @@ const char* const E_GL_NV_shader_atomic_int64 = "GL_NV_shader_
const char* const E_GL_NV_conservative_raster_underestimation = "GL_NV_conservative_raster_underestimation";
const char* const E_GL_NV_shader_noperspective_interpolation = "GL_NV_shader_noperspective_interpolation";
const char* const E_GL_NV_shader_subgroup_partitioned = "GL_NV_shader_subgroup_partitioned";
const char* const E_GL_NV_shading_rate_image = "GL_NV_shading_rate_image";
const char* const E_GL_NV_ray_tracing = "GL_NV_ray_tracing";
const char* const E_GL_NV_fragment_shader_barycentric = "GL_NV_fragment_shader_barycentric";
const char* const E_GL_NV_compute_shader_derivatives = "GL_NV_compute_shader_derivatives";
const char* const E_GL_NV_shader_texture_footprint = "GL_NV_shader_texture_footprint";
const char* const E_GL_NV_mesh_shader = "GL_NV_mesh_shader";
// Arrays of extensions for the above viewportEXTs duplications
const char* const viewportEXTs[] = { E_GL_ARB_shader_viewport_layer_array, E_GL_NV_viewport_array2 };
const int Num_viewportEXTs = sizeof(viewportEXTs) / sizeof(viewportEXTs[0]);
#endif
const char* const E_GL_NV_cooperative_matrix = "GL_NV_cooperative_matrix";
const char* const E_GL_NV_shader_sm_builtins = "GL_NV_shader_sm_builtins";
const char* const E_GL_NV_integer_cooperative_matrix = "GL_NV_integer_cooperative_matrix";
// AEP
const char* const E_GL_ANDROID_extension_pack_es31a = "GL_ANDROID_extension_pack_es31a";
@ -234,15 +265,20 @@ const char* const E_GL_OES_tessellation_point_size = "GL_OES_tessel
const char* const E_GL_OES_texture_buffer = "GL_OES_texture_buffer";
const char* const E_GL_OES_texture_cube_map_array = "GL_OES_texture_cube_map_array";
// KHX
const char* const E_GL_KHX_shader_explicit_arithmetic_types = "GL_KHX_shader_explicit_arithmetic_types";
const char* const E_GL_KHX_shader_explicit_arithmetic_types_int8 = "GL_KHX_shader_explicit_arithmetic_types_int8";
const char* const E_GL_KHX_shader_explicit_arithmetic_types_int16 = "GL_KHX_shader_explicit_arithmetic_types_int16";
const char* const E_GL_KHX_shader_explicit_arithmetic_types_int32 = "GL_KHX_shader_explicit_arithmetic_types_int32";
const char* const E_GL_KHX_shader_explicit_arithmetic_types_int64 = "GL_KHX_shader_explicit_arithmetic_types_int64";
const char* const E_GL_KHX_shader_explicit_arithmetic_types_float16 = "GL_KHX_shader_explicit_arithmetic_types_float16";
const char* const E_GL_KHX_shader_explicit_arithmetic_types_float32 = "GL_KHX_shader_explicit_arithmetic_types_float32";
const char* const E_GL_KHX_shader_explicit_arithmetic_types_float64 = "GL_KHX_shader_explicit_arithmetic_types_float64";
// EXT
const char* const E_GL_EXT_shader_explicit_arithmetic_types = "GL_EXT_shader_explicit_arithmetic_types";
const char* const E_GL_EXT_shader_explicit_arithmetic_types_int8 = "GL_EXT_shader_explicit_arithmetic_types_int8";
const char* const E_GL_EXT_shader_explicit_arithmetic_types_int16 = "GL_EXT_shader_explicit_arithmetic_types_int16";
const char* const E_GL_EXT_shader_explicit_arithmetic_types_int32 = "GL_EXT_shader_explicit_arithmetic_types_int32";
const char* const E_GL_EXT_shader_explicit_arithmetic_types_int64 = "GL_EXT_shader_explicit_arithmetic_types_int64";
const char* const E_GL_EXT_shader_explicit_arithmetic_types_float16 = "GL_EXT_shader_explicit_arithmetic_types_float16";
const char* const E_GL_EXT_shader_explicit_arithmetic_types_float32 = "GL_EXT_shader_explicit_arithmetic_types_float32";
const char* const E_GL_EXT_shader_explicit_arithmetic_types_float64 = "GL_EXT_shader_explicit_arithmetic_types_float64";
const char* const E_GL_EXT_shader_subgroup_extended_types_int8 = "GL_EXT_shader_subgroup_extended_types_int8";
const char* const E_GL_EXT_shader_subgroup_extended_types_int16 = "GL_EXT_shader_subgroup_extended_types_int16";
const char* const E_GL_EXT_shader_subgroup_extended_types_int64 = "GL_EXT_shader_subgroup_extended_types_int64";
const char* const E_GL_EXT_shader_subgroup_extended_types_float16 = "GL_EXT_shader_subgroup_extended_types_float16";
// Arrays of extensions for the above AEP duplications


@ -34,6 +34,8 @@
// POSSIBILITY OF SUCH DAMAGE.
//
#ifndef GLSLANG_WEB
#include "attribute.h"
#include "../Include/intermediate.h"
#include "ParseHelper.h"
@ -52,6 +54,7 @@ bool TAttributeArgs::getInt(int& value, int argNum) const
return true;
}
// extract strings out of attribute arguments stored in attribute aggregate.
// convert to lower case if convertToLower is true (for case-insensitive compare convenience)
bool TAttributeArgs::getString(TString& value, int argNum, bool convertToLower) const
@ -85,6 +88,9 @@ const TConstUnion* TAttributeArgs::getConstUnion(TBasicType basicType, int argNu
if (argNum >= (int)args->getSequence().size())
return nullptr;
if (args->getSequence()[argNum]->getAsConstantUnion() == nullptr)
return nullptr;
const TConstUnion* constVal = &args->getSequence()[argNum]->getAsConstantUnion()->getConstArray()[0];
if (constVal == nullptr || constVal->getType() != basicType)
return nullptr;
@ -107,6 +113,16 @@ TAttributeType TParseContext::attributeFromName(const TString& name) const
return EatDependencyInfinite;
else if (name == "dependency_length")
return EatDependencyLength;
else if (name == "min_iterations")
return EatMinIterations;
else if (name == "max_iterations")
return EatMaxIterations;
else if (name == "iteration_multiple")
return EatIterationMultiple;
else if (name == "peel_count")
return EatPeelCount;
else if (name == "partial_count")
return EatPartialCount;
else
return EatNone;
}
@ -222,29 +238,101 @@ void TParseContext::handleLoopAttributes(const TAttributes& attributes, TIntermN
}
for (auto it = attributes.begin(); it != attributes.end(); ++it) {
if (it->name != EatDependencyLength && it->size() > 0) {
warn(node->getLoc(), "attribute with arguments not recognized, skipping", "", "");
continue;
}
int value;
const auto noArgument = [&](const char* feature) {
if (it->size() > 0) {
warn(node->getLoc(), "expected no arguments", feature, "");
return false;
}
return true;
};
const auto positiveSignedArgument = [&](const char* feature, int& value) {
if (it->size() == 1 && it->getInt(value)) {
if (value <= 0) {
error(node->getLoc(), "must be positive", feature, "");
return false;
}
} else {
warn(node->getLoc(), "expected a single integer argument", feature, "");
return false;
}
return true;
};
const auto unsignedArgument = [&](const char* feature, unsigned int& uiValue) {
int value;
if (!(it->size() == 1 && it->getInt(value))) {
warn(node->getLoc(), "expected a single integer argument", feature, "");
return false;
}
uiValue = (unsigned int)value;
return true;
};
const auto positiveUnsignedArgument = [&](const char* feature, unsigned int& uiValue) {
int value;
if (it->size() == 1 && it->getInt(value)) {
if (value == 0) {
error(node->getLoc(), "must be greater than or equal to 1", feature, "");
return false;
}
} else {
warn(node->getLoc(), "expected a single integer argument", feature, "");
return false;
}
uiValue = (unsigned int)value;
return true;
};
const auto spirv14 = [&](const char* feature) {
if (spvVersion.spv > 0 && spvVersion.spv < EShTargetSpv_1_4)
warn(node->getLoc(), "attribute requires a SPIR-V 1.4 target-env", feature, "");
};
int value = 0;
unsigned uiValue = 0;
switch (it->name) {
case EatUnroll:
loop->setUnroll();
if (noArgument("unroll"))
loop->setUnroll();
break;
case EatLoop:
loop->setDontUnroll();
if (noArgument("dont_unroll"))
loop->setDontUnroll();
break;
case EatDependencyInfinite:
loop->setLoopDependency(TIntermLoop::dependencyInfinite);
if (noArgument("dependency_infinite"))
loop->setLoopDependency(TIntermLoop::dependencyInfinite);
break;
case EatDependencyLength:
if (it->size() == 1 && it->getInt(value)) {
if (value <= 0)
error(node->getLoc(), "must be positive", "dependency_length", "");
if (positiveSignedArgument("dependency_length", value))
loop->setLoopDependency(value);
} else
warn(node->getLoc(), "expected a single integer argument", "dependency_length", "");
break;
case EatMinIterations:
spirv14("min_iterations");
if (unsignedArgument("min_iterations", uiValue))
loop->setMinIterations(uiValue);
break;
case EatMaxIterations:
spirv14("max_iterations");
if (unsignedArgument("max_iterations", uiValue))
loop->setMaxIterations(uiValue);
break;
case EatIterationMultiple:
spirv14("iteration_multiple");
if (positiveUnsignedArgument("iteration_multiple", uiValue))
loop->setIterationMultiple(uiValue);
break;
case EatPeelCount:
spirv14("peel_count");
if (unsignedArgument("peel_count", uiValue))
loop->setPeelCount(uiValue);
break;
case EatPartialCount:
spirv14("partial_count");
if (unsignedArgument("partial_count", uiValue))
loop->setPartialCount(uiValue);
break;
default:
warn(node->getLoc(), "attribute does not apply to a loop", "", "");
@ -253,5 +341,6 @@ void TParseContext::handleLoopAttributes(const TAttributes& attributes, TIntermN
}
}
} // end namespace glslang
#endif // GLSLANG_WEB
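Note: together with the new TAttributeType values in the header below, handleLoopAttributes() now understands the SPIR-V 1.4 loop controls (min_iterations, max_iterations, iteration_multiple, peel_count, partial_count). The new lambdas warn on unexpected argument counts, error on out-of-range values, and warn when the SPIR-V target is older than 1.4; the pre-existing unroll/dont_unroll/dependency attributes are also routed through them. A loop using the new attributes, kept as a C++ string constant and assumed to be compiled for a SPIR-V 1.4 target (for example glslangValidator -V --target-env spirv1.4):

    // Hypothetical fragment shader exercising the new loop attributes.
    static const char* const kLoopControlSource = R"GLSL(
    #version 450
    #extension GL_EXT_control_flow_attributes : require
    layout(location = 0) out float result;
    void main()
    {
        float r = 0.0;
        [[min_iterations(3), max_iterations(16), iteration_multiple(2), peel_count(1), partial_count(4)]]
        for (int i = 0; i < 16; ++i)
            r += float(i);
        result = r;
    }
    )GLSL";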


@ -71,7 +71,54 @@ namespace glslang {
EatPushConstant,
EatConstantId,
EatDependencyInfinite,
EatDependencyLength
EatDependencyLength,
EatMinIterations,
EatMaxIterations,
EatIterationMultiple,
EatPeelCount,
EatPartialCount,
EatFormatRgba32f,
EatFormatRgba16f,
EatFormatR32f,
EatFormatRgba8,
EatFormatRgba8Snorm,
EatFormatRg32f,
EatFormatRg16f,
EatFormatR11fG11fB10f,
EatFormatR16f,
EatFormatRgba16,
EatFormatRgb10A2,
EatFormatRg16,
EatFormatRg8,
EatFormatR16,
EatFormatR8,
EatFormatRgba16Snorm,
EatFormatRg16Snorm,
EatFormatRg8Snorm,
EatFormatR16Snorm,
EatFormatR8Snorm,
EatFormatRgba32i,
EatFormatRgba16i,
EatFormatRgba8i,
EatFormatR32i,
EatFormatRg32i,
EatFormatRg16i,
EatFormatRg8i,
EatFormatR16i,
EatFormatR8i,
EatFormatRgba32ui,
EatFormatRgba16ui,
EatFormatRgba8ui,
EatFormatR32ui,
EatFormatRgb10a2ui,
EatFormatRg32ui,
EatFormatRg16ui,
EatFormatRg8ui,
EatFormatR16ui,
EatFormatR8ui,
EatFormatUnknown,
EatNonWritable,
EatNonReadable
};
class TIntermAggregate;


@ -78,7 +78,6 @@
#define GL_DOUBLE_MAT4x2 0x8F4D
#define GL_DOUBLE_MAT4x3 0x8F4E
#ifdef AMD_EXTENSIONS
// Those constants are borrowed from extension NV_gpu_shader5
#define GL_FLOAT16_NV 0x8FF8
#define GL_FLOAT16_VEC2_NV 0x8FF9
@ -94,7 +93,6 @@
#define GL_FLOAT16_MAT3x4_AMD 0x91CB
#define GL_FLOAT16_MAT4x2_AMD 0x91CC
#define GL_FLOAT16_MAT4x3_AMD 0x91CD
#endif
#define GL_SAMPLER_1D 0x8B5D
#define GL_SAMPLER_2D 0x8B5E
@ -117,7 +115,6 @@
#define GL_SAMPLER_CUBE_MAP_ARRAY_ARB 0x900C
#define GL_SAMPLER_CUBE_MAP_ARRAY_SHADOW_ARB 0x900D
#ifdef AMD_EXTENSIONS
#define GL_FLOAT16_SAMPLER_1D_AMD 0x91CE
#define GL_FLOAT16_SAMPLER_2D_AMD 0x91CF
#define GL_FLOAT16_SAMPLER_3D_AMD 0x91D0
@ -149,7 +146,6 @@
#define GL_FLOAT16_IMAGE_BUFFER_AMD 0x91E8
#define GL_FLOAT16_IMAGE_2D_MULTISAMPLE_AMD 0x91E9
#define GL_FLOAT16_IMAGE_2D_MULTISAMPLE_ARRAY_AMD 0x91EA
#endif
#define GL_INT_SAMPLER_1D 0x8DC9
#define GL_INT_SAMPLER_2D 0x8DCA

File diff suppressed because it is too large

File diff suppressed because it is too large

File diff suppressed because it is too large


@ -1,8 +1,8 @@
/* A Bison parser, made by GNU Bison 3.0. */
/* A Bison parser, made by GNU Bison 3.0.4. */
/* Bison interface for Yacc-like parsers in C
Copyright (C) 1984, 1989-1990, 2000-2013 Free Software Foundation, Inc.
Copyright (C) 1984, 1989-1990, 2000-2015 Free Software Foundation, Inc.
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
@ -45,403 +45,423 @@ extern int yydebug;
# define YYTOKENTYPE
enum yytokentype
{
ATTRIBUTE = 258,
VARYING = 259,
FLOAT16_T = 260,
FLOAT = 261,
FLOAT32_T = 262,
DOUBLE = 263,
FLOAT64_T = 264,
CONST = 265,
BOOL = 266,
INT = 267,
UINT = 268,
INT64_T = 269,
UINT64_T = 270,
INT32_T = 271,
UINT32_T = 272,
INT16_T = 273,
UINT16_T = 274,
INT8_T = 275,
UINT8_T = 276,
BREAK = 277,
CONTINUE = 278,
DO = 279,
ELSE = 280,
FOR = 281,
IF = 282,
DISCARD = 283,
RETURN = 284,
SWITCH = 285,
CASE = 286,
DEFAULT = 287,
SUBROUTINE = 288,
BVEC2 = 289,
BVEC3 = 290,
BVEC4 = 291,
IVEC2 = 292,
IVEC3 = 293,
IVEC4 = 294,
UVEC2 = 295,
UVEC3 = 296,
UVEC4 = 297,
I64VEC2 = 298,
I64VEC3 = 299,
I64VEC4 = 300,
U64VEC2 = 301,
U64VEC3 = 302,
U64VEC4 = 303,
I32VEC2 = 304,
I32VEC3 = 305,
I32VEC4 = 306,
U32VEC2 = 307,
U32VEC3 = 308,
U32VEC4 = 309,
I16VEC2 = 310,
I16VEC3 = 311,
I16VEC4 = 312,
U16VEC2 = 313,
U16VEC3 = 314,
U16VEC4 = 315,
I8VEC2 = 316,
I8VEC3 = 317,
I8VEC4 = 318,
U8VEC2 = 319,
U8VEC3 = 320,
U8VEC4 = 321,
VEC2 = 322,
VEC3 = 323,
VEC4 = 324,
MAT2 = 325,
MAT3 = 326,
MAT4 = 327,
CENTROID = 328,
IN = 329,
OUT = 330,
INOUT = 331,
UNIFORM = 332,
PATCH = 333,
SAMPLE = 334,
BUFFER = 335,
SHARED = 336,
NONUNIFORM = 337,
COHERENT = 338,
VOLATILE = 339,
RESTRICT = 340,
READONLY = 341,
WRITEONLY = 342,
DVEC2 = 343,
DVEC3 = 344,
DVEC4 = 345,
DMAT2 = 346,
DMAT3 = 347,
DMAT4 = 348,
F16VEC2 = 349,
F16VEC3 = 350,
F16VEC4 = 351,
F16MAT2 = 352,
F16MAT3 = 353,
F16MAT4 = 354,
F32VEC2 = 355,
F32VEC3 = 356,
F32VEC4 = 357,
F32MAT2 = 358,
F32MAT3 = 359,
F32MAT4 = 360,
F64VEC2 = 361,
F64VEC3 = 362,
F64VEC4 = 363,
F64MAT2 = 364,
F64MAT3 = 365,
F64MAT4 = 366,
NOPERSPECTIVE = 367,
FLAT = 368,
SMOOTH = 369,
LAYOUT = 370,
__EXPLICITINTERPAMD = 371,
MAT2X2 = 372,
MAT2X3 = 373,
MAT2X4 = 374,
MAT3X2 = 375,
MAT3X3 = 376,
MAT3X4 = 377,
MAT4X2 = 378,
MAT4X3 = 379,
MAT4X4 = 380,
DMAT2X2 = 381,
DMAT2X3 = 382,
DMAT2X4 = 383,
DMAT3X2 = 384,
DMAT3X3 = 385,
DMAT3X4 = 386,
DMAT4X2 = 387,
DMAT4X3 = 388,
DMAT4X4 = 389,
F16MAT2X2 = 390,
F16MAT2X3 = 391,
F16MAT2X4 = 392,
F16MAT3X2 = 393,
F16MAT3X3 = 394,
F16MAT3X4 = 395,
F16MAT4X2 = 396,
F16MAT4X3 = 397,
F16MAT4X4 = 398,
F32MAT2X2 = 399,
F32MAT2X3 = 400,
F32MAT2X4 = 401,
F32MAT3X2 = 402,
F32MAT3X3 = 403,
F32MAT3X4 = 404,
F32MAT4X2 = 405,
F32MAT4X3 = 406,
F32MAT4X4 = 407,
F64MAT2X2 = 408,
F64MAT2X3 = 409,
F64MAT2X4 = 410,
F64MAT3X2 = 411,
F64MAT3X3 = 412,
F64MAT3X4 = 413,
F64MAT4X2 = 414,
F64MAT4X3 = 415,
F64MAT4X4 = 416,
ATOMIC_UINT = 417,
SAMPLER1D = 418,
SAMPLER2D = 419,
SAMPLER3D = 420,
SAMPLERCUBE = 421,
SAMPLER1DSHADOW = 422,
SAMPLER2DSHADOW = 423,
SAMPLERCUBESHADOW = 424,
SAMPLER1DARRAY = 425,
SAMPLER2DARRAY = 426,
SAMPLER1DARRAYSHADOW = 427,
SAMPLER2DARRAYSHADOW = 428,
ISAMPLER1D = 429,
ISAMPLER2D = 430,
ISAMPLER3D = 431,
ISAMPLERCUBE = 432,
ISAMPLER1DARRAY = 433,
ISAMPLER2DARRAY = 434,
USAMPLER1D = 435,
USAMPLER2D = 436,
USAMPLER3D = 437,
USAMPLERCUBE = 438,
USAMPLER1DARRAY = 439,
USAMPLER2DARRAY = 440,
SAMPLER2DRECT = 441,
SAMPLER2DRECTSHADOW = 442,
ISAMPLER2DRECT = 443,
USAMPLER2DRECT = 444,
SAMPLERBUFFER = 445,
ISAMPLERBUFFER = 446,
USAMPLERBUFFER = 447,
SAMPLERCUBEARRAY = 448,
SAMPLERCUBEARRAYSHADOW = 449,
ISAMPLERCUBEARRAY = 450,
USAMPLERCUBEARRAY = 451,
SAMPLER2DMS = 452,
ISAMPLER2DMS = 453,
USAMPLER2DMS = 454,
SAMPLER2DMSARRAY = 455,
ISAMPLER2DMSARRAY = 456,
USAMPLER2DMSARRAY = 457,
SAMPLEREXTERNALOES = 458,
F16SAMPLER1D = 459,
F16SAMPLER2D = 460,
F16SAMPLER3D = 461,
F16SAMPLER2DRECT = 462,
F16SAMPLERCUBE = 463,
F16SAMPLER1DARRAY = 464,
F16SAMPLER2DARRAY = 465,
F16SAMPLERCUBEARRAY = 466,
F16SAMPLERBUFFER = 467,
F16SAMPLER2DMS = 468,
F16SAMPLER2DMSARRAY = 469,
F16SAMPLER1DSHADOW = 470,
F16SAMPLER2DSHADOW = 471,
F16SAMPLER1DARRAYSHADOW = 472,
F16SAMPLER2DARRAYSHADOW = 473,
F16SAMPLER2DRECTSHADOW = 474,
F16SAMPLERCUBESHADOW = 475,
F16SAMPLERCUBEARRAYSHADOW = 476,
SAMPLER = 477,
SAMPLERSHADOW = 478,
TEXTURE1D = 479,
TEXTURE2D = 480,
TEXTURE3D = 481,
TEXTURECUBE = 482,
TEXTURE1DARRAY = 483,
TEXTURE2DARRAY = 484,
ITEXTURE1D = 485,
ITEXTURE2D = 486,
ITEXTURE3D = 487,
ITEXTURECUBE = 488,
ITEXTURE1DARRAY = 489,
ITEXTURE2DARRAY = 490,
UTEXTURE1D = 491,
UTEXTURE2D = 492,
UTEXTURE3D = 493,
UTEXTURECUBE = 494,
UTEXTURE1DARRAY = 495,
UTEXTURE2DARRAY = 496,
TEXTURE2DRECT = 497,
ITEXTURE2DRECT = 498,
UTEXTURE2DRECT = 499,
TEXTUREBUFFER = 500,
ITEXTUREBUFFER = 501,
UTEXTUREBUFFER = 502,
TEXTURECUBEARRAY = 503,
ITEXTURECUBEARRAY = 504,
UTEXTURECUBEARRAY = 505,
TEXTURE2DMS = 506,
ITEXTURE2DMS = 507,
UTEXTURE2DMS = 508,
TEXTURE2DMSARRAY = 509,
ITEXTURE2DMSARRAY = 510,
UTEXTURE2DMSARRAY = 511,
F16TEXTURE1D = 512,
F16TEXTURE2D = 513,
F16TEXTURE3D = 514,
F16TEXTURE2DRECT = 515,
F16TEXTURECUBE = 516,
F16TEXTURE1DARRAY = 517,
F16TEXTURE2DARRAY = 518,
F16TEXTURECUBEARRAY = 519,
F16TEXTUREBUFFER = 520,
F16TEXTURE2DMS = 521,
F16TEXTURE2DMSARRAY = 522,
SUBPASSINPUT = 523,
SUBPASSINPUTMS = 524,
ISUBPASSINPUT = 525,
ISUBPASSINPUTMS = 526,
USUBPASSINPUT = 527,
USUBPASSINPUTMS = 528,
F16SUBPASSINPUT = 529,
F16SUBPASSINPUTMS = 530,
IMAGE1D = 531,
IIMAGE1D = 532,
UIMAGE1D = 533,
IMAGE2D = 534,
IIMAGE2D = 535,
UIMAGE2D = 536,
IMAGE3D = 537,
IIMAGE3D = 538,
UIMAGE3D = 539,
IMAGE2DRECT = 540,
IIMAGE2DRECT = 541,
UIMAGE2DRECT = 542,
IMAGECUBE = 543,
IIMAGECUBE = 544,
UIMAGECUBE = 545,
IMAGEBUFFER = 546,
IIMAGEBUFFER = 547,
UIMAGEBUFFER = 548,
IMAGE1DARRAY = 549,
IIMAGE1DARRAY = 550,
UIMAGE1DARRAY = 551,
IMAGE2DARRAY = 552,
IIMAGE2DARRAY = 553,
UIMAGE2DARRAY = 554,
IMAGECUBEARRAY = 555,
IIMAGECUBEARRAY = 556,
UIMAGECUBEARRAY = 557,
IMAGE2DMS = 558,
IIMAGE2DMS = 559,
UIMAGE2DMS = 560,
IMAGE2DMSARRAY = 561,
IIMAGE2DMSARRAY = 562,
UIMAGE2DMSARRAY = 563,
F16IMAGE1D = 564,
F16IMAGE2D = 565,
F16IMAGE3D = 566,
F16IMAGE2DRECT = 567,
F16IMAGECUBE = 568,
F16IMAGE1DARRAY = 569,
F16IMAGE2DARRAY = 570,
F16IMAGECUBEARRAY = 571,
F16IMAGEBUFFER = 572,
F16IMAGE2DMS = 573,
F16IMAGE2DMSARRAY = 574,
STRUCT = 575,
VOID = 576,
WHILE = 577,
IDENTIFIER = 578,
TYPE_NAME = 579,
FLOATCONSTANT = 580,
DOUBLECONSTANT = 581,
INT16CONSTANT = 582,
UINT16CONSTANT = 583,
INT32CONSTANT = 584,
UINT32CONSTANT = 585,
INTCONSTANT = 586,
UINTCONSTANT = 587,
INT64CONSTANT = 588,
UINT64CONSTANT = 589,
BOOLCONSTANT = 590,
FLOAT16CONSTANT = 591,
LEFT_OP = 592,
RIGHT_OP = 593,
INC_OP = 594,
DEC_OP = 595,
LE_OP = 596,
GE_OP = 597,
EQ_OP = 598,
NE_OP = 599,
AND_OP = 600,
OR_OP = 601,
XOR_OP = 602,
MUL_ASSIGN = 603,
DIV_ASSIGN = 604,
ADD_ASSIGN = 605,
MOD_ASSIGN = 606,
LEFT_ASSIGN = 607,
RIGHT_ASSIGN = 608,
AND_ASSIGN = 609,
XOR_ASSIGN = 610,
OR_ASSIGN = 611,
SUB_ASSIGN = 612,
LEFT_PAREN = 613,
RIGHT_PAREN = 614,
LEFT_BRACKET = 615,
RIGHT_BRACKET = 616,
LEFT_BRACE = 617,
RIGHT_BRACE = 618,
DOT = 619,
COMMA = 620,
COLON = 621,
EQUAL = 622,
SEMICOLON = 623,
BANG = 624,
DASH = 625,
TILDE = 626,
PLUS = 627,
STAR = 628,
SLASH = 629,
PERCENT = 630,
LEFT_ANGLE = 631,
RIGHT_ANGLE = 632,
VERTICAL_BAR = 633,
CARET = 634,
AMPERSAND = 635,
QUESTION = 636,
INVARIANT = 637,
PRECISE = 638,
HIGH_PRECISION = 639,
MEDIUM_PRECISION = 640,
LOW_PRECISION = 641,
PRECISION = 642,
PACKED = 643,
RESOURCE = 644,
SUPERP = 645
CONST = 258,
BOOL = 259,
INT = 260,
UINT = 261,
FLOAT = 262,
BVEC2 = 263,
BVEC3 = 264,
BVEC4 = 265,
IVEC2 = 266,
IVEC3 = 267,
IVEC4 = 268,
UVEC2 = 269,
UVEC3 = 270,
UVEC4 = 271,
VEC2 = 272,
VEC3 = 273,
VEC4 = 274,
MAT2 = 275,
MAT3 = 276,
MAT4 = 277,
MAT2X2 = 278,
MAT2X3 = 279,
MAT2X4 = 280,
MAT3X2 = 281,
MAT3X3 = 282,
MAT3X4 = 283,
MAT4X2 = 284,
MAT4X3 = 285,
MAT4X4 = 286,
SAMPLER2D = 287,
SAMPLER3D = 288,
SAMPLERCUBE = 289,
SAMPLER2DSHADOW = 290,
SAMPLERCUBESHADOW = 291,
SAMPLER2DARRAY = 292,
SAMPLER2DARRAYSHADOW = 293,
ISAMPLER2D = 294,
ISAMPLER3D = 295,
ISAMPLERCUBE = 296,
ISAMPLER2DARRAY = 297,
USAMPLER2D = 298,
USAMPLER3D = 299,
USAMPLERCUBE = 300,
USAMPLER2DARRAY = 301,
SAMPLER = 302,
SAMPLERSHADOW = 303,
TEXTURE2D = 304,
TEXTURE3D = 305,
TEXTURECUBE = 306,
TEXTURE2DARRAY = 307,
ITEXTURE2D = 308,
ITEXTURE3D = 309,
ITEXTURECUBE = 310,
ITEXTURE2DARRAY = 311,
UTEXTURE2D = 312,
UTEXTURE3D = 313,
UTEXTURECUBE = 314,
UTEXTURE2DARRAY = 315,
ATTRIBUTE = 316,
VARYING = 317,
FLOAT16_T = 318,
FLOAT32_T = 319,
DOUBLE = 320,
FLOAT64_T = 321,
INT64_T = 322,
UINT64_T = 323,
INT32_T = 324,
UINT32_T = 325,
INT16_T = 326,
UINT16_T = 327,
INT8_T = 328,
UINT8_T = 329,
I64VEC2 = 330,
I64VEC3 = 331,
I64VEC4 = 332,
U64VEC2 = 333,
U64VEC3 = 334,
U64VEC4 = 335,
I32VEC2 = 336,
I32VEC3 = 337,
I32VEC4 = 338,
U32VEC2 = 339,
U32VEC3 = 340,
U32VEC4 = 341,
I16VEC2 = 342,
I16VEC3 = 343,
I16VEC4 = 344,
U16VEC2 = 345,
U16VEC3 = 346,
U16VEC4 = 347,
I8VEC2 = 348,
I8VEC3 = 349,
I8VEC4 = 350,
U8VEC2 = 351,
U8VEC3 = 352,
U8VEC4 = 353,
DVEC2 = 354,
DVEC3 = 355,
DVEC4 = 356,
DMAT2 = 357,
DMAT3 = 358,
DMAT4 = 359,
F16VEC2 = 360,
F16VEC3 = 361,
F16VEC4 = 362,
F16MAT2 = 363,
F16MAT3 = 364,
F16MAT4 = 365,
F32VEC2 = 366,
F32VEC3 = 367,
F32VEC4 = 368,
F32MAT2 = 369,
F32MAT3 = 370,
F32MAT4 = 371,
F64VEC2 = 372,
F64VEC3 = 373,
F64VEC4 = 374,
F64MAT2 = 375,
F64MAT3 = 376,
F64MAT4 = 377,
DMAT2X2 = 378,
DMAT2X3 = 379,
DMAT2X4 = 380,
DMAT3X2 = 381,
DMAT3X3 = 382,
DMAT3X4 = 383,
DMAT4X2 = 384,
DMAT4X3 = 385,
DMAT4X4 = 386,
F16MAT2X2 = 387,
F16MAT2X3 = 388,
F16MAT2X4 = 389,
F16MAT3X2 = 390,
F16MAT3X3 = 391,
F16MAT3X4 = 392,
F16MAT4X2 = 393,
F16MAT4X3 = 394,
F16MAT4X4 = 395,
F32MAT2X2 = 396,
F32MAT2X3 = 397,
F32MAT2X4 = 398,
F32MAT3X2 = 399,
F32MAT3X3 = 400,
F32MAT3X4 = 401,
F32MAT4X2 = 402,
F32MAT4X3 = 403,
F32MAT4X4 = 404,
F64MAT2X2 = 405,
F64MAT2X3 = 406,
F64MAT2X4 = 407,
F64MAT3X2 = 408,
F64MAT3X3 = 409,
F64MAT3X4 = 410,
F64MAT4X2 = 411,
F64MAT4X3 = 412,
F64MAT4X4 = 413,
ATOMIC_UINT = 414,
ACCSTRUCTNV = 415,
FCOOPMATNV = 416,
ICOOPMATNV = 417,
UCOOPMATNV = 418,
SAMPLERCUBEARRAY = 419,
SAMPLERCUBEARRAYSHADOW = 420,
ISAMPLERCUBEARRAY = 421,
USAMPLERCUBEARRAY = 422,
SAMPLER1D = 423,
SAMPLER1DARRAY = 424,
SAMPLER1DARRAYSHADOW = 425,
ISAMPLER1D = 426,
SAMPLER1DSHADOW = 427,
SAMPLER2DRECT = 428,
SAMPLER2DRECTSHADOW = 429,
ISAMPLER2DRECT = 430,
USAMPLER2DRECT = 431,
SAMPLERBUFFER = 432,
ISAMPLERBUFFER = 433,
USAMPLERBUFFER = 434,
SAMPLER2DMS = 435,
ISAMPLER2DMS = 436,
USAMPLER2DMS = 437,
SAMPLER2DMSARRAY = 438,
ISAMPLER2DMSARRAY = 439,
USAMPLER2DMSARRAY = 440,
SAMPLEREXTERNALOES = 441,
SAMPLEREXTERNAL2DY2YEXT = 442,
ISAMPLER1DARRAY = 443,
USAMPLER1D = 444,
USAMPLER1DARRAY = 445,
F16SAMPLER1D = 446,
F16SAMPLER2D = 447,
F16SAMPLER3D = 448,
F16SAMPLER2DRECT = 449,
F16SAMPLERCUBE = 450,
F16SAMPLER1DARRAY = 451,
F16SAMPLER2DARRAY = 452,
F16SAMPLERCUBEARRAY = 453,
F16SAMPLERBUFFER = 454,
F16SAMPLER2DMS = 455,
F16SAMPLER2DMSARRAY = 456,
F16SAMPLER1DSHADOW = 457,
F16SAMPLER2DSHADOW = 458,
F16SAMPLER1DARRAYSHADOW = 459,
F16SAMPLER2DARRAYSHADOW = 460,
F16SAMPLER2DRECTSHADOW = 461,
F16SAMPLERCUBESHADOW = 462,
F16SAMPLERCUBEARRAYSHADOW = 463,
IMAGE1D = 464,
IIMAGE1D = 465,
UIMAGE1D = 466,
IMAGE2D = 467,
IIMAGE2D = 468,
UIMAGE2D = 469,
IMAGE3D = 470,
IIMAGE3D = 471,
UIMAGE3D = 472,
IMAGE2DRECT = 473,
IIMAGE2DRECT = 474,
UIMAGE2DRECT = 475,
IMAGECUBE = 476,
IIMAGECUBE = 477,
UIMAGECUBE = 478,
IMAGEBUFFER = 479,
IIMAGEBUFFER = 480,
UIMAGEBUFFER = 481,
IMAGE1DARRAY = 482,
IIMAGE1DARRAY = 483,
UIMAGE1DARRAY = 484,
IMAGE2DARRAY = 485,
IIMAGE2DARRAY = 486,
UIMAGE2DARRAY = 487,
IMAGECUBEARRAY = 488,
IIMAGECUBEARRAY = 489,
UIMAGECUBEARRAY = 490,
IMAGE2DMS = 491,
IIMAGE2DMS = 492,
UIMAGE2DMS = 493,
IMAGE2DMSARRAY = 494,
IIMAGE2DMSARRAY = 495,
UIMAGE2DMSARRAY = 496,
F16IMAGE1D = 497,
F16IMAGE2D = 498,
F16IMAGE3D = 499,
F16IMAGE2DRECT = 500,
F16IMAGECUBE = 501,
F16IMAGE1DARRAY = 502,
F16IMAGE2DARRAY = 503,
F16IMAGECUBEARRAY = 504,
F16IMAGEBUFFER = 505,
F16IMAGE2DMS = 506,
F16IMAGE2DMSARRAY = 507,
TEXTURECUBEARRAY = 508,
ITEXTURECUBEARRAY = 509,
UTEXTURECUBEARRAY = 510,
TEXTURE1D = 511,
ITEXTURE1D = 512,
UTEXTURE1D = 513,
TEXTURE1DARRAY = 514,
ITEXTURE1DARRAY = 515,
UTEXTURE1DARRAY = 516,
TEXTURE2DRECT = 517,
ITEXTURE2DRECT = 518,
UTEXTURE2DRECT = 519,
TEXTUREBUFFER = 520,
ITEXTUREBUFFER = 521,
UTEXTUREBUFFER = 522,
TEXTURE2DMS = 523,
ITEXTURE2DMS = 524,
UTEXTURE2DMS = 525,
TEXTURE2DMSARRAY = 526,
ITEXTURE2DMSARRAY = 527,
UTEXTURE2DMSARRAY = 528,
F16TEXTURE1D = 529,
F16TEXTURE2D = 530,
F16TEXTURE3D = 531,
F16TEXTURE2DRECT = 532,
F16TEXTURECUBE = 533,
F16TEXTURE1DARRAY = 534,
F16TEXTURE2DARRAY = 535,
F16TEXTURECUBEARRAY = 536,
F16TEXTUREBUFFER = 537,
F16TEXTURE2DMS = 538,
F16TEXTURE2DMSARRAY = 539,
SUBPASSINPUT = 540,
SUBPASSINPUTMS = 541,
ISUBPASSINPUT = 542,
ISUBPASSINPUTMS = 543,
USUBPASSINPUT = 544,
USUBPASSINPUTMS = 545,
F16SUBPASSINPUT = 546,
F16SUBPASSINPUTMS = 547,
LEFT_OP = 548,
RIGHT_OP = 549,
INC_OP = 550,
DEC_OP = 551,
LE_OP = 552,
GE_OP = 553,
EQ_OP = 554,
NE_OP = 555,
AND_OP = 556,
OR_OP = 557,
XOR_OP = 558,
MUL_ASSIGN = 559,
DIV_ASSIGN = 560,
ADD_ASSIGN = 561,
MOD_ASSIGN = 562,
LEFT_ASSIGN = 563,
RIGHT_ASSIGN = 564,
AND_ASSIGN = 565,
XOR_ASSIGN = 566,
OR_ASSIGN = 567,
SUB_ASSIGN = 568,
LEFT_PAREN = 569,
RIGHT_PAREN = 570,
LEFT_BRACKET = 571,
RIGHT_BRACKET = 572,
LEFT_BRACE = 573,
RIGHT_BRACE = 574,
DOT = 575,
COMMA = 576,
COLON = 577,
EQUAL = 578,
SEMICOLON = 579,
BANG = 580,
DASH = 581,
TILDE = 582,
PLUS = 583,
STAR = 584,
SLASH = 585,
PERCENT = 586,
LEFT_ANGLE = 587,
RIGHT_ANGLE = 588,
VERTICAL_BAR = 589,
CARET = 590,
AMPERSAND = 591,
QUESTION = 592,
INVARIANT = 593,
HIGH_PRECISION = 594,
MEDIUM_PRECISION = 595,
LOW_PRECISION = 596,
PRECISION = 597,
PACKED = 598,
RESOURCE = 599,
SUPERP = 600,
FLOATCONSTANT = 601,
INTCONSTANT = 602,
UINTCONSTANT = 603,
BOOLCONSTANT = 604,
IDENTIFIER = 605,
TYPE_NAME = 606,
CENTROID = 607,
IN = 608,
OUT = 609,
INOUT = 610,
STRUCT = 611,
VOID = 612,
WHILE = 613,
BREAK = 614,
CONTINUE = 615,
DO = 616,
ELSE = 617,
FOR = 618,
IF = 619,
DISCARD = 620,
RETURN = 621,
SWITCH = 622,
CASE = 623,
DEFAULT = 624,
UNIFORM = 625,
SHARED = 626,
BUFFER = 627,
FLAT = 628,
SMOOTH = 629,
LAYOUT = 630,
DOUBLECONSTANT = 631,
INT16CONSTANT = 632,
UINT16CONSTANT = 633,
FLOAT16CONSTANT = 634,
INT32CONSTANT = 635,
UINT32CONSTANT = 636,
INT64CONSTANT = 637,
UINT64CONSTANT = 638,
SUBROUTINE = 639,
DEMOTE = 640,
PAYLOADNV = 641,
PAYLOADINNV = 642,
HITATTRNV = 643,
CALLDATANV = 644,
CALLDATAINNV = 645,
PATCH = 646,
SAMPLE = 647,
NONUNIFORM = 648,
COHERENT = 649,
VOLATILE = 650,
RESTRICT = 651,
READONLY = 652,
WRITEONLY = 653,
DEVICECOHERENT = 654,
QUEUEFAMILYCOHERENT = 655,
WORKGROUPCOHERENT = 656,
SUBGROUPCOHERENT = 657,
NONPRIVATE = 658,
NOPERSPECTIVE = 659,
EXPLICITINTERPAMD = 660,
PERVERTEXNV = 661,
PERPRIMITIVENV = 662,
PERVIEWNV = 663,
PERTASKNV = 664,
PRECISE = 665
};
#endif
/* Value type. */
#if ! defined YYSTYPE && ! defined YYSTYPE_IS_DECLARED
typedef union YYSTYPE YYSTYPE;
union YYSTYPE
{
#line 70 "MachineIndependent/glslang.y" /* yacc.c:1909 */
#line 96 "MachineIndependent/glslang.y" /* yacc.c:1909 */
struct {
glslang::TSourceLoc loc;
@ -474,10 +494,13 @@ union YYSTYPE
glslang::TArraySizes* arraySizes;
glslang::TIdentifierList* identifierList;
};
glslang::TArraySizes* typeParameters;
} interm;
#line 480 "MachineIndependent/glslang_tab.cpp.h" /* yacc.c:1909 */
#line 501 "MachineIndependent/glslang_tab.cpp.h" /* yacc.c:1909 */
};
typedef union YYSTYPE YYSTYPE;
# define YYSTYPE_IS_TRIVIAL 1
# define YYSTYPE_IS_DECLARED 1
#endif

Externals/glslang/glslang/MachineIndependent/intermOut.cpp vendored Executable file → Normal file

@ -35,6 +35,8 @@
// POSSIBILITY OF SUCH DAMAGE.
//
#ifndef GLSLANG_WEB
#include "localintermediate.h"
#include "../Include/InfoSink.h"
@ -43,6 +45,7 @@
#else
#include <cmath>
#endif
#include <cstdint>
namespace {
@ -172,8 +175,12 @@ bool TOutputTraverser::visitBinary(TVisit /* visit */, TIntermBinary* node)
case EOpIndexDirect: out.debug << "direct index"; break;
case EOpIndexIndirect: out.debug << "indirect index"; break;
case EOpIndexDirectStruct:
out.debug << (*node->getLeft()->getType().getStruct())[node->getRight()->getAsConstantUnion()->getConstArray()[0].getIConst()].type->getFieldName();
out.debug << ": direct index for structure"; break;
{
bool reference = node->getLeft()->getType().isReference();
const TTypeList *members = reference ? node->getLeft()->getType().getReferentType()->getStruct() : node->getLeft()->getType().getStruct();
out.debug << (*members)[node->getRight()->getAsConstantUnion()->getConstArray()[0].getIConst()].type->getFieldName();
out.debug << ": direct index for structure"; break;
}
case EOpVectorSwizzle: out.debug << "vector swizzle"; break;
case EOpMatrixSwizzle: out.debug << "matrix swizzle"; break;
@ -206,6 +213,13 @@ bool TOutputTraverser::visitBinary(TVisit /* visit */, TIntermBinary* node)
case EOpLogicalXor: out.debug << "logical-xor"; break;
case EOpLogicalAnd: out.debug << "logical-and"; break;
case EOpAbsDifference: out.debug << "absoluteDifference"; break;
case EOpAddSaturate: out.debug << "addSaturate"; break;
case EOpSubSaturate: out.debug << "subtractSaturate"; break;
case EOpAverage: out.debug << "average"; break;
case EOpAverageRounded: out.debug << "averageRounded"; break;
case EOpMul32x16: out.debug << "multiply32x16"; break;
default: out.debug << "<unknown op>";
}
@ -232,6 +246,7 @@ bool TOutputTraverser::visitUnary(TVisit /* visit */, TIntermUnary* node)
case EOpPostDecrement: out.debug << "Post-Decrement"; break;
case EOpPreIncrement: out.debug << "Pre-Increment"; break;
case EOpPreDecrement: out.debug << "Pre-Decrement"; break;
case EOpCopyObject: out.debug << "copy object"; break;
// * -> bool
case EOpConvInt8ToBool: out.debug << "Convert int8_t to bool"; break;
@ -419,6 +434,8 @@ bool TOutputTraverser::visitUnary(TVisit /* visit */, TIntermUnary* node)
case EOpConvDoubleToUint: out.debug << "Convert double to uint"; break;
case EOpConvDoubleToUint64: out.debug << "Convert double to uint64"; break;
case EOpConvUint64ToPtr: out.debug << "Convert uint64_t to pointer"; break;
case EOpConvPtrToUint64: out.debug << "Convert pointer to uint64_t"; break;
case EOpRadians: out.debug << "radians"; break;
case EOpDegrees: out.debug << "degrees"; break;
@ -547,6 +564,9 @@ bool TOutputTraverser::visitUnary(TVisit /* visit */, TIntermUnary* node)
case EOpFindLSB: out.debug << "findLSB"; break;
case EOpFindMSB: out.debug << "findMSB"; break;
case EOpCountLeadingZeros: out.debug << "countLeadingZeros"; break;
case EOpCountTrailingZeros: out.debug << "countTrailingZeros"; break;
case EOpNoise: out.debug << "noise"; break;
case EOpBallot: out.debug << "ballot"; break;
@ -607,7 +627,6 @@ bool TOutputTraverser::visitUnary(TVisit /* visit */, TIntermUnary* node)
case EOpSubgroupQuadSwapVertical: out.debug << "subgroupQuadSwapVertical"; break;
case EOpSubgroupQuadSwapDiagonal: out.debug << "subgroupQuadSwapDiagonal"; break;
#ifdef NV_EXTENSIONS
case EOpSubgroupPartition: out.debug << "subgroupPartitionNV"; break;
case EOpSubgroupPartitionedAdd: out.debug << "subgroupPartitionedAddNV"; break;
case EOpSubgroupPartitionedMul: out.debug << "subgroupPartitionedMulNV"; break;
@ -630,7 +649,6 @@ bool TOutputTraverser::visitUnary(TVisit /* visit */, TIntermUnary* node)
case EOpSubgroupPartitionedExclusiveAnd: out.debug << "subgroupPartitionedExclusiveAndNV"; break;
case EOpSubgroupPartitionedExclusiveOr: out.debug << "subgroupPartitionedExclusiveOrNV"; break;
case EOpSubgroupPartitionedExclusiveXor: out.debug << "subgroupPartitionedExclusiveXorNV"; break;
#endif
case EOpClip: out.debug << "clip"; break;
case EOpIsFinite: out.debug << "isfinite"; break;
@ -640,7 +658,6 @@ bool TOutputTraverser::visitUnary(TVisit /* visit */, TIntermUnary* node)
case EOpSparseTexelsResident: out.debug << "sparseTexelsResident"; break;
#ifdef AMD_EXTENSIONS
case EOpMinInvocations: out.debug << "minInvocations"; break;
case EOpMaxInvocations: out.debug << "maxInvocations"; break;
case EOpAddInvocations: out.debug << "addInvocations"; break;
@ -669,11 +686,12 @@ bool TOutputTraverser::visitUnary(TVisit /* visit */, TIntermUnary* node)
case EOpCubeFaceIndex: out.debug << "cubeFaceIndex"; break;
case EOpCubeFaceCoord: out.debug << "cubeFaceCoord"; break;
#endif
case EOpSubpassLoad: out.debug << "subpassLoad"; break;
case EOpSubpassLoadMS: out.debug << "subpassLoadMS"; break;
case EOpConstructReference: out.debug << "Construct reference type"; break;
default: out.debug.message(EPrefixError, "Bad unary op");
}
@ -808,6 +826,8 @@ bool TOutputTraverser::visitAggregate(TVisit /* visit */, TIntermAggregate* node
case EOpConstructF16Mat4x4: out.debug << "Construct f16mat4"; break;
case EOpConstructStruct: out.debug << "Construct structure"; break;
case EOpConstructTextureSampler: out.debug << "Construct combined texture-sampler"; break;
case EOpConstructReference: out.debug << "Construct reference"; break;
case EOpConstructCooperativeMatrix: out.debug << "Construct cooperative matrix"; break;
case EOpLessThan: out.debug << "Compare Less Than"; break;
case EOpGreaterThan: out.debug << "Compare Greater Than"; break;
@ -851,7 +871,6 @@ bool TOutputTraverser::visitAggregate(TVisit /* visit */, TIntermAggregate* node
case EOpReadInvocation: out.debug << "readInvocation"; break;
#ifdef AMD_EXTENSIONS
case EOpSwizzleInvocations: out.debug << "swizzleInvocations"; break;
case EOpSwizzleInvocationsMasked: out.debug << "swizzleInvocationsMasked"; break;
case EOpWriteInvocation: out.debug << "writeInvocation"; break;
@ -859,9 +878,7 @@ bool TOutputTraverser::visitAggregate(TVisit /* visit */, TIntermAggregate* node
case EOpMin3: out.debug << "min3"; break;
case EOpMax3: out.debug << "max3"; break;
case EOpMid3: out.debug << "mid3"; break;
case EOpTime: out.debug << "time"; break;
#endif
case EOpAtomicAdd: out.debug << "AtomicAdd"; break;
case EOpAtomicMin: out.debug << "AtomicMin"; break;
@ -871,6 +888,8 @@ bool TOutputTraverser::visitAggregate(TVisit /* visit */, TIntermAggregate* node
case EOpAtomicXor: out.debug << "AtomicXor"; break;
case EOpAtomicExchange: out.debug << "AtomicExchange"; break;
case EOpAtomicCompSwap: out.debug << "AtomicCompSwap"; break;
case EOpAtomicLoad: out.debug << "AtomicLoad"; break;
case EOpAtomicStore: out.debug << "AtomicStore"; break;
case EOpAtomicCounterAdd: out.debug << "AtomicCounterAdd"; break;
case EOpAtomicCounterSubtract: out.debug << "AtomicCounterSubtract"; break;
@ -894,10 +913,10 @@ bool TOutputTraverser::visitAggregate(TVisit /* visit */, TIntermAggregate* node
case EOpImageAtomicXor: out.debug << "imageAtomicXor"; break;
case EOpImageAtomicExchange: out.debug << "imageAtomicExchange"; break;
case EOpImageAtomicCompSwap: out.debug << "imageAtomicCompSwap"; break;
#ifdef AMD_EXTENSIONS
case EOpImageAtomicLoad: out.debug << "imageAtomicLoad"; break;
case EOpImageAtomicStore: out.debug << "imageAtomicStore"; break;
case EOpImageLoadLod: out.debug << "imageLoadLod"; break;
case EOpImageStoreLod: out.debug << "imageStoreLod"; break;
#endif
case EOpTextureQuerySize: out.debug << "textureSize"; break;
case EOpTextureQueryLod: out.debug << "textureQueryLod"; break;
@ -924,11 +943,9 @@ bool TOutputTraverser::visitAggregate(TVisit /* visit */, TIntermAggregate* node
case EOpTextureOffsetClamp: out.debug << "textureOffsetClamp"; break;
case EOpTextureGradClamp: out.debug << "textureGradClamp"; break;
case EOpTextureGradOffsetClamp: out.debug << "textureGradOffsetClamp"; break;
#ifdef AMD_EXTENSIONS
case EOpTextureGatherLod: out.debug << "textureGatherLod"; break;
case EOpTextureGatherLodOffset: out.debug << "textureGatherLodOffset"; break;
case EOpTextureGatherLodOffsets: out.debug << "textureGatherLodOffsets"; break;
#endif
case EOpSparseTexture: out.debug << "sparseTexture"; break;
case EOpSparseTextureOffset: out.debug << "sparseTextureOffset"; break;
@ -946,13 +963,15 @@ bool TOutputTraverser::visitAggregate(TVisit /* visit */, TIntermAggregate* node
case EOpSparseTextureOffsetClamp: out.debug << "sparseTextureOffsetClamp"; break;
case EOpSparseTextureGradClamp: out.debug << "sparseTextureGradClamp"; break;
case EOpSparseTextureGradOffsetClamp: out.debug << "sparseTextureGradOffsetClam"; break;
#ifdef AMD_EXTENSIONS
case EOpSparseTextureGatherLod: out.debug << "sparseTextureGatherLod"; break;
case EOpSparseTextureGatherLodOffset: out.debug << "sparseTextureGatherLodOffset"; break;
case EOpSparseTextureGatherLodOffsets: out.debug << "sparseTextureGatherLodOffsets"; break;
case EOpSparseImageLoadLod: out.debug << "sparseImageLoadLod"; break;
#endif
case EOpImageSampleFootprintNV: out.debug << "imageSampleFootprintNV"; break;
case EOpImageSampleFootprintClampNV: out.debug << "imageSampleFootprintClampNV"; break;
case EOpImageSampleFootprintLodNV: out.debug << "imageSampleFootprintLodNV"; break;
case EOpImageSampleFootprintGradNV: out.debug << "imageSampleFootprintGradNV"; break;
case EOpImageSampleFootprintGradClampNV: out.debug << "mageSampleFootprintGradClampNV"; break;
case EOpAddCarry: out.debug << "addCarry"; break;
case EOpSubBorrow: out.debug << "subBorrow"; break;
case EOpUMulExtended: out.debug << "uMulExtended"; break;
@ -966,9 +985,7 @@ bool TOutputTraverser::visitAggregate(TVisit /* visit */, TIntermAggregate* node
case EOpInterpolateAtSample: out.debug << "interpolateAtSample"; break;
case EOpInterpolateAtOffset: out.debug << "interpolateAtOffset"; break;
#ifdef AMD_EXTENSIONS
case EOpInterpolateAtVertex: out.debug << "interpolateAtVertex"; break;
#endif
case EOpSinCos: out.debug << "sincos"; break;
case EOpGenMul: out.debug << "mul"; break;
@ -1035,9 +1052,45 @@ bool TOutputTraverser::visitAggregate(TVisit /* visit */, TIntermAggregate* node
case EOpSubgroupQuadSwapVertical: out.debug << "subgroupQuadSwapVertical"; break;
case EOpSubgroupQuadSwapDiagonal: out.debug << "subgroupQuadSwapDiagonal"; break;
case EOpSubgroupPartition: out.debug << "subgroupPartitionNV"; break;
case EOpSubgroupPartitionedAdd: out.debug << "subgroupPartitionedAddNV"; break;
case EOpSubgroupPartitionedMul: out.debug << "subgroupPartitionedMulNV"; break;
case EOpSubgroupPartitionedMin: out.debug << "subgroupPartitionedMinNV"; break;
case EOpSubgroupPartitionedMax: out.debug << "subgroupPartitionedMaxNV"; break;
case EOpSubgroupPartitionedAnd: out.debug << "subgroupPartitionedAndNV"; break;
case EOpSubgroupPartitionedOr: out.debug << "subgroupPartitionedOrNV"; break;
case EOpSubgroupPartitionedXor: out.debug << "subgroupPartitionedXorNV"; break;
case EOpSubgroupPartitionedInclusiveAdd: out.debug << "subgroupPartitionedInclusiveAddNV"; break;
case EOpSubgroupPartitionedInclusiveMul: out.debug << "subgroupPartitionedInclusiveMulNV"; break;
case EOpSubgroupPartitionedInclusiveMin: out.debug << "subgroupPartitionedInclusiveMinNV"; break;
case EOpSubgroupPartitionedInclusiveMax: out.debug << "subgroupPartitionedInclusiveMaxNV"; break;
case EOpSubgroupPartitionedInclusiveAnd: out.debug << "subgroupPartitionedInclusiveAndNV"; break;
case EOpSubgroupPartitionedInclusiveOr: out.debug << "subgroupPartitionedInclusiveOrNV"; break;
case EOpSubgroupPartitionedInclusiveXor: out.debug << "subgroupPartitionedInclusiveXorNV"; break;
case EOpSubgroupPartitionedExclusiveAdd: out.debug << "subgroupPartitionedExclusiveAddNV"; break;
case EOpSubgroupPartitionedExclusiveMul: out.debug << "subgroupPartitionedExclusiveMulNV"; break;
case EOpSubgroupPartitionedExclusiveMin: out.debug << "subgroupPartitionedExclusiveMinNV"; break;
case EOpSubgroupPartitionedExclusiveMax: out.debug << "subgroupPartitionedExclusiveMaxNV"; break;
case EOpSubgroupPartitionedExclusiveAnd: out.debug << "subgroupPartitionedExclusiveAndNV"; break;
case EOpSubgroupPartitionedExclusiveOr: out.debug << "subgroupPartitionedExclusiveOrNV"; break;
case EOpSubgroupPartitionedExclusiveXor: out.debug << "subgroupPartitionedExclusiveXorNV"; break;
case EOpSubpassLoad: out.debug << "subpassLoad"; break;
case EOpSubpassLoadMS: out.debug << "subpassLoadMS"; break;
case EOpTraceNV: out.debug << "traceNV"; break;
case EOpReportIntersectionNV: out.debug << "reportIntersectionNV"; break;
case EOpIgnoreIntersectionNV: out.debug << "ignoreIntersectionNV"; break;
case EOpTerminateRayNV: out.debug << "terminateRayNV"; break;
case EOpExecuteCallableNV: out.debug << "executeCallableNV"; break;
case EOpWritePackedPrimitiveIndices4x8NV: out.debug << "writePackedPrimitiveIndices4x8NV"; break;
case EOpCooperativeMatrixLoad: out.debug << "Load cooperative matrix"; break;
case EOpCooperativeMatrixStore: out.debug << "Store cooperative matrix"; break;
case EOpCooperativeMatrixMulAdd: out.debug << "MulAdd cooperative matrices"; break;
case EOpIsHelperInvocation: out.debug << "IsHelperInvocation"; break;
default: out.debug.message(EPrefixError, "Bad aggregation op");
}
@ -1129,9 +1182,12 @@ static void OutputDouble(TInfoSink& out, double value, TOutputTraverser::EExtraO
switch (extra) {
case TOutputTraverser::BinaryDoubleOutput:
{
uint64_t b;
static_assert(sizeof(b) == sizeof(value), "sizeof(uint64_t) != sizeof(double)");
memcpy(&b, &value, sizeof(b));
out.debug << " : ";
long long b = *reinterpret_cast<long long*>(&value);
for (int i = 0; i < 8 * sizeof(value); ++i, ++b) {
for (size_t i = 0; i < 8 * sizeof(value); ++i, ++b) {
out.debug << ((b & 0x8000000000000000) != 0 ? "1" : "0");
b <<= 1;
}
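As a side note on the hunk above: copying the double into a uint64_t with memcpy is the well-defined way to inspect its bits, unlike the reinterpret_cast it replaces. A minimal standalone sketch of the same bit-dump idea (illustrative code, not part of glslang):

#include <cstdint>
#include <cstdio>
#include <cstring>

int main() {
    double value = 1.0;
    uint64_t b;
    static_assert(sizeof(b) == sizeof(value), "sizeof(uint64_t) != sizeof(double)");
    std::memcpy(&b, &value, sizeof(b));          // defined behavior, no aliasing issues
    for (size_t i = 0; i < 8 * sizeof(value); ++i, b <<= 1)
        std::putchar((b & 0x8000000000000000ull) ? '1' : '0');
    std::putchar('\n');                          // 1.0 prints sign 0, exponent 01111111111, mantissa all zeros
    return 0;
}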
@ -1329,6 +1385,7 @@ bool TOutputTraverser::visitBranch(TVisit /* visit*/, TIntermBranch* node)
case EOpContinue: out.debug << "Branch: Continue"; break;
case EOpReturn: out.debug << "Branch: Return"; break;
case EOpCase: out.debug << "case: "; break;
case EOpDemote: out.debug << "Demote"; break;
case EOpDefault: out.debug << "default: "; break;
default: out.debug << "Branch: Unknown Branch"; break;
}
@ -1439,8 +1496,17 @@ void TIntermediate::output(TInfoSink& infoSink, bool tree)
}
infoSink.debug << "\n";
}
if (interlockOrdering != EioNone)
infoSink.debug << "interlock ordering = " << TQualifier::getInterlockOrderingString(interlockOrdering) << "\n";
break;
case EShLangMeshNV:
infoSink.debug << "max_vertices = " << vertices << "\n";
infoSink.debug << "max_primitives = " << primitives << "\n";
infoSink.debug << "output primitive = " << TQualifier::getGeometryString(outputPrimitive) << "\n";
// Fall through
case EShLangTaskNV:
// Fall through
case EShLangCompute:
infoSink.debug << "local_size = (" << localSize[0] << ", " << localSize[1] << ", " << localSize[2] << ")\n";
{
@ -1469,3 +1535,5 @@ void TIntermediate::output(TInfoSink& infoSink, bool tree)
}
} // end namespace glslang
#endif // not GLSLANG_WEB

File diff suppressed because it is too large.
@ -33,11 +33,15 @@
// POSSIBILITY OF SUCH DAMAGE.
//
#ifndef GLSLANG_WEB
#ifndef _IOMAPPER_INCLUDED
#define _IOMAPPER_INCLUDED
#include "../Public/ShaderLang.h"
#include <cstdint>
#include "LiveTraverser.h"
#include <unordered_map>
#include <unordered_set>
//
// A reflection database and its interface, consistent with the OpenGL API reflection queries.
//
@ -47,17 +51,249 @@ class TInfoSink;
namespace glslang {
class TIntermediate;
struct TVarEntryInfo {
int id;
TIntermSymbol* symbol;
bool live;
int newBinding;
int newSet;
int newLocation;
int newComponent;
int newIndex;
EShLanguage stage;
struct TOrderById {
inline bool operator()(const TVarEntryInfo& l, const TVarEntryInfo& r) { return l.id < r.id; }
};
struct TOrderByPriority {
// ordering:
// 1) has both binding and set
// 2) has binding but no set
// 3) has no binding but set
// 4) has no binding and no set
inline bool operator()(const TVarEntryInfo& l, const TVarEntryInfo& r) {
const TQualifier& lq = l.symbol->getQualifier();
const TQualifier& rq = r.symbol->getQualifier();
// simple rules:
// has binding gives 2 points
// has set gives 1 point
// whichever entry has more points is more important (a small sorting example follows this struct).
int lPoints = (lq.hasBinding() ? 2 : 0) + (lq.hasSet() ? 1 : 0);
int rPoints = (rq.hasBinding() ? 2 : 0) + (rq.hasSet() ? 1 : 0);
if (lPoints == rPoints)
return l.id < r.id;
return lPoints > rPoints;
}
};
};
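A standalone sketch of how the TOrderByPriority rule above orders entries, using a simplified stand-in for TVarEntryInfo (the struct and the values below are illustrative, not glslang types):

#include <algorithm>
#include <cstdio>
#include <vector>

struct Entry { int id; bool hasBinding; bool hasSet; };

int main() {
    std::vector<Entry> vars = {
        {10, false, false},  // no binding, no set -> 0 points
        {11, true,  true },  // binding and set    -> 3 points
        {12, false, true },  // set only           -> 1 point
        {13, true,  false},  // binding only       -> 2 points
    };
    std::sort(vars.begin(), vars.end(), [](const Entry& l, const Entry& r) {
        int lPoints = (l.hasBinding ? 2 : 0) + (l.hasSet ? 1 : 0);
        int rPoints = (r.hasBinding ? 2 : 0) + (r.hasSet ? 1 : 0);
        return lPoints == rPoints ? l.id < r.id : lPoints > rPoints;
    });
    for (const Entry& e : vars)
        std::printf("%d\n", e.id);   // prints 11, 13, 12, 10
    return 0;
}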
// Base class for shared TIoMapResolver services, used by several derivations.
struct TDefaultIoResolverBase : public glslang::TIoMapResolver {
public:
TDefaultIoResolverBase(const TIntermediate& intermediate);
typedef std::vector<int> TSlotSet;
typedef std::unordered_map<int, TSlotSet> TSlotSetMap;
// grow the reflection stage by stage
void notifyBinding(EShLanguage, TVarEntryInfo& /*ent*/) override {}
void notifyInOut(EShLanguage, TVarEntryInfo& /*ent*/) override {}
void beginNotifications(EShLanguage) override {}
void endNotifications(EShLanguage) override {}
void beginResolve(EShLanguage) override {}
void endResolve(EShLanguage) override {}
void beginCollect(EShLanguage) override {}
void endCollect(EShLanguage) override {}
void reserverResourceSlot(TVarEntryInfo& /*ent*/, TInfoSink& /*infoSink*/) override {}
void reserverStorageSlot(TVarEntryInfo& /*ent*/, TInfoSink& /*infoSink*/) override {}
int getBaseBinding(TResourceType res, unsigned int set) const;
const std::vector<std::string>& getResourceSetBinding() const;
virtual TResourceType getResourceType(const glslang::TType& type) = 0;
bool doAutoBindingMapping() const;
bool doAutoLocationMapping() const;
TSlotSet::iterator findSlot(int set, int slot);
bool checkEmpty(int set, int slot);
bool validateInOut(EShLanguage /*stage*/, TVarEntryInfo& /*ent*/) override { return true; }
int reserveSlot(int set, int slot, int size = 1);
int getFreeSlot(int set, int base, int size = 1);
int resolveSet(EShLanguage /*stage*/, TVarEntryInfo& ent) override;
int resolveUniformLocation(EShLanguage /*stage*/, TVarEntryInfo& ent) override;
int resolveInOutLocation(EShLanguage stage, TVarEntryInfo& ent) override;
int resolveInOutComponent(EShLanguage /*stage*/, TVarEntryInfo& ent) override;
int resolveInOutIndex(EShLanguage /*stage*/, TVarEntryInfo& ent) override;
void addStage(EShLanguage stage) override {
if (stage < EShLangCount)
stageMask[stage] = true;
}
uint32_t computeTypeLocationSize(const TType& type, EShLanguage stage);
TSlotSetMap slots;
protected:
TDefaultIoResolverBase(TDefaultIoResolverBase&);
TDefaultIoResolverBase& operator=(TDefaultIoResolverBase&);
const TIntermediate& intermediate;
int nextUniformLocation;
int nextInputLocation;
int nextOutputLocation;
bool stageMask[EShLangCount + 1];
// Return descriptor set specific base if there is one, and the generic base otherwise.
int selectBaseBinding(int base, int descriptorSetBase) const {
return descriptorSetBase != -1 ? descriptorSetBase : base;
}
static int getLayoutSet(const glslang::TType& type) {
if (type.getQualifier().hasSet())
return type.getQualifier().layoutSet;
else
return 0;
}
static bool isSamplerType(const glslang::TType& type) {
return type.getBasicType() == glslang::EbtSampler && type.getSampler().isPureSampler();
}
static bool isTextureType(const glslang::TType& type) {
return (type.getBasicType() == glslang::EbtSampler &&
(type.getSampler().isTexture() || type.getSampler().isSubpass()));
}
static bool isUboType(const glslang::TType& type) {
return type.getQualifier().storage == EvqUniform;
}
static bool isImageType(const glslang::TType& type) {
return type.getBasicType() == glslang::EbtSampler && type.getSampler().isImage();
}
static bool isSsboType(const glslang::TType& type) {
return type.getQualifier().storage == EvqBuffer;
}
// Return true if this is a SRV (shader resource view) type:
static bool isSrvType(const glslang::TType& type) {
return isTextureType(type) || type.getQualifier().storage == EvqBuffer;
}
// Return true if this is a UAV (unordered access view) type:
static bool isUavType(const glslang::TType& type) {
if (type.getQualifier().isReadOnly())
return false;
return (type.getBasicType() == glslang::EbtSampler && type.getSampler().isImage()) ||
(type.getQualifier().storage == EvqBuffer);
}
};
// Default I/O resolver for OpenGL
struct TDefaultGlslIoResolver : public TDefaultIoResolverBase {
public:
typedef std::map<TString, int> TVarSlotMap; // <resourceName, location/binding>
typedef std::map<int, TVarSlotMap> TSlotMap; // <resourceKey, TVarSlotMap>
TDefaultGlslIoResolver(const TIntermediate& intermediate);
bool validateBinding(EShLanguage /*stage*/, TVarEntryInfo& /*ent*/) override { return true; }
TResourceType getResourceType(const glslang::TType& type) override;
int resolveInOutLocation(EShLanguage stage, TVarEntryInfo& ent) override;
int resolveUniformLocation(EShLanguage /*stage*/, TVarEntryInfo& ent) override;
int resolveBinding(EShLanguage /*stage*/, TVarEntryInfo& ent) override;
void beginResolve(EShLanguage /*stage*/) override;
void endResolve(EShLanguage stage) override;
void beginCollect(EShLanguage) override;
void endCollect(EShLanguage) override;
void reserverStorageSlot(TVarEntryInfo& ent, TInfoSink& infoSink) override;
void reserverResourceSlot(TVarEntryInfo& ent, TInfoSink& infoSink) override;
// In/out symbols and uniform symbols are stored in the same resourceSlotMap; the storage key identifies each kind of symbol.
// A storage key is built from the stage and the storage qualifier, which lets us recognize the same storage resource used in different stages.
// If a resource is a program resource and its usage stage does not matter, the same stage can be used to build the storage key.
// Note: both stage and type must be less than 0xffff. (A worked example of the packing follows this struct.)
int buildStorageKey(EShLanguage stage, TStorageQualifier type) {
assert(static_cast<uint32_t>(stage) <= 0x0000ffff && static_cast<uint32_t>(type) <= 0x0000ffff);
return (stage << 16) | type;
}
protected:
// Used to mark the previous stage, so more interface-symbol information can be gathered.
EShLanguage preStage;
// Used to mark the current shader stage for the resolver.
EShLanguage currentStage;
// Slot map for storage resources (locations of uniforms and interface symbols); the slots are shared across the program.
TSlotMap resourceSlotMap;
// Slot map for other resources (images, UBOs, SSBOs); the slots are shared across the program.
TSlotMap storageSlotMap;
};
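A quick numeric illustration of the buildStorageKey packing above; the concrete enum values are assumed for the example, not taken from the headers:

#include <cassert>
#include <cstdint>

int main() {
    const uint32_t stage = 4;  // e.g. a fragment-stage enum value (assumed)
    const uint32_t type  = 6;  // e.g. a uniform storage-qualifier value (assumed)
    const int key = (stage << 16) | type;  // 0x00040006
    assert((key >> 16) == 4);              // stage recovered from the high half
    assert((key & 0xffff) == 6);           // qualifier recovered from the low half
    return 0;
}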
typedef std::map<TString, TVarEntryInfo> TVarLiveMap;
// Override of "operator=": when a vector<const _Kty, _Ty> is sorted under vc++,
// the sort implementation calls:
// pair& operator=(const pair<_Other1, _Other2>& _Right)
// {
// first = _Right.first;
// second = _Right.second;
// return (*this);
// }
// which ends up assigning to a const member on the left-hand side.
// Overriding the operator avoids that compiler error.
// In the future, if the vc++ compiler can handle such a situation,
// this part of the code will be removed.
struct TVarLivePair : std::pair<const TString, TVarEntryInfo> {
TVarLivePair(std::pair<const TString, TVarEntryInfo>& _Right) : pair(_Right.first, _Right.second) {}
TVarLivePair& operator=(const TVarLivePair& _Right) {
const_cast<TString&>(first) = _Right.first;
second = _Right.second;
return (*this);
}
};
typedef std::vector<TVarLivePair> TVarLiveVector;
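A self-contained sketch of the pattern this wrapper enables, with std::string and int standing in for TString and TVarEntryInfo (illustrative stand-ins only):

#include <algorithm>
#include <map>
#include <string>
#include <vector>

struct LivePair : std::pair<const std::string, int> {
    LivePair(const std::pair<const std::string, int>& rhs) : pair(rhs.first, rhs.second) {}
    LivePair& operator=(const LivePair& rhs) {
        const_cast<std::string&>(first) = rhs.first;  // same trick as TVarLivePair above
        second = rhs.second;
        return *this;
    }
};

int main() {
    std::map<std::string, int> liveMap = {{"b", 2}, {"a", 1}};
    // Copy the map entries into a vector, then sort; std::sort needs assignable
    // elements, which the operator= above provides despite the const key.
    std::vector<LivePair> vec(liveMap.begin(), liveMap.end());
    std::sort(vec.begin(), vec.end(),
              [](const LivePair& l, const LivePair& r) { return l.second < r.second; });
    return 0;
}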
// I/O mapper
class TIoMapper {
public:
TIoMapper() {}
virtual ~TIoMapper() {}
// grow the reflection stage by stage
bool addStage(EShLanguage, TIntermediate&, TInfoSink&, TIoMapResolver*);
bool virtual addStage(EShLanguage, TIntermediate&, TInfoSink&, TIoMapResolver*);
bool virtual doMap(TIoMapResolver*, TInfoSink&) { return true; }
};
// I/O mapper for OpenGL
class TGlslIoMapper : public TIoMapper {
public:
TGlslIoMapper() {
memset(inVarMaps, 0, sizeof(TVarLiveMap*) * (EShLangCount + 1));
memset(outVarMaps, 0, sizeof(TVarLiveMap*) * (EShLangCount + 1));
memset(uniformVarMap, 0, sizeof(TVarLiveMap*) * (EShLangCount + 1));
memset(intermediates, 0, sizeof(TIntermediate*) * (EShLangCount + 1));
}
virtual ~TGlslIoMapper() {
for (size_t stage = 0; stage < EShLangCount; stage++) {
if (inVarMaps[stage] != nullptr) {
delete inVarMaps[stage];
inVarMaps[stage] = nullptr;
}
if (outVarMaps[stage] != nullptr) {
delete outVarMaps[stage];
outVarMaps[stage] = nullptr;
}
if (uniformVarMap[stage] != nullptr) {
delete uniformVarMap[stage];
uniformVarMap[stage] = nullptr;
}
if (intermediates[stage] != nullptr)
intermediates[stage] = nullptr;
}
}
// grow the reflection stage by stage
bool addStage(EShLanguage, TIntermediate&, TInfoSink&, TIoMapResolver*) override;
bool doMap(TIoMapResolver*, TInfoSink&) override;
TVarLiveMap *inVarMaps[EShLangCount], *outVarMaps[EShLangCount],
*uniformVarMap[EShLangCount];
TIntermediate* intermediates[EShLangCount];
bool hadError = false;
};
} // end namespace glslang
#endif // _IOMAPPER_INCLUDED
#endif // GLSLANG_WEB
@ -187,12 +187,14 @@ bool TIndexTraverser::visitAggregate(TVisit /* visit */, TIntermAggregate* node)
//
void TParseContext::constantIndexExpressionCheck(TIntermNode* index)
{
#ifndef GLSLANG_WEB
TIndexTraverser it(inductiveLoopIds);
index->traverse(&it);
if (it.bad)
error(it.badLoc, "Non-constant-index-expression", "limitations", "");
#endif
}
} // end namespace glslang
@ -1,6 +1,7 @@
//
// Copyright (C) 2013 LunarG, Inc.
// Copyright (C) 2017 ARM Limited.
// Copyright (C) 2015-2018 Google, Inc.
//
// All rights reserved.
//
@ -55,8 +56,10 @@ namespace glslang {
//
void TIntermediate::error(TInfoSink& infoSink, const char* message)
{
#ifndef GLSLANG_WEB
infoSink.info.prefix(EPrefixError);
infoSink.info << "Linking " << StageName(language) << " stage: " << message << "\n";
#endif
++numErrors;
}
@ -64,8 +67,10 @@ void TIntermediate::error(TInfoSink& infoSink, const char* message)
// Link-time warning.
void TIntermediate::warn(TInfoSink& infoSink, const char* message)
{
#ifndef GLSLANG_WEB
infoSink.info.prefix(EPrefixWarning);
infoSink.info << "Linking " << StageName(language) << " stage: " << message << "\n";
#endif
}
// TODO: 4.4 offset/align: "Two blocks linked together in the same program with the same block
@ -77,12 +82,15 @@ void TIntermediate::warn(TInfoSink& infoSink, const char* message)
//
void TIntermediate::merge(TInfoSink& infoSink, TIntermediate& unit)
{
if (source == EShSourceNone)
source = unit.source;
if (source != unit.source)
error(infoSink, "can't link compilation units from different source languages");
#ifndef GLSLANG_WEB
mergeCallGraphs(infoSink, unit);
mergeModes(infoSink, unit);
mergeTrees(infoSink, unit);
#endif
}
void TIntermediate::mergeCallGraphs(TInfoSink& infoSink, TIntermediate& unit)
{
if (unit.getNumEntryPoints() > 0) {
if (getNumEntryPoints() > 0)
error(infoSink, "can't handle multiple entry points per stage");
@ -92,25 +100,71 @@ void TIntermediate::merge(TInfoSink& infoSink, TIntermediate& unit)
}
}
numEntryPoints += unit.getNumEntryPoints();
callGraph.insert(callGraph.end(), unit.callGraph.begin(), unit.callGraph.end());
}
#ifndef GLSLANG_WEB
#define MERGE_MAX(member) member = std::max(member, unit.member)
#define MERGE_TRUE(member) if (unit.member) member = unit.member;
void TIntermediate::mergeModes(TInfoSink& infoSink, TIntermediate& unit)
{
if (language != unit.language)
error(infoSink, "stages must match when linking into a single stage");
if (getSource() == EShSourceNone)
setSource(unit.getSource());
if (getSource() != unit.getSource())
error(infoSink, "can't link compilation units from different source languages");
if (treeRoot == nullptr) {
profile = unit.profile;
version = unit.version;
requestedExtensions = unit.requestedExtensions;
} else {
if ((isEsProfile()) != (unit.isEsProfile()))
error(infoSink, "Cannot cross link ES and desktop profiles");
else if (unit.profile == ECompatibilityProfile)
profile = ECompatibilityProfile;
version = std::max(version, unit.version);
requestedExtensions.insert(unit.requestedExtensions.begin(), unit.requestedExtensions.end());
}
MERGE_MAX(spvVersion.spv);
MERGE_MAX(spvVersion.vulkanGlsl);
MERGE_MAX(spvVersion.vulkan);
MERGE_MAX(spvVersion.openGl);
numErrors += unit.getNumErrors();
numPushConstants += unit.numPushConstants;
callGraph.insert(callGraph.end(), unit.callGraph.begin(), unit.callGraph.end());
if (originUpperLeft != unit.originUpperLeft || pixelCenterInteger != unit.pixelCenterInteger)
error(infoSink, "gl_FragCoord redeclarations must match across shaders");
if (unit.invocations != TQualifier::layoutNotSet) {
if (invocations == TQualifier::layoutNotSet)
invocations = unit.invocations;
else if (invocations != unit.invocations)
error(infoSink, "number of invocations must match between compilation units");
}
if (! earlyFragmentTests)
earlyFragmentTests = unit.earlyFragmentTests;
if (!postDepthCoverage)
postDepthCoverage = unit.postDepthCoverage;
if (depthLayout == EldNone)
depthLayout = unit.depthLayout;
else if (depthLayout != unit.depthLayout)
error(infoSink, "Contradictory depth layouts");
blendEquations |= unit.blendEquations;
if (vertices == TQualifier::layoutNotSet)
vertices = unit.vertices;
else if (vertices != unit.vertices) {
if (language == EShLangGeometry || language == EShLangMeshNV)
error(infoSink, "Contradictory layout max_vertices values");
else if (language == EShLangTessControl)
error(infoSink, "Contradictory layout vertices values");
else
assert(0);
}
if (primitives == TQualifier::layoutNotSet)
primitives = unit.primitives;
else if (primitives != unit.primitives) {
if (language == EShLangMeshNV)
error(infoSink, "Contradictory layout max_primitives values");
else
assert(0);
}
if (inputPrimitive == ElgNone)
inputPrimitive = unit.inputPrimitive;
@ -122,16 +176,8 @@ void TIntermediate::merge(TInfoSink& infoSink, TIntermediate& unit)
else if (outputPrimitive != unit.outputPrimitive)
error(infoSink, "Contradictory output layout primitives");
if (vertices == TQualifier::layoutNotSet)
vertices = unit.vertices;
else if (vertices != unit.vertices) {
if (language == EShLangGeometry)
error(infoSink, "Contradictory layout max_vertices values");
else if (language == EShLangTessControl)
error(infoSink, "Contradictory layout vertices values");
else
assert(0);
}
if (originUpperLeft != unit.originUpperLeft || pixelCenterInteger != unit.pixelCenterInteger)
error(infoSink, "gl_FragCoord redeclarations must match across shaders");
if (vertexSpacing == EvsNone)
vertexSpacing = unit.vertexSpacing;
@ -143,63 +189,222 @@ void TIntermediate::merge(TInfoSink& infoSink, TIntermediate& unit)
else if (vertexOrder != unit.vertexOrder)
error(infoSink, "Contradictory triangle ordering");
if (unit.pointMode)
pointMode = true;
MERGE_TRUE(pointMode);
for (int i = 0; i < 3; ++i) {
if (localSize[i] > 1)
if (!localSizeNotDefault[i] && unit.localSizeNotDefault[i]) {
localSize[i] = unit.localSize[i];
localSizeNotDefault[i] = true;
}
else if (localSize[i] != unit.localSize[i])
error(infoSink, "Contradictory local size");
if (localSizeSpecId[i] != TQualifier::layoutNotSet)
if (localSizeSpecId[i] == TQualifier::layoutNotSet)
localSizeSpecId[i] = unit.localSizeSpecId[i];
else if (localSizeSpecId[i] != unit.localSizeSpecId[i])
error(infoSink, "Contradictory local size specialization ids");
}
if (unit.xfbMode)
xfbMode = true;
MERGE_TRUE(earlyFragmentTests);
MERGE_TRUE(postDepthCoverage);
if (depthLayout == EldNone)
depthLayout = unit.depthLayout;
else if (depthLayout != unit.depthLayout)
error(infoSink, "Contradictory depth layouts");
MERGE_TRUE(depthReplacing);
MERGE_TRUE(hlslFunctionality1);
blendEquations |= unit.blendEquations;
MERGE_TRUE(xfbMode);
for (size_t b = 0; b < xfbBuffers.size(); ++b) {
if (xfbBuffers[b].stride == TQualifier::layoutXfbStrideEnd)
xfbBuffers[b].stride = unit.xfbBuffers[b].stride;
else if (xfbBuffers[b].stride != unit.xfbBuffers[b].stride)
error(infoSink, "Contradictory xfb_stride");
xfbBuffers[b].implicitStride = std::max(xfbBuffers[b].implicitStride, unit.xfbBuffers[b].implicitStride);
if (unit.xfbBuffers[b].containsDouble)
xfbBuffers[b].containsDouble = true;
if (unit.xfbBuffers[b].contains64BitType)
xfbBuffers[b].contains64BitType = true;
if (unit.xfbBuffers[b].contains32BitType)
xfbBuffers[b].contains32BitType = true;
if (unit.xfbBuffers[b].contains16BitType)
xfbBuffers[b].contains16BitType = true;
// TODO: 4.4 link: enhanced layouts: compare ranges
}
if (unit.treeRoot == 0)
MERGE_TRUE(multiStream);
MERGE_TRUE(layoutOverrideCoverage);
MERGE_TRUE(geoPassthroughEXT);
for (unsigned int i = 0; i < unit.shiftBinding.size(); ++i) {
if (unit.shiftBinding[i] > 0)
setShiftBinding((TResourceType)i, unit.shiftBinding[i]);
}
for (unsigned int i = 0; i < unit.shiftBindingForSet.size(); ++i) {
for (auto it = unit.shiftBindingForSet[i].begin(); it != unit.shiftBindingForSet[i].end(); ++it)
setShiftBindingForSet((TResourceType)i, it->second, it->first);
}
resourceSetBinding.insert(resourceSetBinding.end(), unit.resourceSetBinding.begin(), unit.resourceSetBinding.end());
MERGE_TRUE(autoMapBindings);
MERGE_TRUE(autoMapLocations);
MERGE_TRUE(invertY);
MERGE_TRUE(flattenUniformArrays);
MERGE_TRUE(useUnknownFormat);
MERGE_TRUE(hlslOffsets);
MERGE_TRUE(useStorageBuffer);
MERGE_TRUE(hlslIoMapping);
// TODO: sourceFile
// TODO: sourceText
// TODO: processes
MERGE_TRUE(needToLegalize);
MERGE_TRUE(binaryDoubleOutput);
MERGE_TRUE(usePhysicalStorageBuffer);
}
//
// Merge the 'unit' AST into 'this' AST.
// That includes rationalizing the unique IDs, which were set up independently,
// and might have overlaps that are not the same symbol, or might have different
// IDs for what should be the same shared symbol.
//
void TIntermediate::mergeTrees(TInfoSink& infoSink, TIntermediate& unit)
{
if (unit.treeRoot == nullptr)
return;
if (treeRoot == 0) {
if (treeRoot == nullptr) {
treeRoot = unit.treeRoot;
version = unit.version;
requestedExtensions = unit.requestedExtensions;
return;
}
// Getting this far means we have two existing trees to merge...
version = std::max(version, unit.version);
requestedExtensions.insert(unit.requestedExtensions.begin(), unit.requestedExtensions.end());
numShaderRecordNVBlocks += unit.numShaderRecordNVBlocks;
numTaskNVBlocks += unit.numTaskNVBlocks;
// Get the top-level globals of each unit
TIntermSequence& globals = treeRoot->getAsAggregate()->getSequence();
TIntermSequence& unitGlobals = unit.treeRoot->getAsAggregate()->getSequence();
// Get the linker-object lists
TIntermSequence& linkerObjects = findLinkerObjects();
TIntermSequence& unitLinkerObjects = unit.findLinkerObjects();
TIntermSequence& linkerObjects = findLinkerObjects()->getSequence();
const TIntermSequence& unitLinkerObjects = unit.findLinkerObjects()->getSequence();
// Map by global name to unique ID to rationalize the same object having
// differing IDs in different trees.
TMap<TString, int> idMap;
int maxId;
seedIdMap(idMap, maxId);
remapIds(idMap, maxId + 1, unit);
mergeBodies(infoSink, globals, unitGlobals);
mergeLinkerObjects(infoSink, linkerObjects, unitLinkerObjects);
ioAccessed.insert(unit.ioAccessed.begin(), unit.ioAccessed.end());
}
#endif
// Traverser that seeds an ID map with all built-ins, and tracks the
// maximum ID used.
// (It would be nice to put this in a function, but that causes warnings
// on having no bodies for the copy-constructor/operator=.)
class TBuiltInIdTraverser : public TIntermTraverser {
public:
TBuiltInIdTraverser(TMap<TString, int>& idMap) : idMap(idMap), maxId(0) { }
// If it's a built in, add it to the map.
// Track the max ID.
virtual void visitSymbol(TIntermSymbol* symbol)
{
const TQualifier& qualifier = symbol->getType().getQualifier();
if (qualifier.builtIn != EbvNone)
idMap[symbol->getName()] = symbol->getId();
maxId = std::max(maxId, symbol->getId());
}
int getMaxId() const { return maxId; }
protected:
TBuiltInIdTraverser(TBuiltInIdTraverser&);
TBuiltInIdTraverser& operator=(TBuiltInIdTraverser&);
TMap<TString, int>& idMap;
int maxId;
};
// Traverser that seeds an ID map with non-builtins.
// (It would be nice to put this in a function, but that causes warnings
// on having no bodies for the copy-constructor/operator=.)
class TUserIdTraverser : public TIntermTraverser {
public:
TUserIdTraverser(TMap<TString, int>& idMap) : idMap(idMap) { }
// If it's a non-built-in global, add it to the map.
virtual void visitSymbol(TIntermSymbol* symbol)
{
const TQualifier& qualifier = symbol->getType().getQualifier();
if (qualifier.builtIn == EbvNone)
idMap[symbol->getName()] = symbol->getId();
}
protected:
TUserIdTraverser(TUserIdTraverser&);
TUserIdTraverser& operator=(TUserIdTraverser&);
TMap<TString, int>& idMap; // over biggest id
};
// Initialize the ID map with what we know of 'this' AST.
void TIntermediate::seedIdMap(TMap<TString, int>& idMap, int& maxId)
{
// all built-ins everywhere need to align on IDs and contribute to the max ID
TBuiltInIdTraverser builtInIdTraverser(idMap);
treeRoot->traverse(&builtInIdTraverser);
maxId = builtInIdTraverser.getMaxId();
// user variables in the linker object list need to align on ids
TUserIdTraverser userIdTraverser(idMap);
findLinkerObjects()->traverse(&userIdTraverser);
}
// Traverser to map an AST ID to what was known from the seeding AST.
// (It would be nice to put this in a function, but that causes warnings
// on having no bodies for the copy-constructor/operator=.)
class TRemapIdTraverser : public TIntermTraverser {
public:
TRemapIdTraverser(const TMap<TString, int>& idMap, int idShift) : idMap(idMap), idShift(idShift) { }
// Do the mapping:
// - if the same symbol, adopt the 'this' ID
// - otherwise, ensure a unique ID by shifting to a new space
virtual void visitSymbol(TIntermSymbol* symbol)
{
const TQualifier& qualifier = symbol->getType().getQualifier();
bool remapped = false;
if (qualifier.isLinkable() || qualifier.builtIn != EbvNone) {
auto it = idMap.find(symbol->getName());
if (it != idMap.end()) {
symbol->changeId(it->second);
remapped = true;
}
}
if (!remapped)
symbol->changeId(symbol->getId() + idShift);
}
protected:
TRemapIdTraverser(TRemapIdTraverser&);
TRemapIdTraverser& operator=(TRemapIdTraverser&);
const TMap<TString, int>& idMap;
int idShift;
};
void TIntermediate::remapIds(const TMap<TString, int>& idMap, int idShift, TIntermediate& unit)
{
// Remap all IDs to either share or be unique, as dictated by the idMap and idShift.
TRemapIdTraverser idTraverser(idMap, idShift);
unit.getTreeRoot()->traverse(&idTraverser);
}
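A toy model of the seed-and-remap scheme implemented above: names already known from the seeding AST adopt that AST's ID, and everything else is shifted past the seeding AST's maximum ID (plain maps stand in for the traversers; the numbers are made up):

#include <cassert>
#include <map>
#include <string>

int main() {
    // IDs seeded from 'this' AST (built-ins plus linker objects); max ID is 7.
    const std::map<std::string, int> idMap = {{"gl_Position", 3}, {"color", 7}};
    const int idShift = 7 + 1;

    // Symbols of the incoming unit, with the IDs it assigned independently.
    std::map<std::string, int> unit = {{"gl_Position", 2}, {"color", 4}, {"tmp", 5}};

    for (auto& sym : unit) {
        auto it = idMap.find(sym.first);
        sym.second = (it != idMap.end()) ? it->second            // shared symbol: adopt the seed ID
                                         : sym.second + idShift; // unique symbol: move to a fresh range
    }
    assert(unit["gl_Position"] == 3 && unit["color"] == 7 && unit["tmp"] == 13);
    return 0;
}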
//
// Merge the function bodies and global-level initializers from unitGlobals into globals.
// Will error check duplication of function bodies for the same signature.
@ -293,6 +498,7 @@ void TIntermediate::mergeImplicitArraySizes(TType& type, const TType& unitType)
//
void TIntermediate::mergeErrorCheck(TInfoSink& infoSink, const TIntermSymbol& symbol, const TIntermSymbol& unitSymbol, bool crossStage)
{
#ifndef GLSLANG_WEB
bool writeTypeComparison = false;
// Types have to match
@ -327,7 +533,7 @@ void TIntermediate::mergeErrorCheck(TInfoSink& infoSink, const TIntermSymbol& sy
}
// Precise...
if (! crossStage && symbol.getQualifier().noContraction != unitSymbol.getQualifier().noContraction) {
if (! crossStage && symbol.getQualifier().isNoContraction() != unitSymbol.getQualifier().isNoContraction()) {
error(infoSink, "Presence of precise qualifier must match:");
writeTypeComparison = true;
}
@ -336,19 +542,24 @@ void TIntermediate::mergeErrorCheck(TInfoSink& infoSink, const TIntermSymbol& sy
if (symbol.getQualifier().centroid != unitSymbol.getQualifier().centroid ||
symbol.getQualifier().smooth != unitSymbol.getQualifier().smooth ||
symbol.getQualifier().flat != unitSymbol.getQualifier().flat ||
symbol.getQualifier().sample != unitSymbol.getQualifier().sample ||
symbol.getQualifier().patch != unitSymbol.getQualifier().patch ||
symbol.getQualifier().nopersp != unitSymbol.getQualifier().nopersp) {
symbol.getQualifier().isSample()!= unitSymbol.getQualifier().isSample() ||
symbol.getQualifier().isPatch() != unitSymbol.getQualifier().isPatch() ||
symbol.getQualifier().isNonPerspective() != unitSymbol.getQualifier().isNonPerspective()) {
error(infoSink, "Interpolation and auxiliary storage qualifiers must match:");
writeTypeComparison = true;
}
// Memory...
if (symbol.getQualifier().coherent != unitSymbol.getQualifier().coherent ||
symbol.getQualifier().volatil != unitSymbol.getQualifier().volatil ||
symbol.getQualifier().restrict != unitSymbol.getQualifier().restrict ||
symbol.getQualifier().readonly != unitSymbol.getQualifier().readonly ||
symbol.getQualifier().writeonly != unitSymbol.getQualifier().writeonly) {
if (symbol.getQualifier().coherent != unitSymbol.getQualifier().coherent ||
symbol.getQualifier().devicecoherent != unitSymbol.getQualifier().devicecoherent ||
symbol.getQualifier().queuefamilycoherent != unitSymbol.getQualifier().queuefamilycoherent ||
symbol.getQualifier().workgroupcoherent != unitSymbol.getQualifier().workgroupcoherent ||
symbol.getQualifier().subgroupcoherent != unitSymbol.getQualifier().subgroupcoherent ||
symbol.getQualifier().nonprivate != unitSymbol.getQualifier().nonprivate ||
symbol.getQualifier().volatil != unitSymbol.getQualifier().volatil ||
symbol.getQualifier().restrict != unitSymbol.getQualifier().restrict ||
symbol.getQualifier().readonly != unitSymbol.getQualifier().readonly ||
symbol.getQualifier().writeonly != unitSymbol.getQualifier().writeonly) {
error(infoSink, "Memory qualifiers must match:");
writeTypeComparison = true;
}
@ -381,6 +592,7 @@ void TIntermediate::mergeErrorCheck(TInfoSink& infoSink, const TIntermSymbol& sy
if (writeTypeComparison)
infoSink.info << " " << symbol.getName() << ": \"" << symbol.getType().getCompleteString() << "\" versus \"" <<
unitSymbol.getType().getCompleteString() << "\"\n";
#endif
}
//
@ -395,15 +607,12 @@ void TIntermediate::finalCheck(TInfoSink& infoSink, bool keepUncalled)
return;
if (numEntryPoints < 1) {
if (source == EShSourceGlsl)
if (getSource() == EShSourceGlsl)
error(infoSink, "Missing entry point: Each stage requires one entry point");
else
warn(infoSink, "Entry point not found");
}
if (numPushConstants > 1)
error(infoSink, "Only one push_constant block is allowed per stage");
// recursion and missing body checking
checkCallGraphCycles(infoSink);
checkCallGraphBodies(infoSink, keepUncalled);
@ -411,6 +620,10 @@ void TIntermediate::finalCheck(TInfoSink& infoSink, bool keepUncalled)
// overlap/alias/missing I/O, etc.
inOutLocationCheck(infoSink);
#ifndef GLSLANG_WEB
if (getNumPushConstants() > 1)
error(infoSink, "Only one push_constant block is allowed per stage");
// invocations
if (invocations == TQualifier::layoutNotSet)
invocations = 1;
@ -426,8 +639,12 @@ void TIntermediate::finalCheck(TInfoSink& infoSink, bool keepUncalled)
error(infoSink, "Cannot use both gl_FragColor and gl_FragData");
for (size_t b = 0; b < xfbBuffers.size(); ++b) {
if (xfbBuffers[b].containsDouble)
if (xfbBuffers[b].contains64BitType)
RoundToPow2(xfbBuffers[b].implicitStride, 8);
else if (xfbBuffers[b].contains32BitType)
RoundToPow2(xfbBuffers[b].implicitStride, 4);
else if (xfbBuffers[b].contains16BitType)
RoundToPow2(xfbBuffers[b].implicitStride, 2);
// "It is a compile-time or link-time error to have
// any xfb_offset that overflows xfb_stride, whether stated on declarations before or after the xfb_stride, or
@ -442,17 +659,24 @@ void TIntermediate::finalCheck(TInfoSink& infoSink, bool keepUncalled)
xfbBuffers[b].stride = xfbBuffers[b].implicitStride;
// "If the buffer is capturing any
// outputs with double-precision components, the stride must be a multiple of 8, otherwise it must be a
// outputs with double-precision or 64-bit integer components, the stride must be a multiple of 8, otherwise it must be a
// multiple of 4, or a compile-time or link-time error results."
if (xfbBuffers[b].containsDouble && ! IsMultipleOfPow2(xfbBuffers[b].stride, 8)) {
error(infoSink, "xfb_stride must be multiple of 8 for buffer holding a double:");
if (xfbBuffers[b].contains64BitType && ! IsMultipleOfPow2(xfbBuffers[b].stride, 8)) {
error(infoSink, "xfb_stride must be multiple of 8 for buffer holding a double or 64-bit integer:");
infoSink.info.prefix(EPrefixError);
infoSink.info << " xfb_buffer " << (unsigned int)b << ", xfb_stride " << xfbBuffers[b].stride << "\n";
} else if (! IsMultipleOfPow2(xfbBuffers[b].stride, 4)) {
} else if (xfbBuffers[b].contains32BitType && ! IsMultipleOfPow2(xfbBuffers[b].stride, 4)) {
error(infoSink, "xfb_stride must be multiple of 4:");
infoSink.info.prefix(EPrefixError);
infoSink.info << " xfb_buffer " << (unsigned int)b << ", xfb_stride " << xfbBuffers[b].stride << "\n";
}
// "If the buffer is capturing any
// outputs with half-precision or 16-bit integer components, the stride must be a multiple of 2"
else if (xfbBuffers[b].contains16BitType && ! IsMultipleOfPow2(xfbBuffers[b].stride, 2)) {
error(infoSink, "xfb_stride must be multiple of 2 for buffer holding a half float or 16-bit integer:");
infoSink.info.prefix(EPrefixError);
infoSink.info << " xfb_buffer " << (unsigned int)b << ", xfb_stride " << xfbBuffers[b].stride << "\n";
}
// "The resulting stride (implicit or explicit), when divided by 4, must be less than or equal to the
// implementation-dependent constant gl_MaxTransformFeedbackInterleavedComponents."
@ -471,7 +695,7 @@ void TIntermediate::finalCheck(TInfoSink& infoSink, bool keepUncalled)
error(infoSink, "At least one shader must specify an output layout(vertices=...)");
break;
case EShLangTessEvaluation:
if (source == EShSourceGlsl) {
if (getSource() == EShSourceGlsl) {
if (inputPrimitive == ElgNone)
error(infoSink, "At least one shader must specify an input layout primitive");
if (vertexSpacing == EvsNone)
@ -483,17 +707,9 @@ void TIntermediate::finalCheck(TInfoSink& infoSink, bool keepUncalled)
case EShLangGeometry:
if (inputPrimitive == ElgNone)
error(infoSink, "At least one shader must specify an input layout primitive");
if (outputPrimitive == ElgNone
#ifdef NV_EXTENSIONS
&& !getGeoPassthroughEXT()
#endif
)
if (outputPrimitive == ElgNone)
error(infoSink, "At least one shader must specify an output layout primitive");
if (vertices == TQualifier::layoutNotSet
#ifdef NV_EXTENSIONS
&& !getGeoPassthroughEXT()
#endif
)
if (vertices == TQualifier::layoutNotSet)
error(infoSink, "At least one shader must specify a layout(max_vertices = value)");
break;
case EShLangFragment:
@ -505,6 +721,38 @@ void TIntermediate::finalCheck(TInfoSink& infoSink, bool keepUncalled)
break;
case EShLangCompute:
break;
case EShLangRayGenNV:
case EShLangIntersectNV:
case EShLangAnyHitNV:
case EShLangClosestHitNV:
case EShLangMissNV:
case EShLangCallableNV:
if (numShaderRecordNVBlocks > 1)
error(infoSink, "Only one shaderRecordNV buffer block is allowed per stage");
break;
case EShLangMeshNV:
// NV_mesh_shader doesn't allow use of both single-view and per-view builtins.
if (inIoAccessed("gl_Position") && inIoAccessed("gl_PositionPerViewNV"))
error(infoSink, "Can only use one of gl_Position or gl_PositionPerViewNV");
if (inIoAccessed("gl_ClipDistance") && inIoAccessed("gl_ClipDistancePerViewNV"))
error(infoSink, "Can only use one of gl_ClipDistance or gl_ClipDistancePerViewNV");
if (inIoAccessed("gl_CullDistance") && inIoAccessed("gl_CullDistancePerViewNV"))
error(infoSink, "Can only use one of gl_CullDistance or gl_CullDistancePerViewNV");
if (inIoAccessed("gl_Layer") && inIoAccessed("gl_LayerPerViewNV"))
error(infoSink, "Can only use one of gl_Layer or gl_LayerPerViewNV");
if (inIoAccessed("gl_ViewportMask") && inIoAccessed("gl_ViewportMaskPerViewNV"))
error(infoSink, "Can only use one of gl_ViewportMask or gl_ViewportMaskPerViewNV");
if (outputPrimitive == ElgNone)
error(infoSink, "At least one shader must specify an output layout primitive");
if (vertices == TQualifier::layoutNotSet)
error(infoSink, "At least one shader must specify a layout(max_vertices = value)");
if (primitives == TQualifier::layoutNotSet)
error(infoSink, "At least one shader must specify a layout(max_primitives = value)");
// fall through
case EShLangTaskNV:
if (numTaskNVBlocks > 1)
error(infoSink, "Only one taskNV interface block is allowed per shader");
break;
default:
error(infoSink, "Unknown Stage.");
break;
@ -526,6 +774,7 @@ void TIntermediate::finalCheck(TInfoSink& infoSink, bool keepUncalled)
} finalLinkTraverser;
treeRoot->traverse(&finalLinkTraverser);
#endif
}
//
@ -699,7 +948,7 @@ void TIntermediate::inOutLocationCheck(TInfoSink& infoSink)
// TODO: linker functionality: location collision checking
TIntermSequence& linkObjects = findLinkerObjects();
TIntermSequence& linkObjects = findLinkerObjects()->getSequence();
for (size_t i = 0; i < linkObjects.size(); ++i) {
const TType& type = linkObjects[i]->getAsTyped()->getType();
const TQualifier& qualifier = type.getQualifier();
@ -712,13 +961,13 @@ void TIntermediate::inOutLocationCheck(TInfoSink& infoSink)
}
}
if (profile == EEsProfile) {
if (isEsProfile()) {
if (numFragOut > 1 && fragOutWithNoLocation)
error(infoSink, "when more than one fragment shader output, all must have location qualifiers");
}
}
TIntermSequence& TIntermediate::findLinkerObjects() const
TIntermAggregate* TIntermediate::findLinkerObjects() const
{
// Get the top-level globals
TIntermSequence& globals = treeRoot->getAsAggregate()->getSequence();
@ -726,7 +975,7 @@ TIntermSequence& TIntermediate::findLinkerObjects() const
// Get the last member of the sequences, expected to be the linker-object lists
assert(globals.back()->getAsAggregate()->getOp() == EOpLinkerObjects);
return globals.back()->getAsAggregate()->getSequence();
return globals.back()->getAsAggregate();
}
// See if a variable was both a user-declared output and used.
@ -734,7 +983,7 @@ TIntermSequence& TIntermediate::findLinkerObjects() const
// is more useful, and perhaps the spec should be changed to reflect that.
bool TIntermediate::userOutputUsed() const
{
const TIntermSequence& linkerObjects = findLinkerObjects();
const TIntermSequence& linkerObjects = findLinkerObjects()->getSequence();
bool found = false;
for (size_t i = 0; i < linkerObjects.size(); ++i) {
@ -775,7 +1024,7 @@ int TIntermediate::addUsedLocation(const TQualifier& qualifier, const TType& typ
return -1;
int size;
if (qualifier.isUniformOrBuffer()) {
if (qualifier.isUniformOrBuffer() || qualifier.isTaskMemory()) {
if (type.isSizedArray())
size = type.getCumulativeArraySize();
else
@ -805,6 +1054,7 @@ int TIntermediate::addUsedLocation(const TQualifier& qualifier, const TType& typ
// So, for the case of dvec3, we need two independent ioRanges.
int collision = -1; // no collision
#ifndef GLSLANG_WEB
if (size == 2 && type.getBasicType() == EbtDouble && type.getVectorSize() == 3 &&
(qualifier.isPipeInput() || qualifier.isPipeOutput())) {
// Dealing with dvec3 in/out split across two locations.
@ -831,7 +1081,9 @@ int TIntermediate::addUsedLocation(const TQualifier& qualifier, const TType& typ
if (collision < 0)
usedIo[set].push_back(range2);
}
} else {
} else
#endif
{
// Not a dvec3 in/out split across two locations, generic path.
// Need a single IO-range block.
@ -845,10 +1097,10 @@ int TIntermediate::addUsedLocation(const TQualifier& qualifier, const TType& typ
}
// combine location and component ranges
TIoRange range(locationRange, componentRange, type.getBasicType(), qualifier.hasIndex() ? qualifier.layoutIndex : 0);
TIoRange range(locationRange, componentRange, type.getBasicType(), qualifier.hasIndex() ? qualifier.getIndex() : 0);
// check for collisions, except for vertex inputs on desktop targeting OpenGL
if (! (profile != EEsProfile && language == EShLangVertex && qualifier.isPipeInput()) || spvVersion.vulkan > 0)
if (! (!isEsProfile() && language == EShLangVertex && qualifier.isPipeInput()) || spvVersion.vulkan > 0)
collision = checkLocationRange(set, range, type, typeCollision);
if (collision < 0)
@ -926,10 +1178,15 @@ int TIntermediate::computeTypeLocationSize(const TType& type, EShLanguage stage)
// TODO: perf: this can be flattened by using getCumulativeArraySize(), and a deref that discards all arrayness
// TODO: are there valid cases of having an unsized array with a location? If so, running this code too early.
TType elementType(type, 0);
if (type.isSizedArray())
if (type.isSizedArray() && !type.getQualifier().isPerView())
return type.getOuterArraySize() * computeTypeLocationSize(elementType, stage);
else
else {
#ifndef GLSLANG_WEB
// unset perViewNV attributes for arrayed per-view outputs: "perviewNV vec4 v[MAX_VIEWS][3];"
elementType.getQualifier().perViewNV = false;
#endif
return computeTypeLocationSize(elementType, stage);
}
}
// "The locations consumed by block and structure members are determined by applying the rules above
@ -1003,6 +1260,8 @@ int TIntermediate::computeTypeUniformLocationSize(const TType& type)
return 1;
}
#ifndef GLSLANG_WEB
// Accumulate xfb buffer ranges and check for collisions as the accumulation is done.
//
// Returns < 0 if no collision, >= 0 if collision and the value returned is a colliding value.
@ -1015,7 +1274,7 @@ int TIntermediate::addXfbBufferOffset(const TType& type)
TXfbBuffer& buffer = xfbBuffers[qualifier.layoutXfbBuffer];
// compute the range
unsigned int size = computeTypeXfbSize(type, buffer.containsDouble);
unsigned int size = computeTypeXfbSize(type, buffer.contains64BitType, buffer.contains32BitType, buffer.contains16BitType);
buffer.implicitStride = std::max(buffer.implicitStride, qualifier.layoutXfbOffset + size);
TRange range(qualifier.layoutXfbOffset, qualifier.layoutXfbOffset + size - 1);
@ -1034,11 +1293,13 @@ int TIntermediate::addXfbBufferOffset(const TType& type)
// Recursively figure out how many bytes of xfb buffer are used by the given type.
// Return the size of type, in bytes.
// Sets containsDouble to true if the type contains a double.
// N.B. Caller must set containsDouble to false before calling.
unsigned int TIntermediate::computeTypeXfbSize(const TType& type, bool& containsDouble) const
// Sets contains64BitType to true if the type contains a 64-bit data type.
// Sets contains32BitType to true if the type contains a 32-bit data type.
// Sets contains16BitType to true if the type contains a 16-bit data type.
// N.B. Caller must set contains64BitType, contains32BitType, and contains16BitType to false before calling.
unsigned int TIntermediate::computeTypeXfbSize(const TType& type, bool& contains64BitType, bool& contains32BitType, bool& contains16BitType) const
{
// "...if applied to an aggregate containing a double, the offset must also be a multiple of 8,
// "...if applied to an aggregate containing a double or 64-bit integer, the offset must also be a multiple of 8,
// and the space taken in the buffer will be a multiple of 8.
// ...within the qualified entity, subsequent components are each
// assigned, in order, to the next available offset aligned to a multiple of
@ -1049,29 +1310,45 @@ unsigned int TIntermediate::computeTypeXfbSize(const TType& type, bool& contains
// TODO: perf: this can be flattened by using getCumulativeArraySize(), and a deref that discards all arrayness
assert(type.isSizedArray());
TType elementType(type, 0);
return type.getOuterArraySize() * computeTypeXfbSize(elementType, containsDouble);
return type.getOuterArraySize() * computeTypeXfbSize(elementType, contains64BitType, contains32BitType, contains16BitType);
}
if (type.isStruct()) {
unsigned int size = 0;
bool structContainsDouble = false;
bool structContains64BitType = false;
bool structContains32BitType = false;
bool structContains16BitType = false;
for (int member = 0; member < (int)type.getStruct()->size(); ++member) {
TType memberType(type, member);
// "... if applied to
// an aggregate containing a double, the offset must also be a multiple of 8,
// an aggregate containing a double or 64-bit integer, the offset must also be a multiple of 8,
// and the space taken in the buffer will be a multiple of 8."
bool memberContainsDouble = false;
int memberSize = computeTypeXfbSize(memberType, memberContainsDouble);
if (memberContainsDouble) {
structContainsDouble = true;
bool memberContains64BitType = false;
bool memberContains32BitType = false;
bool memberContains16BitType = false;
int memberSize = computeTypeXfbSize(memberType, memberContains64BitType, memberContains32BitType, memberContains16BitType);
if (memberContains64BitType) {
structContains64BitType = true;
RoundToPow2(size, 8);
} else if (memberContains32BitType) {
structContains32BitType = true;
RoundToPow2(size, 4);
} else if (memberContains16BitType) {
structContains16BitType = true;
RoundToPow2(size, 2);
}
size += memberSize;
}
if (structContainsDouble) {
containsDouble = true;
if (structContains64BitType) {
contains64BitType = true;
RoundToPow2(size, 8);
} else if (structContains32BitType) {
contains32BitType = true;
RoundToPow2(size, 4);
} else if (structContains16BitType) {
contains16BitType = true;
RoundToPow2(size, 2);
}
return size;
}
@ -1088,13 +1365,22 @@ unsigned int TIntermediate::computeTypeXfbSize(const TType& type, bool& contains
numComponents = 1;
}
if (type.getBasicType() == EbtDouble) {
containsDouble = true;
if (type.getBasicType() == EbtDouble || type.getBasicType() == EbtInt64 || type.getBasicType() == EbtUint64) {
contains64BitType = true;
return 8 * numComponents;
} else
} else if (type.getBasicType() == EbtFloat16 || type.getBasicType() == EbtInt16 || type.getBasicType() == EbtUint16) {
contains16BitType = true;
return 2 * numComponents;
} else if (type.getBasicType() == EbtInt8 || type.getBasicType() == EbtUint8)
return numComponents;
else {
contains32BitType = true;
return 4 * numComponents;
}
}
#endif
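As a hand check of the sizing rules above, consider a hypothetical captured struct { double d; float f; float16_t h; }; the sketch below mimics the accumulation (roundToPow2 is a local stand-in for glslang's RoundToPow2):

#include <cassert>

static void roundToPow2(unsigned& value, unsigned pow2) {
    value = (value + pow2 - 1) & ~(pow2 - 1);
}

int main() {
    unsigned size = 0;
    roundToPow2(size, 8); size += 8;  // double d    -> bytes 0..7
    roundToPow2(size, 4); size += 4;  // float f     -> bytes 8..11
    roundToPow2(size, 2); size += 2;  // float16_t h -> bytes 12..13
    roundToPow2(size, 8);             // struct holds a 64-bit type: pad up to a multiple of 8
    assert(size == 16);               // and the buffer's xfb_stride must be a multiple of 8
    return 0;
}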
const int baseAlignmentVec4Std140 = 16;
// Return the size and alignment of a component of the given type.
@ -1102,6 +1388,10 @@ const int baseAlignmentVec4Std140 = 16;
// Return value is the alignment..
int TIntermediate::getBaseAlignmentScalar(const TType& type, int& size)
{
#ifdef GLSLANG_WEB
size = 4; return 4;
#endif
switch (type.getBasicType()) {
case EbtInt64:
case EbtUint64:
@ -1111,6 +1401,7 @@ int TIntermediate::getBaseAlignmentScalar(const TType& type, int& size)
case EbtUint8: size = 1; return 1;
case EbtInt16:
case EbtUint16: size = 2; return 2;
case EbtReference: size = 8; return 8;
default: size = 4; return 4;
}
}
@ -1129,10 +1420,11 @@ int TIntermediate::getBaseAlignmentScalar(const TType& type, int& size)
// stride comes from the flattening down to vectors.
//
// Return value is the alignment of the type.
int TIntermediate::getBaseAlignment(const TType& type, int& size, int& stride, bool std140, bool rowMajor)
int TIntermediate::getBaseAlignment(const TType& type, int& size, int& stride, TLayoutPacking layoutPacking, bool rowMajor)
{
int alignment;
bool std140 = layoutPacking == glslang::ElpStd140;
// When using the std140 storage layout, structures will be laid out in buffer
// storage with its members stored in monotonically increasing order based on their
// location in the declaration. A structure and each structure member have a base
@ -1196,7 +1488,7 @@ int TIntermediate::getBaseAlignment(const TType& type, int& size, int& stride, b
if (type.isArray()) {
// TODO: perf: this might be flattened by using getCumulativeArraySize(), and a deref that discards all arrayness
TType derefType(type, 0);
alignment = getBaseAlignment(derefType, size, dummyStride, std140, rowMajor);
alignment = getBaseAlignment(derefType, size, dummyStride, layoutPacking, rowMajor);
if (std140)
alignment = std::max(baseAlignmentVec4Std140, alignment);
RoundToPow2(size, alignment);
@ -1216,7 +1508,7 @@ int TIntermediate::getBaseAlignment(const TType& type, int& size, int& stride, b
int memberSize;
// modify just the children's view of matrix layout, if there is one for this member
TLayoutMatrix subMatrixLayout = memberList[m].type->getQualifier().layoutMatrix;
int memberAlignment = getBaseAlignment(*memberList[m].type, memberSize, dummyStride, std140,
int memberAlignment = getBaseAlignment(*memberList[m].type, memberSize, dummyStride, layoutPacking,
(subMatrixLayout != ElmNone) ? (subMatrixLayout == ElmRowMajor) : rowMajor);
maxAlignment = std::max(maxAlignment, memberAlignment);
RoundToPow2(size, memberAlignment);
@ -1255,7 +1547,7 @@ int TIntermediate::getBaseAlignment(const TType& type, int& size, int& stride, b
// rule 5: deref to row, not to column, meaning the size of vector is num columns instead of num rows
TType derefType(type, 0, rowMajor);
alignment = getBaseAlignment(derefType, size, dummyStride, std140, rowMajor);
alignment = getBaseAlignment(derefType, size, dummyStride, layoutPacking, rowMajor);
if (std140)
alignment = std::max(baseAlignmentVec4Std140, alignment);
RoundToPow2(size, alignment);
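For reference, a hand-computed std140 layout following the rules getBaseAlignment implements above; the block is illustrative GLSL, shown here in comments with offsets worked out by hand:

// layout(std140) uniform Example {
//     float a;   // alignment 4,  offset  0, size  4
//     vec3  b;   // alignment 16, offset 16, size 12
//     float c;   // alignment 4,  offset 28, size  4
//     mat2  d;   // columns padded to vec4: alignment 16, offset 32, size 32
// };             // next free offset 64; block alignment is 16, so total size stays 64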
@ -1283,4 +1575,149 @@ bool TIntermediate::improperStraddle(const TType& type, int size, int offset)
: offset % 16 != 0;
}
int TIntermediate::getScalarAlignment(const TType& type, int& size, int& stride, bool rowMajor)
{
int alignment;
stride = 0;
int dummyStride;
if (type.isArray()) {
TType derefType(type, 0);
alignment = getScalarAlignment(derefType, size, dummyStride, rowMajor);
stride = size;
RoundToPow2(stride, alignment);
size = stride * (type.getOuterArraySize() - 1) + size;
return alignment;
}
if (type.getBasicType() == EbtStruct) {
const TTypeList& memberList = *type.getStruct();
size = 0;
int maxAlignment = 0;
for (size_t m = 0; m < memberList.size(); ++m) {
int memberSize;
// modify just the children's view of matrix layout, if there is one for this member
TLayoutMatrix subMatrixLayout = memberList[m].type->getQualifier().layoutMatrix;
int memberAlignment = getScalarAlignment(*memberList[m].type, memberSize, dummyStride,
(subMatrixLayout != ElmNone) ? (subMatrixLayout == ElmRowMajor) : rowMajor);
maxAlignment = std::max(maxAlignment, memberAlignment);
RoundToPow2(size, memberAlignment);
size += memberSize;
}
return maxAlignment;
}
if (type.isScalar())
return getBaseAlignmentScalar(type, size);
if (type.isVector()) {
int scalarAlign = getBaseAlignmentScalar(type, size);
size *= type.getVectorSize();
return scalarAlign;
}
if (type.isMatrix()) {
TType derefType(type, 0, rowMajor);
alignment = getScalarAlignment(derefType, size, dummyStride, rowMajor);
stride = size; // use the intra-matrix stride as the stride of just a matrix
if (rowMajor)
size = stride * type.getMatrixRows();
else
size = stride * type.getMatrixCols();
return alignment;
}
assert(0); // all cases should be covered above
size = 1;
return 1;
}
int TIntermediate::getMemberAlignment(const TType& type, int& size, int& stride, TLayoutPacking layoutPacking, bool rowMajor)
{
if (layoutPacking == glslang::ElpScalar) {
return getScalarAlignment(type, size, stride, rowMajor);
} else {
return getBaseAlignment(type, size, stride, layoutPacking, rowMajor);
}
}
// shared calculation by getOffset and getOffsets
void TIntermediate::updateOffset(const TType& parentType, const TType& memberType, int& offset, int& memberSize)
{
int dummyStride;
// modify just the children's view of matrix layout, if there is one for this member
TLayoutMatrix subMatrixLayout = memberType.getQualifier().layoutMatrix;
int memberAlignment = getMemberAlignment(memberType, memberSize, dummyStride,
parentType.getQualifier().layoutPacking,
subMatrixLayout != ElmNone
? subMatrixLayout == ElmRowMajor
: parentType.getQualifier().layoutMatrix == ElmRowMajor);
RoundToPow2(offset, memberAlignment);
}
// Lookup or calculate the offset of a block member, using the recursively
// defined block offset rules.
int TIntermediate::getOffset(const TType& type, int index)
{
const TTypeList& memberList = *type.getStruct();
// Don't calculate the offset if one is already present; it could be user-supplied
// and differ from what would be calculated. That is, this is faster,
// but it is not just an optimization.
if (memberList[index].type->getQualifier().hasOffset())
return memberList[index].type->getQualifier().layoutOffset;
int memberSize = 0;
int offset = 0;
for (int m = 0; m <= index; ++m) {
updateOffset(type, *memberList[m].type, offset, memberSize);
if (m < index)
offset += memberSize;
}
return offset;
}
// Calculate the block data size.
// Block arrayness is not taken into account; each element is backed by a separate buffer.
int TIntermediate::getBlockSize(const TType& blockType)
{
const TTypeList& memberList = *blockType.getStruct();
int lastIndex = (int)memberList.size() - 1;
int lastOffset = getOffset(blockType, lastIndex);
int lastMemberSize;
int dummyStride;
getMemberAlignment(*memberList[lastIndex].type, lastMemberSize, dummyStride,
blockType.getQualifier().layoutPacking,
blockType.getQualifier().layoutMatrix == ElmRowMajor);
return lastOffset + lastMemberSize;
}
int TIntermediate::computeBufferReferenceTypeSize(const TType& type)
{
assert(type.isReference());
int size = getBlockSize(*type.getReferentType());
int align = type.getBufferReferenceAlignment();
if (align) {
size = (size + align - 1) & ~(align-1);
}
return size;
}
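A quick numeric check of the align-up step above (values chosen for illustration):

#include <cassert>

int main() {
    int size = 20, align = 16;
    size = (size + align - 1) & ~(align - 1);  // round size up to a multiple of align
    assert(size == 32);
    return 0;
}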
} // end namespace glslang
Externals/glslang/glslang/MachineIndependent/localintermediate.h vendored Executable file → Normal file
@ -2,6 +2,8 @@
// Copyright (C) 2002-2005 3Dlabs Inc. Ltd.
// Copyright (C) 2016 LunarG, Inc.
// Copyright (C) 2017 ARM Limited.
// Copyright (C) 2015-2018 Google, Inc.
//
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
@ -41,6 +43,8 @@
#include "../Public/ShaderLang.h"
#include "Versions.h"
#include <string>
#include <vector>
#include <algorithm>
#include <set>
#include <array>
@ -143,14 +147,19 @@ struct TOffsetRange {
TRange offset;
};
#ifndef GLSLANG_WEB
// Things that need to be tracked per xfb buffer.
struct TXfbBuffer {
TXfbBuffer() : stride(TQualifier::layoutXfbStrideEnd), implicitStride(0), containsDouble(false) { }
TXfbBuffer() : stride(TQualifier::layoutXfbStrideEnd), implicitStride(0), contains64BitType(false),
contains32BitType(false), contains16BitType(false) { }
std::vector<TRange> ranges; // byte offsets that have already been assigned
unsigned int stride;
unsigned int implicitStride;
bool containsDouble;
bool contains64BitType;
bool contains32BitType;
bool contains16BitType;
};
#endif
// Track a set of strings describing how the module was processed.
// Using the form:
@ -204,174 +213,74 @@ class TSymbolTable;
class TSymbol;
class TVariable;
//
// Texture and Sampler transformation mode.
//
enum ComputeDerivativeMode {
LayoutDerivativeNone, // default layout as SPV_NV_compute_shader_derivatives not enabled
LayoutDerivativeGroupQuads, // derivative_group_quadsNV
LayoutDerivativeGroupLinear, // derivative_group_linearNV
};
//
// Set of helper functions to help parse and build the tree.
//
class TIntermediate {
public:
explicit TIntermediate(EShLanguage l, int v = 0, EProfile p = ENoProfile) :
implicitThisName("@this"), implicitCounterName("@count"),
language(l), source(EShSourceNone), profile(p), version(v), treeRoot(0),
language(l),
profile(p), version(v), treeRoot(0),
numEntryPoints(0), numErrors(0), numPushConstants(0), recursive(false),
invertY(false),
useStorageBuffer(false),
nanMinMaxClamp(false),
depthReplacing(false)
#ifndef GLSLANG_WEB
,
implicitThisName("@this"), implicitCounterName("@count"),
source(EShSourceNone),
useVulkanMemoryModel(false),
invocations(TQualifier::layoutNotSet), vertices(TQualifier::layoutNotSet),
inputPrimitive(ElgNone), outputPrimitive(ElgNone),
pixelCenterInteger(false), originUpperLeft(false),
vertexSpacing(EvsNone), vertexOrder(EvoNone), pointMode(false), earlyFragmentTests(false),
postDepthCoverage(false), depthLayout(EldNone), depthReplacing(false),
vertexSpacing(EvsNone), vertexOrder(EvoNone), interlockOrdering(EioNone), pointMode(false), earlyFragmentTests(false),
postDepthCoverage(false), depthLayout(EldNone),
hlslFunctionality1(false),
blendEquations(0), xfbMode(false), multiStream(false),
#ifdef NV_EXTENSIONS
layoutOverrideCoverage(false),
geoPassthroughEXT(false),
#endif
numShaderRecordNVBlocks(0),
computeDerivativeMode(LayoutDerivativeNone),
primitives(TQualifier::layoutNotSet),
numTaskNVBlocks(0),
autoMapBindings(false),
autoMapLocations(false),
invertY(false),
flattenUniformArrays(false),
useUnknownFormat(false),
hlslOffsets(false),
useStorageBuffer(false),
hlslIoMapping(false),
useVariablePointers(false),
textureSamplerTransformMode(EShTexSampTransKeep),
needToLegalize(false),
binaryDoubleOutput(false)
binaryDoubleOutput(false),
usePhysicalStorageBuffer(false),
uniformLocationBase(0)
#endif
{
localSize[0] = 1;
localSize[1] = 1;
localSize[2] = 1;
localSizeNotDefault[0] = false;
localSizeNotDefault[1] = false;
localSizeNotDefault[2] = false;
localSizeSpecId[0] = TQualifier::layoutNotSet;
localSizeSpecId[1] = TQualifier::layoutNotSet;
localSizeSpecId[2] = TQualifier::layoutNotSet;
#ifndef GLSLANG_WEB
xfbBuffers.resize(TQualifier::layoutXfbBufferEnd);
shiftBinding.fill(0);
#endif
}
void setLimits(const TBuiltInResource& r) { resources = r; }
bool postProcess(TIntermNode*, EShLanguage);
void output(TInfoSink&, bool tree);
void removeTree();
void setSource(EShSource s) { source = s; }
EShSource getSource() const { return source; }
void setEntryPointName(const char* ep)
{
entryPointName = ep;
processes.addProcess("entry-point");
processes.addArgument(entryPointName);
}
void setEntryPointMangledName(const char* ep) { entryPointMangledName = ep; }
const std::string& getEntryPointName() const { return entryPointName; }
const std::string& getEntryPointMangledName() const { return entryPointMangledName; }
void setShiftBinding(TResourceType res, unsigned int shift)
{
shiftBinding[res] = shift;
const char* name = getResourceName(res);
if (name != nullptr)
processes.addIfNonZero(name, shift);
}
unsigned int getShiftBinding(TResourceType res) const { return shiftBinding[res]; }
void setShiftBindingForSet(TResourceType res, unsigned int shift, unsigned int set)
{
if (shift == 0) // ignore if there's no shift: it's a no-op.
return;
shiftBindingForSet[res][set] = shift;
const char* name = getResourceName(res);
if (name != nullptr) {
processes.addProcess(name);
processes.addArgument(shift);
processes.addArgument(set);
}
}
int getShiftBindingForSet(TResourceType res, unsigned int set) const
{
const auto shift = shiftBindingForSet[res].find(set);
return shift == shiftBindingForSet[res].end() ? -1 : shift->second;
}
bool hasShiftBindingForSet(TResourceType res) const { return !shiftBindingForSet[res].empty(); }
void setResourceSetBinding(const std::vector<std::string>& shift)
{
resourceSetBinding = shift;
if (shift.size() > 0) {
processes.addProcess("resource-set-binding");
for (int s = 0; s < (int)shift.size(); ++s)
processes.addArgument(shift[s]);
}
}
const std::vector<std::string>& getResourceSetBinding() const { return resourceSetBinding; }
void setAutoMapBindings(bool map)
{
autoMapBindings = map;
if (autoMapBindings)
processes.addProcess("auto-map-bindings");
}
bool getAutoMapBindings() const { return autoMapBindings; }
void setAutoMapLocations(bool map)
{
autoMapLocations = map;
if (autoMapLocations)
processes.addProcess("auto-map-locations");
}
bool getAutoMapLocations() const { return autoMapLocations; }
void setInvertY(bool invert)
{
invertY = invert;
if (invertY)
processes.addProcess("invert-y");
}
bool getInvertY() const { return invertY; }
void setFlattenUniformArrays(bool flatten)
{
flattenUniformArrays = flatten;
if (flattenUniformArrays)
processes.addProcess("flatten-uniform-arrays");
}
bool getFlattenUniformArrays() const { return flattenUniformArrays; }
void setNoStorageFormat(bool b)
{
useUnknownFormat = b;
if (useUnknownFormat)
processes.addProcess("no-storage-format");
}
bool getNoStorageFormat() const { return useUnknownFormat; }
void setHlslOffsets()
{
hlslOffsets = true;
if (hlslOffsets)
processes.addProcess("hlsl-offsets");
}
bool usingHlslOFfsets() const { return hlslOffsets; }
void setUseStorageBuffer()
{
useStorageBuffer = true;
processes.addProcess("use-storage-buffer");
}
bool usingStorageBuffer() const { return useStorageBuffer; }
void setHlslIoMapping(bool b)
{
hlslIoMapping = b;
if (hlslIoMapping)
processes.addProcess("hlsl-iomap");
}
bool usingHlslIoMapping() { return hlslIoMapping; }
template<class T> T addCounterBufferName(const T& name) const { return name + implicitCounterName; }
bool hasCounterBufferName(const TString& name) const {
size_t len = strlen(implicitCounterName);
return name.size() > len &&
name.compare(name.size() - len, len, implicitCounterName) == 0;
}
void setTextureSamplerTransformMode(EShTextureSamplerTransformMode mode) { textureSamplerTransformMode = mode; }
void setVersion(int v) { version = v; }
int getVersion() const { return version; }
@ -387,11 +296,49 @@ public:
if (spvVersion.openGl > 0)
processes.addProcess("client opengl100");
// target SPV
switch (spvVersion.spv) {
case 0:
break;
case EShTargetSpv_1_0:
break;
case EShTargetSpv_1_1:
processes.addProcess("target-env spirv1.1");
break;
case EShTargetSpv_1_2:
processes.addProcess("target-env spirv1.2");
break;
case EShTargetSpv_1_3:
processes.addProcess("target-env spirv1.3");
break;
case EShTargetSpv_1_4:
processes.addProcess("target-env spirv1.4");
break;
case EShTargetSpv_1_5:
processes.addProcess("target-env spirv1.5");
break;
default:
processes.addProcess("target-env spirvUnknown");
break;
}
// target-environment processes
if (spvVersion.vulkan > 0)
switch (spvVersion.vulkan) {
case 0:
break;
case EShTargetVulkan_1_0:
processes.addProcess("target-env vulkan1.0");
else if (spvVersion.vulkan > 0)
break;
case EShTargetVulkan_1_1:
processes.addProcess("target-env vulkan1.1");
break;
case EShTargetVulkan_1_2:
processes.addProcess("target-env vulkan1.2");
break;
default:
processes.addProcess("target-env vulkanUnknown");
break;
}
if (spvVersion.openGl > 0)
processes.addProcess("target-env opengl");
}
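For reference, a minimal sketch of how the strings recorded above might be inspected later (hypothetical helper and 'intermediate' instance; a shader targeting SPIR-V 1.3 under Vulkan 1.1 would record "target-env spirv1.3" and "target-env vulkan1.1"):

#include <iostream>
#include <string>
#include <vector>

// Hypothetical debugging helper, not glslang code.
inline void dumpProcesses(const std::vector<std::string>& processes)
{
    for (const std::string& p : processes)
        std::cout << p << "\n";
}
// Usage sketch: dumpProcesses(intermediate.getProcesses());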
@ -406,15 +353,47 @@ public:
int getNumEntryPoints() const { return numEntryPoints; }
int getNumErrors() const { return numErrors; }
void addPushConstantCount() { ++numPushConstants; }
void setLimits(const TBuiltInResource& r) { resources = r; }
bool postProcess(TIntermNode*, EShLanguage);
void removeTree();
void setEntryPointName(const char* ep)
{
entryPointName = ep;
processes.addProcess("entry-point");
processes.addArgument(entryPointName);
}
void setEntryPointMangledName(const char* ep) { entryPointMangledName = ep; }
const std::string& getEntryPointName() const { return entryPointName; }
const std::string& getEntryPointMangledName() const { return entryPointMangledName; }
void setInvertY(bool invert)
{
invertY = invert;
if (invertY)
processes.addProcess("invert-y");
}
bool getInvertY() const { return invertY; }
#ifdef ENABLE_HLSL
void setSource(EShSource s) { source = s; }
EShSource getSource() const { return source; }
#else
void setSource(EShSource s) { assert(s == EShSourceGlsl); }
EShSource getSource() const { return EShSourceGlsl; }
#endif
bool isRecursive() const { return recursive; }
TIntermSymbol* addSymbol(const TVariable&);
TIntermSymbol* addSymbol(const TVariable&, const TSourceLoc&);
TIntermSymbol* addSymbol(const TType&, const TSourceLoc&);
TIntermSymbol* addSymbol(const TIntermSymbol&);
TIntermTyped* addConversion(TOperator, const TType&, TIntermTyped*) const;
std::tuple<TIntermTyped*, TIntermTyped*> addConversion(TOperator op, TIntermTyped* node0, TIntermTyped* node1) const;
TIntermTyped* addConversion(TOperator, const TType&, TIntermTyped*);
std::tuple<TIntermTyped*, TIntermTyped*> addConversion(TOperator op, TIntermTyped* node0, TIntermTyped* node1);
TIntermTyped* addUniShapeConversion(TOperator, const TType&, TIntermTyped*);
TIntermTyped* addConversion(TBasicType convertTo, TIntermTyped* node) const;
void addBiShapeConversion(TOperator, TIntermTyped*& lhsNode, TIntermTyped*& rhsNode);
TIntermTyped* addShapeConversion(const TType&, TIntermTyped*);
TIntermTyped* addBinaryMath(TOperator, TIntermTyped* left, TIntermTyped* right, TSourceLoc);
@ -481,6 +460,169 @@ public:
void addSymbolLinkageNodes(TIntermAggregate*& linkage, EShLanguage, TSymbolTable&);
void addSymbolLinkageNode(TIntermAggregate*& linkage, const TSymbol&);
void setUseStorageBuffer()
{
useStorageBuffer = true;
processes.addProcess("use-storage-buffer");
}
bool usingStorageBuffer() const { return useStorageBuffer; }
void setDepthReplacing() { depthReplacing = true; }
bool isDepthReplacing() const { return depthReplacing; }
bool setLocalSize(int dim, int size)
{
if (localSizeNotDefault[dim])
return size == localSize[dim];
localSizeNotDefault[dim] = true;
localSize[dim] = size;
return true;
}
unsigned int getLocalSize(int dim) const { return localSize[dim]; }
bool setLocalSizeSpecId(int dim, int id)
{
if (localSizeSpecId[dim] != TQualifier::layoutNotSet)
return id == localSizeSpecId[dim];
localSizeSpecId[dim] = id;
return true;
}
int getLocalSizeSpecId(int dim) const { return localSizeSpecId[dim]; }
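A standalone sketch of the merge rule these two setters implement (illustration only, not glslang code): the first explicit declaration of a dimension wins, and a later declaration may only repeat it.

#include <array>

struct LocalSizeState {
    std::array<int, 3>  size{{1, 1, 1}};
    std::array<bool, 3> fixed{{false, false, false}};

    // Returns false on a conflicting redeclaration, mirroring setLocalSize() above.
    bool set(int dim, int value)
    {
        if (fixed[dim])
            return value == size[dim];
        fixed[dim] = true;
        size[dim]  = value;
        return true;
    }
};
// set(0, 64) -> true; set(0, 64) -> true; set(0, 32) -> false (conflict).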
#ifdef GLSLANG_WEB
void output(TInfoSink&, bool tree) { }
bool isEsProfile() const { return false; }
bool getXfbMode() const { return false; }
bool isMultiStream() const { return false; }
TLayoutGeometry getOutputPrimitive() const { return ElgNone; }
bool getPostDepthCoverage() const { return false; }
bool getEarlyFragmentTests() const { return false; }
TLayoutDepth getDepth() const { return EldNone; }
bool getPixelCenterInteger() const { return false; }
void setOriginUpperLeft() { }
bool getOriginUpperLeft() const { return true; }
TInterlockOrdering getInterlockOrdering() const { return EioNone; }
bool getAutoMapBindings() const { return false; }
bool getAutoMapLocations() const { return false; }
int getNumPushConstants() const { return 0; }
void addShaderRecordNVCount() { }
void addTaskNVCount() { }
void setUseVulkanMemoryModel() { }
bool usingVulkanMemoryModel() const { return false; }
bool usingPhysicalStorageBuffer() const { return false; }
bool usingVariablePointers() const { return false; }
unsigned getXfbStride(int buffer) const { return 0; }
bool hasLayoutDerivativeModeNone() const { return false; }
ComputeDerivativeMode getLayoutDerivativeModeNone() const { return LayoutDerivativeNone; }
#else
void output(TInfoSink&, bool tree);
bool isEsProfile() const { return profile == EEsProfile; }
void setShiftBinding(TResourceType res, unsigned int shift)
{
shiftBinding[res] = shift;
const char* name = getResourceName(res);
if (name != nullptr)
processes.addIfNonZero(name, shift);
}
unsigned int getShiftBinding(TResourceType res) const { return shiftBinding[res]; }
void setShiftBindingForSet(TResourceType res, unsigned int shift, unsigned int set)
{
if (shift == 0) // ignore if there's no shift: it's a no-op.
return;
shiftBindingForSet[res][set] = shift;
const char* name = getResourceName(res);
if (name != nullptr) {
processes.addProcess(name);
processes.addArgument(shift);
processes.addArgument(set);
}
}
int getShiftBindingForSet(TResourceType res, unsigned int set) const
{
const auto shift = shiftBindingForSet[res].find(set);
return shift == shiftBindingForSet[res].end() ? -1 : shift->second;
}
bool hasShiftBindingForSet(TResourceType res) const { return !shiftBindingForSet[res].empty(); }
void setResourceSetBinding(const std::vector<std::string>& shift)
{
resourceSetBinding = shift;
if (shift.size() > 0) {
processes.addProcess("resource-set-binding");
for (int s = 0; s < (int)shift.size(); ++s)
processes.addArgument(shift[s]);
}
}
const std::vector<std::string>& getResourceSetBinding() const { return resourceSetBinding; }
void setAutoMapBindings(bool map)
{
autoMapBindings = map;
if (autoMapBindings)
processes.addProcess("auto-map-bindings");
}
bool getAutoMapBindings() const { return autoMapBindings; }
void setAutoMapLocations(bool map)
{
autoMapLocations = map;
if (autoMapLocations)
processes.addProcess("auto-map-locations");
}
bool getAutoMapLocations() const { return autoMapLocations; }
#ifdef ENABLE_HLSL
void setFlattenUniformArrays(bool flatten)
{
flattenUniformArrays = flatten;
if (flattenUniformArrays)
processes.addProcess("flatten-uniform-arrays");
}
bool getFlattenUniformArrays() const { return flattenUniformArrays; }
#endif
void setNoStorageFormat(bool b)
{
useUnknownFormat = b;
if (useUnknownFormat)
processes.addProcess("no-storage-format");
}
bool getNoStorageFormat() const { return useUnknownFormat; }
void setUseVulkanMemoryModel()
{
useVulkanMemoryModel = true;
processes.addProcess("use-vulkan-memory-model");
}
bool usingVulkanMemoryModel() const { return useVulkanMemoryModel; }
void setUsePhysicalStorageBuffer()
{
usePhysicalStorageBuffer = true;
}
bool usingPhysicalStorageBuffer() const { return usePhysicalStorageBuffer; }
void setUseVariablePointers()
{
useVariablePointers = true;
processes.addProcess("use-variable-pointers");
}
bool usingVariablePointers() const { return useVariablePointers; }
#ifdef ENABLE_HLSL
template<class T> T addCounterBufferName(const T& name) const { return name + implicitCounterName; }
bool hasCounterBufferName(const TString& name) const {
size_t len = strlen(implicitCounterName);
return name.size() > len &&
name.compare(name.size() - len, len, implicitCounterName) == 0;
}
#endif
void setTextureSamplerTransformMode(EShTextureSamplerTransformMode mode) { textureSamplerTransformMode = mode; }
int getNumPushConstants() const { return numPushConstants; }
void addShaderRecordNVCount() { ++numShaderRecordNVBlocks; }
void addTaskNVCount() { ++numTaskNVBlocks; }
bool setInvocations(int i)
{
if (invocations != TQualifier::layoutNotSet)
@ -524,23 +666,14 @@ public:
void setPointMode() { pointMode = true; }
bool getPointMode() const { return pointMode; }
bool setLocalSize(int dim, int size)
bool setInterlockOrdering(TInterlockOrdering o)
{
if (localSize[dim] > 1)
return size == localSize[dim];
localSize[dim] = size;
if (interlockOrdering != EioNone)
return interlockOrdering == o;
interlockOrdering = o;
return true;
}
unsigned int getLocalSize(int dim) const { return localSize[dim]; }
bool setLocalSizeSpecId(int dim, int id)
{
if (localSizeSpecId[dim] != TQualifier::layoutNotSet)
return id == localSizeSpecId[dim];
localSizeSpecId[dim] = id;
return true;
}
int getLocalSizeSpecId(int dim) const { return localSizeSpecId[dim]; }
TInterlockOrdering getInterlockOrdering() const { return interlockOrdering; }
void setXfbMode() { xfbMode = true; }
bool getXfbMode() const { return xfbMode; }
@ -554,14 +687,10 @@ public:
return true;
}
TLayoutGeometry getOutputPrimitive() const { return outputPrimitive; }
void setOriginUpperLeft() { originUpperLeft = true; }
bool getOriginUpperLeft() const { return originUpperLeft; }
void setPixelCenterInteger() { pixelCenterInteger = true; }
bool getPixelCenterInteger() const { return pixelCenterInteger; }
void setEarlyFragmentTests() { earlyFragmentTests = true; }
bool getEarlyFragmentTests() const { return earlyFragmentTests; }
void setPostDepthCoverage() { postDepthCoverage = true; }
bool getPostDepthCoverage() const { return postDepthCoverage; }
void setEarlyFragmentTests() { earlyFragmentTests = true; }
bool getEarlyFragmentTests() const { return earlyFragmentTests; }
bool setDepth(TLayoutDepth d)
{
if (depthLayout != EldNone)
@ -570,19 +699,98 @@ public:
return true;
}
TLayoutDepth getDepth() const { return depthLayout; }
void setDepthReplacing() { depthReplacing = true; }
bool isDepthReplacing() const { return depthReplacing; }
void setHlslFunctionality1() { hlslFunctionality1 = true; }
bool getHlslFunctionality1() const { return hlslFunctionality1; }
void setOriginUpperLeft() { originUpperLeft = true; }
bool getOriginUpperLeft() const { return originUpperLeft; }
void setPixelCenterInteger() { pixelCenterInteger = true; }
bool getPixelCenterInteger() const { return pixelCenterInteger; }
void addBlendEquation(TBlendEquationShift b) { blendEquations |= (1 << b); }
unsigned int getBlendEquations() const { return blendEquations; }
bool setXfbBufferStride(int buffer, unsigned stride)
{
if (xfbBuffers[buffer].stride != TQualifier::layoutXfbStrideEnd)
return xfbBuffers[buffer].stride == stride;
xfbBuffers[buffer].stride = stride;
return true;
}
unsigned getXfbStride(int buffer) const { return xfbBuffers[buffer].stride; }
int addXfbBufferOffset(const TType&);
unsigned int computeTypeXfbSize(const TType&, bool& contains64BitType, bool& contains32BitType, bool& contains16BitType) const;
unsigned int computeTypeXfbSize(const TType&, bool& contains64BitType) const;
void setLayoutOverrideCoverage() { layoutOverrideCoverage = true; }
bool getLayoutOverrideCoverage() const { return layoutOverrideCoverage; }
void setGeoPassthroughEXT() { geoPassthroughEXT = true; }
bool getGeoPassthroughEXT() const { return geoPassthroughEXT; }
void setLayoutDerivativeMode(ComputeDerivativeMode mode) { computeDerivativeMode = mode; }
bool hasLayoutDerivativeModeNone() const { return computeDerivativeMode != LayoutDerivativeNone; }
ComputeDerivativeMode getLayoutDerivativeModeNone() const { return computeDerivativeMode; }
bool setPrimitives(int m)
{
if (primitives != TQualifier::layoutNotSet)
return primitives == m;
primitives = m;
return true;
}
int getPrimitives() const { return primitives; }
const char* addSemanticName(const TString& name)
{
return semanticNameSet.insert(name).first->c_str();
}
void addUniformLocationOverride(const char* nameStr, int location)
{
std::string name = nameStr;
uniformLocationOverrides[name] = location;
}
int getUniformLocationOverride(const char* nameStr) const
{
std::string name = nameStr;
auto pos = uniformLocationOverrides.find(name);
if (pos == uniformLocationOverrides.end())
return -1;
else
return pos->second;
}
void setUniformLocationBase(int base) { uniformLocationBase = base; }
int getUniformLocationBase() const { return uniformLocationBase; }
void setNeedsLegalization() { needToLegalize = true; }
bool needsLegalization() const { return needToLegalize; }
void setBinaryDoubleOutput() { binaryDoubleOutput = true; }
bool getBinaryDoubleOutput() { return binaryDoubleOutput; }
#endif // GLSLANG_WEB
#ifdef ENABLE_HLSL
void setHlslFunctionality1() { hlslFunctionality1 = true; }
bool getHlslFunctionality1() const { return hlslFunctionality1; }
void setHlslOffsets()
{
hlslOffsets = true;
if (hlslOffsets)
processes.addProcess("hlsl-offsets");
}
bool usingHlslOffsets() const { return hlslOffsets; }
void setHlslIoMapping(bool b)
{
hlslIoMapping = b;
if (hlslIoMapping)
processes.addProcess("hlsl-iomap");
}
bool usingHlslIoMapping() { return hlslIoMapping; }
#else
bool getHlslFunctionality1() const { return false; }
bool usingHlslOffsets() const { return false; }
bool usingHlslIoMapping() { return false; }
#endif
void addToCallGraph(TInfoSink&, const TString& caller, const TString& callee);
void merge(TInfoSink&, TIntermediate&);
void finalCheck(TInfoSink&, bool keepUncalled);
bool buildConvertOp(TBasicType dst, TBasicType src, TOperator& convertOp) const;
TIntermTyped* createConversion(TBasicType convertTo, TIntermTyped* node) const;
void addIoAccessed(const TString& name) { ioAccessed.insert(name); }
bool inIoAccessed(const TString& name) const { return ioAccessed.find(name) != ioAccessed.end(); }
@ -593,38 +801,27 @@ public:
static int computeTypeLocationSize(const TType&, EShLanguage);
static int computeTypeUniformLocationSize(const TType&);
bool setXfbBufferStride(int buffer, unsigned stride)
{
if (xfbBuffers[buffer].stride != TQualifier::layoutXfbStrideEnd)
return xfbBuffers[buffer].stride == stride;
xfbBuffers[buffer].stride = stride;
return true;
}
unsigned getXfbStride(int buffer) const { return xfbBuffers[buffer].stride; }
int addXfbBufferOffset(const TType&);
unsigned int computeTypeXfbSize(const TType&, bool& containsDouble) const;
static int getBaseAlignmentScalar(const TType&, int& size);
static int getBaseAlignment(const TType&, int& size, int& stride, bool std140, bool rowMajor);
static int getBaseAlignment(const TType&, int& size, int& stride, TLayoutPacking layoutPacking, bool rowMajor);
static int getScalarAlignment(const TType&, int& size, int& stride, bool rowMajor);
static int getMemberAlignment(const TType&, int& size, int& stride, TLayoutPacking layoutPacking, bool rowMajor);
static bool improperStraddle(const TType& type, int size, int offset);
static void updateOffset(const TType& parentType, const TType& memberType, int& offset, int& memberSize);
static int getOffset(const TType& type, int index);
static int getBlockSize(const TType& blockType);
static int computeBufferReferenceTypeSize(const TType&);
bool promote(TIntermOperator*);
#ifdef NV_EXTENSIONS
void setLayoutOverrideCoverage() { layoutOverrideCoverage = true; }
bool getLayoutOverrideCoverage() const { return layoutOverrideCoverage; }
void setGeoPassthroughEXT() { geoPassthroughEXT = true; }
bool getGeoPassthroughEXT() const { return geoPassthroughEXT; }
#endif
const char* addSemanticName(const TString& name)
{
return semanticNameSet.insert(name).first->c_str();
}
void setNanMinMaxClamp(bool setting) { nanMinMaxClamp = setting; }
bool getNanMinMaxClamp() const { return nanMinMaxClamp; }
void setSourceFile(const char* file) { if (file != nullptr) sourceFile = file; }
const std::string& getSourceFile() const { return sourceFile; }
void addSourceText(const char* text) { sourceText = sourceText + text; }
void addSourceText(const char* text, size_t len) { sourceText.append(text, len); }
const std::string& getSourceText() const { return sourceText; }
void addProcesses(const std::vector<std::string>& p) {
const std::map<std::string, std::string>& getIncludeText() const { return includeText; }
void addIncludeText(const char* name, const char* text, size_t len) { includeText[name].assign(text,len); }
void addProcesses(const std::vector<std::string>& p)
{
for (int i = 0; i < (int)p.size(); ++i)
processes.addProcess(p[i]);
}
@ -632,19 +829,38 @@ public:
void addProcessArgument(const std::string& arg) { processes.addArgument(arg); }
const std::vector<std::string>& getProcesses() const { return processes.getProcesses(); }
void setNeedsLegalization() { needToLegalize = true; }
bool needsLegalization() const { return needToLegalize; }
// Certain explicit conversions are allowed conditionally
#ifdef GLSLANG_WEB
bool getArithemeticInt8Enabled() const { return false; }
bool getArithemeticInt16Enabled() const { return false; }
bool getArithemeticFloat16Enabled() const { return false; }
#else
bool getArithemeticInt8Enabled() const {
return extensionRequested(E_GL_EXT_shader_explicit_arithmetic_types) ||
extensionRequested(E_GL_EXT_shader_explicit_arithmetic_types_int8);
}
bool getArithemeticInt16Enabled() const {
return extensionRequested(E_GL_EXT_shader_explicit_arithmetic_types) ||
extensionRequested(E_GL_AMD_gpu_shader_int16) ||
extensionRequested(E_GL_EXT_shader_explicit_arithmetic_types_int16);
}
void setBinaryDoubleOutput() { binaryDoubleOutput = true; }
bool getBinaryDoubleOutput() { return binaryDoubleOutput; }
const char* const implicitThisName;
const char* const implicitCounterName;
bool getArithemeticFloat16Enabled() const {
return extensionRequested(E_GL_EXT_shader_explicit_arithmetic_types) ||
extensionRequested(E_GL_AMD_gpu_shader_half_float) ||
extensionRequested(E_GL_EXT_shader_explicit_arithmetic_types_float16);
}
#endif
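A standalone analogue of getArithemeticFloat16Enabled() above (illustration only): 16-bit float arithmetic is considered enabled once any of the listed extensions has been requested.

#include <set>
#include <string>

inline bool float16ArithmeticEnabled(const std::set<std::string>& requestedExtensions)
{
    return requestedExtensions.count("GL_EXT_shader_explicit_arithmetic_types") != 0 ||
           requestedExtensions.count("GL_AMD_gpu_shader_half_float") != 0 ||
           requestedExtensions.count("GL_EXT_shader_explicit_arithmetic_types_float16") != 0;
}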
protected:
TIntermSymbol* addSymbol(int Id, const TString&, const TType&, const TConstUnionArray&, TIntermTyped* subtree, const TSourceLoc&);
void error(TInfoSink& infoSink, const char*);
void warn(TInfoSink& infoSink, const char*);
void mergeCallGraphs(TInfoSink&, TIntermediate&);
void mergeModes(TInfoSink&, TIntermediate&);
void mergeTrees(TInfoSink&, TIntermediate&);
void seedIdMap(TMap<TString, int>& idMap, int& maxId);
void remapIds(const TMap<TString, int>& idMap, int idShift, TIntermediate&);
void mergeBodies(TInfoSink&, TIntermSequence& globals, const TIntermSequence& unitGlobals);
void mergeLinkerObjects(TInfoSink&, TIntermSequence& linkerObjects, const TIntermSequence& unitLinkerObjects);
void mergeImplicitArraySizes(TType&, const TType&);
@ -652,7 +868,7 @@ protected:
void checkCallGraphCycles(TInfoSink&);
void checkCallGraphBodies(TInfoSink&, bool keepUncalled);
void inOutLocationCheck(TInfoSink&);
TIntermSequence& findLinkerObjects() const;
TIntermAggregate* findLinkerObjects() const;
bool userOutputUsed() const;
bool isSpecializationOperation(const TIntermOperator&) const;
bool isNonuniformPropagating(TOperator) const;
@ -665,15 +881,25 @@ protected:
bool specConstantPropagates(const TIntermTyped&, const TIntermTyped&);
void performTextureUpgradeAndSamplerRemovalTransformation(TIntermNode* root);
bool isConversionAllowed(TOperator op, TIntermTyped* node) const;
TIntermUnary* createConversion(TBasicType convertTo, TIntermTyped* node) const;
std::tuple<TBasicType, TBasicType> getConversionDestinatonType(TBasicType type0, TBasicType type1, TOperator op) const;
// JohnK: I think this function should go away.
// This data structure is just a log to pass on to back ends.
// Versioning and extensions are handled in Version.cpp, with a rich
// set of functions for querying stages, versions, extension enable/disabled, etc.
#ifdef GLSLANG_WEB
bool extensionRequested(const char *extension) const { return false; }
#else
bool extensionRequested(const char *extension) const {return requestedExtensions.find(extension) != requestedExtensions.end();}
#endif
static const char* getResourceName(TResourceType);
const EShLanguage language; // stage, known at construction time
EShSource source; // source language, known a bit later
std::string entryPointName;
std::string entryPointMangledName;
typedef std::list<TCall> TGraph;
TGraph callGraph;
EProfile profile; // source profile
int version; // source version
@ -685,6 +911,20 @@ protected:
int numErrors;
int numPushConstants;
bool recursive;
bool invertY;
bool useStorageBuffer;
bool nanMinMaxClamp; // true if desiring min/max/clamp to favor non-NaN over NaN
bool depthReplacing;
int localSize[3];
bool localSizeNotDefault[3];
int localSizeSpecId[3];
#ifndef GLSLANG_WEB
public:
const char* const implicitThisName;
const char* const implicitCounterName;
protected:
EShSource source; // source language, known a bit later
bool useVulkanMemoryModel;
int invocations;
int vertices;
TLayoutGeometry inputPrimitive;
@ -693,61 +933,65 @@ protected:
bool originUpperLeft;
TVertexSpacing vertexSpacing;
TVertexOrder vertexOrder;
TInterlockOrdering interlockOrdering;
bool pointMode;
int localSize[3];
int localSizeSpecId[3];
bool earlyFragmentTests;
bool postDepthCoverage;
TLayoutDepth depthLayout;
bool depthReplacing;
bool hlslFunctionality1;
int blendEquations; // an 'or'ing of masks of shifts of TBlendEquationShift
bool xfbMode;
std::vector<TXfbBuffer> xfbBuffers; // all the data we need to track per xfb buffer
bool multiStream;
#ifdef NV_EXTENSIONS
bool layoutOverrideCoverage;
bool geoPassthroughEXT;
#endif
int numShaderRecordNVBlocks;
ComputeDerivativeMode computeDerivativeMode;
int primitives;
int numTaskNVBlocks;
// Base shift values
std::array<unsigned int, EResCount> shiftBinding;
// Per-descriptor-set shift values
std::array<std::map<int, int>, EResCount> shiftBindingForSet;
std::array<std::map<int, int>, EResCount> shiftBindingForSet;
std::vector<std::string> resourceSetBinding;
bool autoMapBindings;
bool autoMapLocations;
bool invertY;
bool flattenUniformArrays;
bool useUnknownFormat;
bool hlslOffsets;
bool useStorageBuffer;
bool hlslIoMapping;
bool useVariablePointers;
typedef std::list<TCall> TGraph;
TGraph callGraph;
std::set<TString> ioAccessed; // set of names of statically read/written I/O that might need extra checking
std::vector<TIoRange> usedIo[4]; // sets of used locations, one for each of in, out, uniform, and buffers
std::vector<TOffsetRange> usedAtomics; // sets of bindings used by atomic counters
std::vector<TXfbBuffer> xfbBuffers; // all the data we need to track per xfb buffer
std::unordered_set<int> usedConstantId; // specialization constant ids used
std::set<TString> semanticNameSet;
EShTextureSamplerTransformMode textureSamplerTransformMode;
bool needToLegalize;
bool binaryDoubleOutput;
bool usePhysicalStorageBuffer;
std::unordered_map<std::string, int> uniformLocationOverrides;
int uniformLocationBase;
#endif
std::unordered_set<int> usedConstantId; // specialization constant ids used
std::vector<TOffsetRange> usedAtomics; // sets of bindings used by atomic counters
std::vector<TIoRange> usedIo[4]; // sets of used locations, one for each of in, out, uniform, and buffers
// set of names of statically read/written I/O that might need extra checking
std::set<TString> ioAccessed;
// source code of shader, useful as part of debug information
std::string sourceFile;
std::string sourceText;
// Included text. First string is a name, second is the included text
std::map<std::string, std::string> includeText;
// for OpModuleProcessed, or equivalent
TProcesses processes;
bool needToLegalize;
bool binaryDoubleOutput;
private:
void operator=(TIntermediate&); // prevent assignments
};
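A small illustration of the "@count" counter-buffer naming convention used by addCounterBufferName() and hasCounterBufferName() above (sketch only; implicitCounterName is initialized to "@count" in the constructor):

#include <cstring>
#include <string>

inline bool looksLikeCounterBuffer(const std::string& name)
{
    const char* suffix = "@count";
    const size_t len = std::strlen(suffix);
    return name.size() > len && name.compare(name.size() - len, len, suffix) == 0;
}
// looksLikeCounterBuffer("myBuffer@count") == true; looksLikeCounterBuffer("myBuffer") == false.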

Externals/glslang/glslang/MachineIndependent/parseVersions.h vendored Executable file → Normal file

@ -1,5 +1,5 @@
//
// Copyright (C) 2016 Google, Inc.
// Copyright (C) 2015-2018 Google, Inc.
// Copyright (C) 2017 ARM Limited.
//
// All rights reserved.
@ -57,44 +57,131 @@ public:
TParseVersions(TIntermediate& interm, int version, EProfile profile,
const SpvVersion& spvVersion, EShLanguage language, TInfoSink& infoSink,
bool forwardCompatible, EShMessages messages)
: infoSink(infoSink), version(version), profile(profile), language(language),
spvVersion(spvVersion), forwardCompatible(forwardCompatible),
intermediate(interm), messages(messages), numErrors(0), currentScanner(0) { }
:
#ifndef GLSLANG_WEB
forwardCompatible(forwardCompatible),
profile(profile),
#endif
infoSink(infoSink), version(version),
language(language),
spvVersion(spvVersion),
intermediate(interm), messages(messages), numErrors(0), currentScanner(0) { }
virtual ~TParseVersions() { }
void requireStage(const TSourceLoc&, EShLanguageMask, const char* featureDesc);
void requireStage(const TSourceLoc&, EShLanguage, const char* featureDesc);
#ifdef GLSLANG_WEB
const EProfile profile = EEsProfile;
bool isEsProfile() const { return true; }
void requireProfile(const TSourceLoc& loc, int profileMask, const char* featureDesc)
{
if (! (EEsProfile & profileMask))
error(loc, "not supported with this profile:", featureDesc, ProfileName(profile));
}
void profileRequires(const TSourceLoc& loc, int profileMask, int minVersion, int numExtensions,
const char* const extensions[], const char* featureDesc)
{
if ((EEsProfile & profileMask) && (minVersion == 0 || version < minVersion))
error(loc, "not supported for this version or the enabled extensions", featureDesc, "");
}
void profileRequires(const TSourceLoc& loc, int profileMask, int minVersion, const char* extension,
const char* featureDesc)
{
profileRequires(loc, profileMask, minVersion, extension ? 1 : 0, &extension, featureDesc);
}
void initializeExtensionBehavior() { }
void checkDeprecated(const TSourceLoc&, int queryProfiles, int depVersion, const char* featureDesc) { }
void requireNotRemoved(const TSourceLoc&, int queryProfiles, int removedVersion, const char* featureDesc) { }
void requireExtensions(const TSourceLoc&, int numExtensions, const char* const extensions[],
const char* featureDesc) { }
void ppRequireExtensions(const TSourceLoc&, int numExtensions, const char* const extensions[],
const char* featureDesc) { }
TExtensionBehavior getExtensionBehavior(const char*) { return EBhMissing; }
bool extensionTurnedOn(const char* const extension) { return false; }
bool extensionsTurnedOn(int numExtensions, const char* const extensions[]) { return false; }
void updateExtensionBehavior(int line, const char* const extension, const char* behavior) { }
void updateExtensionBehavior(const char* const extension, TExtensionBehavior) { }
void checkExtensionStage(const TSourceLoc&, const char* const extension) { }
void fullIntegerCheck(const TSourceLoc&, const char* op) { }
void doubleCheck(const TSourceLoc&, const char* op) { }
bool float16Arithmetic() { return false; }
void requireFloat16Arithmetic(const TSourceLoc& loc, const char* op, const char* featureDesc) { }
bool int16Arithmetic() { return false; }
void requireInt16Arithmetic(const TSourceLoc& loc, const char* op, const char* featureDesc) { }
bool int8Arithmetic() { return false; }
void requireInt8Arithmetic(const TSourceLoc& loc, const char* op, const char* featureDesc) { }
void int64Check(const TSourceLoc&, const char* op, bool builtIn = false) { }
void explicitFloat32Check(const TSourceLoc&, const char* op, bool builtIn = false) { }
void explicitFloat64Check(const TSourceLoc&, const char* op, bool builtIn = false) { }
bool relaxedErrors() const { return false; }
bool suppressWarnings() const { return true; }
bool isForwardCompatible() const { return false; }
#else
bool forwardCompatible; // true if errors are to be given for use of deprecated features
EProfile profile; // the declared profile in the shader (core by default)
bool isEsProfile() const { return profile == EEsProfile; }
void requireProfile(const TSourceLoc& loc, int profileMask, const char* featureDesc);
void profileRequires(const TSourceLoc& loc, int profileMask, int minVersion, int numExtensions,
const char* const extensions[], const char* featureDesc);
void profileRequires(const TSourceLoc& loc, int profileMask, int minVersion, const char* extension,
const char* featureDesc);
virtual void initializeExtensionBehavior();
virtual void requireProfile(const TSourceLoc&, int queryProfiles, const char* featureDesc);
virtual void profileRequires(const TSourceLoc&, int queryProfiles, int minVersion, int numExtensions, const char* const extensions[], const char* featureDesc);
virtual void profileRequires(const TSourceLoc&, int queryProfiles, int minVersion, const char* const extension, const char* featureDesc);
virtual void requireStage(const TSourceLoc&, EShLanguageMask, const char* featureDesc);
virtual void requireStage(const TSourceLoc&, EShLanguage, const char* featureDesc);
virtual void checkDeprecated(const TSourceLoc&, int queryProfiles, int depVersion, const char* featureDesc);
virtual void requireNotRemoved(const TSourceLoc&, int queryProfiles, int removedVersion, const char* featureDesc);
virtual void unimplemented(const TSourceLoc&, const char* featureDesc);
virtual void requireExtensions(const TSourceLoc&, int numExtensions, const char* const extensions[], const char* featureDesc);
virtual void ppRequireExtensions(const TSourceLoc&, int numExtensions, const char* const extensions[], const char* featureDesc);
virtual void requireExtensions(const TSourceLoc&, int numExtensions, const char* const extensions[],
const char* featureDesc);
virtual void ppRequireExtensions(const TSourceLoc&, int numExtensions, const char* const extensions[],
const char* featureDesc);
virtual TExtensionBehavior getExtensionBehavior(const char*);
virtual bool extensionTurnedOn(const char* const extension);
virtual bool extensionsTurnedOn(int numExtensions, const char* const extensions[]);
virtual void updateExtensionBehavior(int line, const char* const extension, const char* behavior);
virtual void updateExtensionBehavior(const char* const extension, TExtensionBehavior);
virtual bool checkExtensionsRequested(const TSourceLoc&, int numExtensions, const char* const extensions[],
const char* featureDesc);
virtual void checkExtensionStage(const TSourceLoc&, const char* const extension);
virtual void fullIntegerCheck(const TSourceLoc&, const char* op);
virtual void unimplemented(const TSourceLoc&, const char* featureDesc);
virtual void doubleCheck(const TSourceLoc&, const char* op);
virtual void float16Check(const TSourceLoc&, const char* op, bool builtIn = false);
#ifdef AMD_EXTENSIONS
virtual void float16ScalarVectorCheck(const TSourceLoc&, const char* op, bool builtIn = false);
virtual bool float16Arithmetic();
virtual void requireFloat16Arithmetic(const TSourceLoc& loc, const char* op, const char* featureDesc);
virtual void int16ScalarVectorCheck(const TSourceLoc&, const char* op, bool builtIn = false);
virtual bool int16Arithmetic();
virtual void requireInt16Arithmetic(const TSourceLoc& loc, const char* op, const char* featureDesc);
virtual void int8ScalarVectorCheck(const TSourceLoc&, const char* op, bool builtIn = false);
virtual bool int8Arithmetic();
virtual void requireInt8Arithmetic(const TSourceLoc& loc, const char* op, const char* featureDesc);
virtual void float16OpaqueCheck(const TSourceLoc&, const char* op, bool builtIn = false);
#endif
virtual void int64Check(const TSourceLoc&, const char* op, bool builtIn = false);
virtual void explicitInt8Check(const TSourceLoc&, const char* op, bool builtIn = false);
virtual void explicitInt16Check(const TSourceLoc&, const char* op, bool builtIn = false);
virtual void explicitInt32Check(const TSourceLoc&, const char* op, bool builtIn = false);
virtual void explicitFloat32Check(const TSourceLoc&, const char* op, bool builtIn = false);
virtual void explicitFloat64Check(const TSourceLoc&, const char* op, bool builtIn = false);
virtual void fcoopmatCheck(const TSourceLoc&, const char* op, bool builtIn = false);
virtual void intcoopmatCheck(const TSourceLoc&, const char *op, bool builtIn = false);
bool relaxedErrors() const { return (messages & EShMsgRelaxedErrors) != 0; }
bool suppressWarnings() const { return (messages & EShMsgSuppressWarnings) != 0; }
bool isForwardCompatible() const { return forwardCompatible; }
#endif // GLSLANG_WEB
virtual void spvRemoved(const TSourceLoc&, const char* op);
virtual void vulkanRemoved(const TSourceLoc&, const char* op);
virtual void requireVulkan(const TSourceLoc&, const char* op);
virtual void requireSpv(const TSourceLoc&, const char* op);
virtual bool checkExtensionsRequested(const TSourceLoc&, int numExtensions, const char* const extensions[], const char* featureDesc);
virtual void updateExtensionBehavior(const char* const extension, TExtensionBehavior);
#if defined(GLSLANG_WEB) && !defined(GLSLANG_WEB_DEVEL)
void C_DECL error(const TSourceLoc&, const char* szReason, const char* szToken,
const char* szExtraInfoFormat, ...) { addError(); }
void C_DECL warn(const TSourceLoc&, const char* szReason, const char* szToken,
const char* szExtraInfoFormat, ...) { }
void C_DECL ppError(const TSourceLoc&, const char* szReason, const char* szToken,
const char* szExtraInfoFormat, ...) { addError(); }
void C_DECL ppWarn(const TSourceLoc&, const char* szReason, const char* szToken,
const char* szExtraInfoFormat, ...) { }
#else
virtual void C_DECL error(const TSourceLoc&, const char* szReason, const char* szToken,
const char* szExtraInfoFormat, ...) = 0;
virtual void C_DECL warn(const TSourceLoc&, const char* szReason, const char* szToken,
@ -103,6 +190,7 @@ public:
const char* szExtraInfoFormat, ...) = 0;
virtual void C_DECL ppWarn(const TSourceLoc&, const char* szReason, const char* szToken,
const char* szExtraInfoFormat, ...) = 0;
#endif
void addError() { ++numErrors; }
int getNumErrors() const { return numErrors; }
@ -116,19 +204,20 @@ public:
void setCurrentString(int string) { currentScanner->setString(string); }
void getPreamble(std::string&);
bool relaxedErrors() const { return (messages & EShMsgRelaxedErrors) != 0; }
bool suppressWarnings() const { return (messages & EShMsgSuppressWarnings) != 0; }
#ifdef ENABLE_HLSL
bool isReadingHLSL() const { return (messages & EShMsgReadHlsl) == EShMsgReadHlsl; }
bool hlslEnable16BitTypes() const { return (messages & EShMsgHlslEnable16BitTypes) != 0; }
bool hlslDX9Compatible() const { return (messages & EShMsgHlslDX9Compatible) != 0; }
#else
bool isReadingHLSL() const { return false; }
#endif
TInfoSink& infoSink;
// compilation mode
int version; // version, updated by #version in the shader
EProfile profile; // the declared profile in the shader (core by default)
EShLanguage language; // really the stage
SpvVersion spvVersion;
bool forwardCompatible; // true if errors are to be given for use of deprecated features
TIntermediate& intermediate; // helper for making and hooking up pieces of the parse tree
protected:
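A standalone analogue of the GLSLANG_WEB profileRequires() shown above (illustration only; the profile bit value is a stand-in): the feature is an error when the restriction applies to the ES profile and the shader's #version is below minVersion, with minVersion == 0 meaning the feature is never available without an extension.

inline bool profileRequiresError(int profileMask, int version, int minVersion)
{
    const int esProfileBit = 1 << 3; // stand-in for glslang's EEsProfile bit value
    return (esProfileBit & profileMask) != 0 && (minVersion == 0 || version < minVersion);
}
// e.g. a feature gated on '#version 310 es' errors for a 300 es shader:
// profileRequiresError(1 << 3, 300, 310) == true.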


@ -0,0 +1,35 @@
//
// Copyright (C) 2018 The Khronos Group Inc.
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
//
// Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
//
// Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
//
// Neither the name of 3Dlabs Inc. Ltd. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
// COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
// POSSIBILITY OF SUCH DAMAGE.
//
#include "pch.h"


@ -0,0 +1,49 @@
#ifndef _PCH_H
#define _PCH_H
//
// Copyright (C) 2018 The Khronos Group Inc.
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
//
// Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
//
// Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
//
// Neither the name of 3Dlabs Inc. Ltd. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
// COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
// POSSIBILITY OF SUCH DAMAGE.
//
#include <sstream>
#include <cstdlib>
#include <cstring>
#include <cctype>
#include <climits>
#include <iostream>
#include <sstream>
#include <memory>
#include "SymbolTable.h"
#include "ParseHelper.h"
#include "Scan.h"
#include "ScanContext.h"
#endif /* _PCH_H */

Externals/glslang/glslang/MachineIndependent/preprocessor/Pp.cpp vendored Normal file → Executable file

@ -1,6 +1,7 @@
//
// Copyright (C) 2002-2005 3Dlabs Inc. Ltd.
// Copyright (C) 2013 LunarG, Inc.
// Copyright (C) 2015-2018 Google, Inc.
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
@ -109,11 +110,12 @@ int TPpContext::CPPdefine(TPpToken* ppToken)
// save the macro name
const int defAtom = atomStrings.getAddAtom(ppToken->name);
TSourceLoc defineLoc = ppToken->loc; // because ppToken might go to the next line before we report errors
// gather parameters to the macro, between (...)
token = scanToken(ppToken);
if (token == '(' && ! ppToken->space) {
mac.emptyArgs = 1;
if (token == '(' && !ppToken->space) {
mac.functionLike = 1;
do {
token = scanToken(ppToken);
if (mac.args.size() == 0 && token == ')')
@ -123,7 +125,6 @@ int TPpContext::CPPdefine(TPpToken* ppToken)
return token;
}
mac.emptyArgs = 0;
const int argAtom = atomStrings.getAddAtom(ppToken->name);
// check for duplication of parameter name
@ -146,10 +147,13 @@ int TPpContext::CPPdefine(TPpToken* ppToken)
}
token = scanToken(ppToken);
} else if (token != '\n' && token != EndOfInput && !ppToken->space) {
parseContext.ppWarn(ppToken->loc, "missing space after macro name", "#define", "");
return token;
}
// record the definition of the macro
TSourceLoc defineLoc = ppToken->loc; // because ppToken is going to go to the next line before we report errors
while (token != '\n' && token != EndOfInput) {
mac.body.putToken(token, ppToken);
token = scanToken(ppToken);
@ -162,27 +166,43 @@ int TPpContext::CPPdefine(TPpToken* ppToken)
if (existing != nullptr) {
if (! existing->undef) {
// Already defined -- need to make sure they are identical:
// "Two replacement lists are identical if and only if the preprocessing tokens in both have the same number,
// ordering, spelling, and white-space separation, where all white-space separations are considered identical."
if (existing->args.size() != mac.args.size() || existing->emptyArgs != mac.emptyArgs)
parseContext.ppError(defineLoc, "Macro redefined; different number of arguments:", "#define", atomStrings.getString(defAtom));
else {
if (existing->args != mac.args)
parseContext.ppError(defineLoc, "Macro redefined; different argument names:", "#define", atomStrings.getString(defAtom));
// "Two replacement lists are identical if and only if the
// preprocessing tokens in both have the same number,
// ordering, spelling, and white-space separation, where all
// white-space separations are considered identical."
if (existing->functionLike != mac.functionLike) {
parseContext.ppError(defineLoc, "Macro redefined; function-like versus object-like:", "#define",
atomStrings.getString(defAtom));
} else if (existing->args.size() != mac.args.size()) {
parseContext.ppError(defineLoc, "Macro redefined; different number of arguments:", "#define",
atomStrings.getString(defAtom));
} else {
if (existing->args != mac.args) {
parseContext.ppError(defineLoc, "Macro redefined; different argument names:", "#define",
atomStrings.getString(defAtom));
}
// set up to compare the two
existing->body.reset();
mac.body.reset();
int newToken;
bool firstToken = true;
do {
int oldToken;
TPpToken oldPpToken;
TPpToken newPpToken;
oldToken = existing->body.getToken(parseContext, &oldPpToken);
newToken = mac.body.getToken(parseContext, &newPpToken);
// for the first token, preceding spaces don't matter
if (firstToken) {
newPpToken.space = oldPpToken.space;
firstToken = false;
}
if (oldToken != newToken || oldPpToken != newPpToken) {
parseContext.ppError(defineLoc, "Macro redefined; different substitutions:", "#define", atomStrings.getString(defAtom));
parseContext.ppError(defineLoc, "Macro redefined; different substitutions:", "#define",
atomStrings.getString(defAtom));
break;
}
} while (newToken > 0);
} while (newToken != EndOfInput);
}
}
*existing = mac;
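An illustration of the redefinition rule enforced above (generic example, not from the glslang test suite): a macro may be redefined only with an identical replacement list and the same object-like/function-like shape.

#define SIZE 16        // original definition
#define SIZE 16        // accepted: identical replacement list
#define SIZE (16)      // rejected: "Macro redefined; different substitutions"
#define SIZE(x) (x)    // rejected: "Macro redefined; function-like versus object-like"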
@ -515,24 +535,28 @@ int TPpContext::eval(int token, int precedence, bool shortCircuit, int& res, boo
int TPpContext::evalToToken(int token, bool shortCircuit, int& res, bool& err, TPpToken* ppToken)
{
while (token == PpAtomIdentifier && strcmp("defined", ppToken->name) != 0) {
int macroReturn = MacroExpand(ppToken, true, false);
if (macroReturn == 0) {
switch (MacroExpand(ppToken, true, false)) {
case MacroExpandNotStarted:
case MacroExpandError:
parseContext.ppError(ppToken->loc, "can't evaluate expression", "preprocessor evaluation", "");
err = true;
res = 0;
token = scanToken(ppToken);
break;
}
if (macroReturn == -1) {
if (! shortCircuit && parseContext.profile == EEsProfile) {
case MacroExpandStarted:
break;
case MacroExpandUndef:
if (! shortCircuit && parseContext.isEsProfile()) {
const char* message = "undefined macro in expression not allowed in es profile";
if (parseContext.relaxedErrors())
parseContext.ppWarn(ppToken->loc, message, "preprocessor evaluation", ppToken->name);
else
parseContext.ppError(ppToken->loc, message, "preprocessor evaluation", ppToken->name);
}
break;
}
token = scanToken(ppToken);
if (err)
break;
}
return token;
@ -647,6 +671,7 @@ int TPpContext::CPPinclude(TPpToken* ppToken)
epilogue << (res->headerData[res->headerLength - 1] == '\n'? "" : "\n") <<
"#line " << directiveLoc.line + forNextLine << " " << directiveLoc.getStringNameOrNum() << "\n";
pushInput(new TokenizableIncludeFile(directiveLoc, prologue.str(), res, epilogue.str(), this));
parseContext.intermediate.addIncludeText(res->headerName.c_str(), res->headerData, res->headerLength);
// There's no "current" location anymore.
parseContext.setCurrentColumn(0);
} else {
@ -697,6 +722,7 @@ int TPpContext::CPPline(TPpToken* ppToken)
parseContext.setCurrentLine(lineRes);
if (token != '\n') {
#ifndef GLSLANG_WEB
if (token == PpAtomConstString) {
parseContext.ppRequireExtensions(directiveLoc, 1, &E_GL_GOOGLE_cpp_style_line_directive, "filename-based #line");
// We need to save a copy of the string instead of pointing
@ -706,7 +732,9 @@ int TPpContext::CPPline(TPpToken* ppToken)
parseContext.setCurrentSourceName(sourceName);
hasFile = true;
token = scanToken(ppToken);
} else {
} else
#endif
{
token = eval(token, MIN_PRECEDENCE, false, fileRes, fileErr, ppToken);
if (! fileErr) {
parseContext.setCurrentString(fileRes);
@ -767,10 +795,8 @@ int TPpContext::CPPpragma(TPpToken* ppToken)
case PpAtomConstUint:
case PpAtomConstInt64:
case PpAtomConstUint64:
#ifdef AMD_EXTENSIONS
case PpAtomConstInt16:
case PpAtomConstUint16:
#endif
case PpAtomConstFloat:
case PpAtomConstDouble:
case PpAtomConstFloat16:
@ -855,8 +881,7 @@ int TPpContext::CPPextension(TPpToken* ppToken)
if (token != PpAtomIdentifier)
parseContext.ppError(ppToken->loc, "extension name expected", "#extension", "");
assert(strlen(ppToken->name) <= MaxTokenLength);
strcpy(extensionName, ppToken->name);
snprintf(extensionName, sizeof(extensionName), "%s", ppToken->name);
token = scanToken(ppToken);
if (token != ':') {
@ -930,18 +955,20 @@ int TPpContext::readCPPline(TPpToken* ppToken)
case PpAtomIfndef:
token = CPPifdef(0, ppToken);
break;
case PpAtomLine:
token = CPPline(ppToken);
break;
#ifndef GLSLANG_WEB
case PpAtomInclude:
if(!parseContext.isReadingHLSL()) {
parseContext.ppRequireExtensions(ppToken->loc, 1, &E_GL_GOOGLE_include_directive, "#include");
}
token = CPPinclude(ppToken);
break;
case PpAtomLine:
token = CPPline(ppToken);
break;
case PpAtomPragma:
token = CPPpragma(ppToken);
break;
#endif
case PpAtomUndef:
token = CPPundef(ppToken);
break;
@ -1011,20 +1038,29 @@ TPpContext::TokenStream* TPpContext::PrescanMacroArg(TokenStream& arg, TPpToken*
int token;
while ((token = scanToken(ppToken)) != tMarkerInput::marker && token != EndOfInput) {
token = tokenPaste(token, *ppToken);
if (token == PpAtomIdentifier) {
switch (MacroExpand(ppToken, false, newLineOkay)) {
case MacroExpandNotStarted:
break;
case MacroExpandError:
// toss the rest of the pushed-input argument by scanning until tMarkerInput
while ((token = scanToken(ppToken)) != tMarkerInput::marker && token != EndOfInput)
;
break;
case MacroExpandStarted:
case MacroExpandUndef:
continue;
}
}
if (token == tMarkerInput::marker || token == EndOfInput)
break;
if (token == PpAtomIdentifier && MacroExpand(ppToken, false, newLineOkay) != 0)
continue;
expandedArg->putToken(token, ppToken);
}
if (token == EndOfInput) {
// MacroExpand ate the marker, so had bad input, recover
if (token != tMarkerInput::marker) {
// Error, or MacroExpand ate the marker, so had bad input, recover
delete expandedArg;
expandedArg = nullptr;
} else {
// remove the marker
popInput();
}
return expandedArg;
@ -1106,7 +1142,8 @@ int TPpContext::tZeroInput::scan(TPpToken* ppToken)
if (done)
return EndOfInput;
strcpy(ppToken->name, "0");
ppToken->name[0] = '0';
ppToken->name[1] = 0;
ppToken->ival = 0;
ppToken->space = false;
done = true;
@ -1115,14 +1152,18 @@ int TPpContext::tZeroInput::scan(TPpToken* ppToken)
}
//
// Check a token to see if it is a macro that should be expanded.
// If it is, and defined, push a tInput that will produce the appropriate expansion
// and return 1.
// If it is, but undefined, and expandUndef is requested, push a tInput that will
// expand to 0 and return -1.
// Otherwise, return 0 to indicate no expansion, which is not necessarily an error.
// Check a token to see if it is a macro that should be expanded:
// - If it is, and defined, push a tInput that will produce the appropriate
// expansion and return MacroExpandStarted.
// - If it is, but undefined, and expandUndef is requested, push a tInput
// that will expand to 0 and return MacroExpandUndef.
// - Otherwise, there is no expansion, and there are two cases:
// * It might be okay there is no expansion, and no specific error was
// detected. Returns MacroExpandNotStarted.
// * The expansion was started, but could not be completed, due to an error
// that cannot be recovered from. Returns MacroExpandError.
//
int TPpContext::MacroExpand(TPpToken* ppToken, bool expandUndef, bool newLineOkay)
MacroExpandResult TPpContext::MacroExpand(TPpToken* ppToken, bool expandUndef, bool newLineOkay)
{
ppToken->space = false;
int macroAtom = atomStrings.getAtom(ppToken->name);
@ -1131,7 +1172,7 @@ int TPpContext::MacroExpand(TPpToken* ppToken, bool expandUndef, bool newLineOka
ppToken->ival = parseContext.getCurrentLoc().line;
snprintf(ppToken->name, sizeof(ppToken->name), "%d", ppToken->ival);
UngetToken(PpAtomConstInt, ppToken);
return 1;
return MacroExpandStarted;
case PpAtomFileMacro: {
if (parseContext.getCurrentLoc().name)
@ -1139,50 +1180,55 @@ int TPpContext::MacroExpand(TPpToken* ppToken, bool expandUndef, bool newLineOka
ppToken->ival = parseContext.getCurrentLoc().string;
snprintf(ppToken->name, sizeof(ppToken->name), "%s", ppToken->loc.getStringNameOrNum().c_str());
UngetToken(PpAtomConstInt, ppToken);
return 1;
return MacroExpandStarted;
}
case PpAtomVersionMacro:
ppToken->ival = parseContext.version;
snprintf(ppToken->name, sizeof(ppToken->name), "%d", ppToken->ival);
UngetToken(PpAtomConstInt, ppToken);
return 1;
return MacroExpandStarted;
default:
break;
}
MacroSymbol* macro = macroAtom == 0 ? nullptr : lookupMacroDef(macroAtom);
int depth = 0;
// no recursive expansions
if (macro != nullptr && macro->busy)
return 0;
return MacroExpandNotStarted;
// not expanding undefined macros
if ((macro == nullptr || macro->undef) && ! expandUndef)
return 0;
return MacroExpandNotStarted;
// 0 is the value of an undefined macro
if ((macro == nullptr || macro->undef) && expandUndef) {
pushInput(new tZeroInput(this));
return -1;
return MacroExpandUndef;
}
tMacroInput *in = new tMacroInput(this);
TSourceLoc loc = ppToken->loc; // in case we go to the next line before discovering the error
in->mac = macro;
if (macro->args.size() > 0 || macro->emptyArgs) {
int token = scanToken(ppToken);
if (macro->functionLike) {
// We don't know yet if this will be a successful call of a
// function-like macro; need to look for a '(', but without trashing
// the passed in ppToken, until we know we are no longer speculative.
TPpToken parenToken;
int token = scanToken(&parenToken);
if (newLineOkay) {
while (token == '\n')
token = scanToken(ppToken);
token = scanToken(&parenToken);
}
if (token != '(') {
UngetToken(token, ppToken);
// Function-like macro called with object-like syntax: okay, don't expand.
// (We ate exactly one token that might not be white space; put it back.
UngetToken(token, &parenToken);
delete in;
return 0;
return MacroExpandNotStarted;
}
in->args.resize(in->mac->args.size());
for (size_t i = 0; i < in->mac->args.size(); i++)
@ -1193,63 +1239,70 @@ int TPpContext::MacroExpand(TPpToken* ppToken, bool expandUndef, bool newLineOka
size_t arg = 0;
bool tokenRecorded = false;
do {
depth = 0;
while (1) {
TVector<char> nestStack;
while (true) {
token = scanToken(ppToken);
if (token == EndOfInput || token == tMarkerInput::marker) {
parseContext.ppError(loc, "End of input in macro", "macro expansion", atomStrings.getString(macroAtom));
delete in;
return 0;
return MacroExpandError;
}
if (token == '\n') {
if (! newLineOkay) {
parseContext.ppError(loc, "End of line in macro substitution:", "macro expansion", atomStrings.getString(macroAtom));
delete in;
return 0;
return MacroExpandError;
}
continue;
}
if (token == '#') {
parseContext.ppError(ppToken->loc, "unexpected '#'", "macro expansion", atomStrings.getString(macroAtom));
delete in;
return 0;
return MacroExpandError;
}
if (in->mac->args.size() == 0 && token != ')')
break;
if (depth == 0 && (token == ',' || token == ')'))
if (nestStack.size() == 0 && (token == ',' || token == ')'))
break;
if (token == '(')
depth++;
if (token == ')')
depth--;
nestStack.push_back(')');
else if (token == '{' && parseContext.isReadingHLSL())
nestStack.push_back('}');
else if (nestStack.size() > 0 && token == nestStack.back())
nestStack.pop_back();
in->args[arg]->putToken(token, ppToken);
tokenRecorded = true;
}
// end of single argument scan
if (token == ')') {
if (in->mac->args.size() == 1 && tokenRecorded == 0)
// closing paren of call
if (in->mac->args.size() == 1 && !tokenRecorded)
break;
arg++;
break;
}
arg++;
} while (arg < in->mac->args.size());
// end of all arguments scan
if (arg < in->mac->args.size())
parseContext.ppError(loc, "Too few args in Macro", "macro expansion", atomStrings.getString(macroAtom));
else if (token != ')') {
depth=0;
// Error recover code; find end of call, if possible
int depth = 0;
while (token != EndOfInput && (depth > 0 || token != ')')) {
if (token == ')')
if (token == ')' || token == '}')
depth--;
token = scanToken(ppToken);
if (token == '(')
if (token == '(' || token == '{')
depth++;
}
if (token == EndOfInput) {
parseContext.ppError(loc, "End of input in macro", "macro expansion", atomStrings.getString(macroAtom));
delete in;
return 0;
return MacroExpandError;
}
parseContext.ppError(loc, "Too many args in macro", "macro expansion", atomStrings.getString(macroAtom));
}
@ -1264,7 +1317,7 @@ int TPpContext::MacroExpand(TPpToken* ppToken, bool expandUndef, bool newLineOka
macro->busy = 1;
macro->body.reset();
return 1;
return MacroExpandStarted;
}
} // end namespace glslang


@ -1,6 +1,7 @@
//
// Copyright (C) 2002-2005 3Dlabs Inc. Ltd.
// Copyright (C) 2013 LunarG, Inc.
// Copyright (C) 2015-2018 Google, Inc.
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without

Externals/glslang/glslang/MachineIndependent/preprocessor/PpContext.h vendored Executable file → Normal file

@ -1,5 +1,6 @@
//
// Copyright (C) 2013 LunarG, Inc.
// Copyright (C) 2015-2018 Google, Inc.
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
@ -83,6 +84,7 @@ NVIDIA HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include <sstream>
#include "../ParseHelper.h"
#include "PpTokens.h"
/* windows only pragma */
#ifdef _MSC_VER
@ -183,6 +185,13 @@ protected:
class TInputScanner;
enum MacroExpandResult {
MacroExpandNotStarted, // macro not expanded, which might not be an error
MacroExpandError, // a clear error occurred while expanding, no expansion
MacroExpandStarted, // macro expansion process has started
MacroExpandUndef // macro is undefined and will be expanded
};
// This class is the result of turning a huge pile of C code communicating through globals
// into a class. This was done to allowing instancing to attain thread safety.
// Don't expect too much in terms of OO design.
@ -204,7 +213,8 @@ public:
virtual int scan(TPpToken*) = 0;
virtual int getch() = 0;
virtual void ungetch() = 0;
    virtual bool peekPasting() { return false; }              // true when about to see ##
virtual bool peekContinuedPasting(int) { return false; } // true when non-spaced tokens can paste
virtual bool endOfReplacementList() { return false; } // true when at the end of a macro replacement list (RHS of #define)
virtual bool isMacroInput() { return false; }
@ -235,24 +245,79 @@ public:
// From PpTokens.cpp
//
// Capture the needed parts of a token stream for macro recording/playback.
class TokenStream {
public:
TokenStream() : current(0) { }
// Manage a stream of these 'Token', which capture the relevant parts
// of a TPpToken, plus its atom.
class Token {
public:
Token(int atom, const TPpToken& ppToken) :
atom(atom),
space(ppToken.space),
i64val(ppToken.i64val),
name(ppToken.name) { }
int get(TPpToken& ppToken)
{
ppToken.clear();
ppToken.space = space;
ppToken.i64val = i64val;
snprintf(ppToken.name, sizeof(ppToken.name), "%s", name.c_str());
return atom;
}
bool isAtom(int a) const { return atom == a; }
int getAtom() const { return atom; }
bool nonSpaced() const { return !space; }
protected:
Token() {}
int atom;
bool space; // did a space precede the token?
long long i64val;
TString name;
};
TokenStream() : currentPos(0) { }
void putToken(int token, TPpToken* ppToken);
bool peekToken(int atom) { return !atEnd() && stream[currentPos].isAtom(atom); }
bool peekContinuedPasting(int atom)
{
// This is basically necessary because, for example, the PP
// tokenizer only accepts valid numeric-literals plus suffixes, so
        // separates a numeric-literal plus a bad suffix into two tokens, both of
        // which should be pasted back together as one token when token pasting.
//
// The following code is a bit more generalized than the above example.
if (!atEnd() && atom == PpAtomIdentifier && stream[currentPos].nonSpaced()) {
switch(stream[currentPos].getAtom()) {
case PpAtomConstInt:
case PpAtomConstUint:
case PpAtomConstInt64:
case PpAtomConstUint64:
case PpAtomConstInt16:
case PpAtomConstUint16:
case PpAtomConstFloat:
case PpAtomConstDouble:
case PpAtomConstFloat16:
case PpAtomConstString:
case PpAtomIdentifier:
return true;
default:
break;
}
}
return false;
}
int getToken(TParseContextBase&, TPpToken*);
bool atEnd() { return current >= data.size(); }
bool atEnd() { return currentPos >= stream.size(); }
bool peekTokenizedPasting(bool lastTokenPastes);
bool peekUntokenizedPasting();
void reset() { current = 0; }
void reset() { currentPos = 0; }
protected:
void putSubtoken(char);
int getSubtoken();
void ungetSubtoken();
TVector<unsigned char> data;
size_t current;
TVector<Token> stream;
size_t currentPos;
};
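The class above replaces the old byte-encoded data/current scheme with a vector of structured Token records. As a rough standalone analogue of the record/playback idea (plain C++ with hypothetical names, not glslang code; the real TokenStream additionally records the 'space' flag and i64val so peekContinuedPasting() and numeric playback work):

#include <cstdio>
#include <string>
#include <vector>

// Hypothetical mini version of the record/playback stream: each entry keeps
// the atom plus its backing text, instead of re-encoding everything as bytes.
struct MiniToken {
    int atom;
    std::string name;
};

class MiniTokenStream {
public:
    void putToken(int atom, const std::string& name) { stream.push_back({atom, name}); }
    bool atEnd() const { return currentPos >= stream.size(); }
    bool peekToken(int atom) const { return !atEnd() && stream[currentPos].atom == atom; }
    const MiniToken& getToken() { return stream[currentPos++]; }
    void reset() { currentPos = 0; }
private:
    std::vector<MiniToken> stream;
    size_t currentPos = 0;
};

int main() {
    MiniTokenStream body;                              // e.g. the recorded body of "#define ANSWER 42"
    body.putToken(/* hypothetical PpAtomConstInt */ 1, "42");
    body.reset();
    while (!body.atEnd()) {
        const MiniToken& t = body.getToken();
        std::printf("atom %d, text %s\n", t.atom, t.name.c_str());
    }
    return 0;
}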
//
@ -260,12 +325,12 @@ public:
//
struct MacroSymbol {
MacroSymbol() : emptyArgs(0), busy(0), undef(0) { }
MacroSymbol() : functionLike(0), busy(0), undef(0) { }
TVector<int> args;
TokenStream body;
unsigned emptyArgs : 1;
unsigned busy : 1;
unsigned undef : 1;
unsigned functionLike : 1; // 0 means object-like, 1 means function-like
unsigned busy : 1;
unsigned undef : 1;
};
typedef TMap<int, MacroSymbol> TSymbolMap;
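The new functionLike bit records which definition form a macro used, per the comment above (0 object-like, 1 function-like). For illustration only, with hypothetical macros in plain C/C++ preprocessor source rather than glslang internals, these are the two forms it distinguishes:

// Hypothetical example: the two macro forms the 'functionLike' bit separates.
#define OBJ (1 + 2)      // object-like: the '(' is part of the replacement list
#define FUNC() (1 + 2)   // function-like with an empty parameter list

int a = OBJ;             // expands with no call syntax -> (1 + 2)
int b = FUNC();          // requires '()' at the use site to expand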
@ -312,6 +377,10 @@ protected:
int getChar() { return inputStack.back()->getch(); }
void ungetChar() { inputStack.back()->ungetch(); }
bool peekPasting() { return !inputStack.empty() && inputStack.back()->peekPasting(); }
bool peekContinuedPasting(int a)
{
return !inputStack.empty() && inputStack.back()->peekContinuedPasting(a);
}
bool endOfReplacementList() { return inputStack.empty() || inputStack.back()->endOfReplacementList(); }
bool isMacroInput() { return inputStack.size() > 0 && inputStack.back()->isMacroInput(); }
@ -336,6 +405,7 @@ protected:
virtual int getch() override { assert(0); return EndOfInput; }
virtual void ungetch() override { assert(0); }
bool peekPasting() override { return prepaste; }
bool peekContinuedPasting(int a) override { return mac->body.peekContinuedPasting(a); }
bool endOfReplacementList() override { return mac->body.atEnd(); }
bool isMacroInput() override { return true; }
@ -400,7 +470,7 @@ protected:
int readCPPline(TPpToken * ppToken);
int scanHeaderName(TPpToken* ppToken, char delimit);
TokenStream* PrescanMacroArg(TokenStream&, TPpToken*, bool newLineOkay);
int MacroExpand(TPpToken* ppToken, bool expandUndef, bool newLineOkay);
MacroExpandResult MacroExpand(TPpToken* ppToken, bool expandUndef, bool newLineOkay);
//
// From PpTokens.cpp
@ -410,14 +480,18 @@ protected:
class tTokenInput : public tInput {
public:
tTokenInput(TPpContext* pp, TokenStream* t, bool prepasting) : tInput(pp), tokens(t), lastTokenPastes(prepasting) { }
tTokenInput(TPpContext* pp, TokenStream* t, bool prepasting) :
tInput(pp),
tokens(t),
lastTokenPastes(prepasting) { }
virtual int scan(TPpToken *ppToken) override { return tokens->getToken(pp->parseContext, ppToken); }
virtual int getch() override { assert(0); return EndOfInput; }
virtual void ungetch() override { assert(0); }
virtual bool peekPasting() override { return tokens->peekTokenizedPasting(lastTokenPastes); }
bool peekContinuedPasting(int a) override { return tokens->peekContinuedPasting(a); }
protected:
TokenStream* tokens;
    bool lastTokenPastes;     // true if the last token in the input is to be pasted, rather than consumed as a token
};
class tUngotTokenInput : public tInput {
@ -526,7 +600,7 @@ protected:
prologue_(prologue),
epilogue_(epilogue),
includedFile_(includedFile),
scanner(3, strings, lengths, names, 0, 0, true),
scanner(3, strings, lengths, nullptr, 0, 0, true),
prevScanner(nullptr),
stringInput(pp, scanner)
{
@ -541,9 +615,9 @@ protected:
scanner.setLine(startLoc.line);
scanner.setString(startLoc.string);
scanner.setFile(startLoc.name, 0);
scanner.setFile(startLoc.name, 1);
scanner.setFile(startLoc.name, 2);
scanner.setFile(startLoc.getFilenameStr(), 0);
scanner.setFile(startLoc.getFilenameStr(), 1);
scanner.setFile(startLoc.getFilenameStr(), 2);
}
// tInput methods:
@ -583,8 +657,6 @@ protected:
const char* strings[3];
// Length of str_, passed to scanner constructor.
size_t lengths[3];
// String names
const char* names[3];
// Scans over str_.
TInputScanner scanner;
// The previous effective scanner before the scanner in this instance

View File

@ -2,6 +2,8 @@
// Copyright (C) 2002-2005 3Dlabs Inc. Ltd.
// Copyright (C) 2013 LunarG, Inc.
// Copyright (C) 2017 ARM Limited.
// Copyright (C) 2015-2018 Google, Inc.
//
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
@ -94,12 +96,19 @@ namespace glslang {
/////////////////////////////////// Floating point constants: /////////////////////////////////
///////////////////////////////////////////////////////////////////////////////////////////////
/*
* lFloatConst() - Scan a single- or double-precision floating point constant. Assumes that the scanner
* has seen at least one digit, followed by either a decimal '.' or the
* letter 'e', or a precision ending (e.g., F or LF).
*/
//
// Scan a single- or double-precision floating point constant.
// Assumes that the scanner has seen at least one digit,
// followed by either a decimal '.' or the letter 'e', or a
// precision ending (e.g., F or LF).
//
// This is technically not correct, as the preprocessor should just
// accept the numeric literal along with whatever suffix it has, but
// currently, it stops on seeing a bad suffix, treating that as the
// next token. This affects things like token pasting, where it is
// relevant how many tokens something was broken into.
//
// See peekContinuedPasting().
int TPpContext::lFloatConst(int len, int ch, TPpToken* ppToken)
{
const auto saveName = [&](int ch) {
@ -133,6 +142,7 @@ int TPpContext::lFloatConst(int len, int ch, TPpToken* ppToken)
ch = getChar();
int firstDecimal = len;
#ifdef ENABLE_HLSL
// 1.#INF or -1.#INF
if (ch == '#' && (ifdepth > 0 || parseContext.intermediate.getSource() == EShSourceHlsl)) {
if ((len < 2) ||
@ -160,6 +170,7 @@ int TPpContext::lFloatConst(int len, int ch, TPpToken* ppToken)
}
}
}
#endif
// Consume leading-zero digits after the decimal point
while (ch == '0') {
@ -248,6 +259,7 @@ int TPpContext::lFloatConst(int len, int ch, TPpToken* ppToken)
// Suffix:
bool isDouble = false;
bool isFloat16 = false;
#ifndef GLSLANG_WEB
if (ch == 'l' || ch == 'L') {
if (ifdepth == 0 && parseContext.intermediate.getSource() == EShSourceGlsl)
parseContext.doubleCheck(ppToken->loc, "double floating-point suffix");
@ -286,11 +298,15 @@ int TPpContext::lFloatConst(int len, int ch, TPpToken* ppToken)
saveName(ch);
isFloat16 = true;
}
} else if (ch == 'f' || ch == 'F') {
} else
#endif
if (ch == 'f' || ch == 'F') {
#ifndef GLSLANG_WEB
if (ifdepth == 0)
parseContext.profileRequires(ppToken->loc, EEsProfile, 300, nullptr, "floating-point suffix");
if (ifdepth == 0 && !parseContext.relaxedErrors())
parseContext.profileRequires(ppToken->loc, ~EEsProfile, 120, nullptr, "floating-point suffix");
#endif
if (ifdepth == 0 && !hasDecimalOrExponent)
parseContext.ppError(ppToken->loc, "float literal needs a decimal point or exponent", "", "");
saveName(ch);
@ -316,16 +332,32 @@ int TPpContext::lFloatConst(int len, int ch, TPpToken* ppToken)
ppToken->dval = (double)wholeNumber * exponentValue;
} else {
// slow path
ppToken->dval = 0.0;
// remove suffix
TString numstr(ppToken->name);
if (numstr.back() == 'f' || numstr.back() == 'F')
numstr.pop_back();
if (numstr.back() == 'h' || numstr.back() == 'H')
numstr.pop_back();
if (numstr.back() == 'l' || numstr.back() == 'L')
numstr.pop_back();
// use platform library
strtodStream.clear();
strtodStream.str(ppToken->name);
strtodStream.str(numstr.c_str());
strtodStream >> ppToken->dval;
// Assume failure combined with a large exponent was overflow, in
// an attempt to set INF. Otherwise, assume underflow, and set 0.0.
if (strtodStream.fail()) {
// Assume failure combined with a large exponent was overflow, in
// an attempt to set INF.
if (!negativeExponent && exponent + numWholeNumberDigits > 300)
ppToken->i64val = 0x7ff0000000000000; // +Infinity
else
                // Assume failure combined with a small exponent was underflow.
if (negativeExponent && exponent + numWholeNumberDigits > 300)
ppToken->dval = 0.0;
// Unknown reason for failure. Theory is that either
// - the 0.0 is still there, or
// - something reasonable was written that is better than 0.0
}
}
@ -417,6 +449,14 @@ int TPpContext::characterLiteral(TPpToken* ppToken)
//
// Scanner used to tokenize source stream.
//
// N.B. Invalid numeric suffixes are not consumed.
// This is technically not correct, as the preprocessor should just
// accept the numeric literal along with whatever suffix it has, but
// currently, it stops on seeing a bad suffix, treating that as the
// next token. This affects things like token pasting, where it is
// relevant how many tokens something was broken into.
// See peekContinuedPasting().
//
int TPpContext::tStringInput::scan(TPpToken* ppToken)
{
int AlreadyComplained = 0;
@ -430,16 +470,14 @@ int TPpContext::tStringInput::scan(TPpToken* ppToken)
static const char* const Int64_Extensions[] = {
E_GL_ARB_gpu_shader_int64,
E_GL_KHX_shader_explicit_arithmetic_types,
E_GL_KHX_shader_explicit_arithmetic_types_int64 };
E_GL_EXT_shader_explicit_arithmetic_types,
E_GL_EXT_shader_explicit_arithmetic_types_int64 };
static const int Num_Int64_Extensions = sizeof(Int64_Extensions) / sizeof(Int64_Extensions[0]);
static const char* const Int16_Extensions[] = {
#ifdef AMD_EXTENSIONS
E_GL_AMD_gpu_shader_int16,
#endif
E_GL_KHX_shader_explicit_arithmetic_types,
E_GL_KHX_shader_explicit_arithmetic_types_int16 };
E_GL_EXT_shader_explicit_arithmetic_types,
E_GL_EXT_shader_explicit_arithmetic_types_int16 };
static const int Num_Int16_Extensions = sizeof(Int16_Extensions) / sizeof(Int16_Extensions[0]);
ppToken->ival = 0;
@ -546,6 +584,7 @@ int TPpContext::tStringInput::scan(TPpToken* ppToken)
ppToken->name[len++] = (char)ch;
isUnsigned = true;
#ifndef GLSLANG_WEB
int nextCh = getch();
if (nextCh == 'l' || nextCh == 'L') {
if (len < MaxTokenLength)
@ -554,7 +593,6 @@ int TPpContext::tStringInput::scan(TPpToken* ppToken)
} else
ungetch();
#ifdef AMD_EXTENSIONS
nextCh = getch();
if ((nextCh == 's' || nextCh == 'S') &&
pp->parseContext.intermediate.getSource() == EShSourceGlsl) {
@ -563,12 +601,10 @@ int TPpContext::tStringInput::scan(TPpToken* ppToken)
isInt16 = true;
} else
ungetch();
#endif
} else if (ch == 'l' || ch == 'L') {
if (len < MaxTokenLength)
ppToken->name[len++] = (char)ch;
isInt64 = true;
#ifdef AMD_EXTENSIONS
} else if ((ch == 's' || ch == 'S') &&
pp->parseContext.intermediate.getSource() == EShSourceGlsl) {
if (len < MaxTokenLength)
@ -656,6 +692,7 @@ int TPpContext::tStringInput::scan(TPpToken* ppToken)
ppToken->name[len++] = (char)ch;
isUnsigned = true;
#ifndef GLSLANG_WEB
int nextCh = getch();
if (nextCh == 'l' || nextCh == 'L') {
if (len < MaxTokenLength)
@ -664,7 +701,6 @@ int TPpContext::tStringInput::scan(TPpToken* ppToken)
} else
ungetch();
#ifdef AMD_EXTENSIONS
nextCh = getch();
if ((nextCh == 's' || nextCh == 'S') &&
pp->parseContext.intermediate.getSource() == EShSourceGlsl) {
@ -673,12 +709,10 @@ int TPpContext::tStringInput::scan(TPpToken* ppToken)
isInt16 = true;
} else
ungetch();
#endif
} else if (ch == 'l' || ch == 'L') {
if (len < MaxTokenLength)
ppToken->name[len++] = (char)ch;
isInt64 = true;
#ifdef AMD_EXTENSIONS
} else if ((ch == 's' || ch == 'S') &&
pp->parseContext.intermediate.getSource() == EShSourceGlsl) {
if (len < MaxTokenLength)
@ -747,6 +781,7 @@ int TPpContext::tStringInput::scan(TPpToken* ppToken)
ppToken->name[len++] = (char)ch;
isUnsigned = true;
#ifndef GLSLANG_WEB
int nextCh = getch();
if (nextCh == 'l' || nextCh == 'L') {
if (len < MaxTokenLength)
@ -755,7 +790,6 @@ int TPpContext::tStringInput::scan(TPpToken* ppToken)
} else
ungetch();
#ifdef AMD_EXTENSIONS
nextCh = getch();
if ((nextCh == 's' || nextCh == 'S') &&
pp->parseContext.intermediate.getSource() == EShSourceGlsl) {
@ -764,12 +798,10 @@ int TPpContext::tStringInput::scan(TPpToken* ppToken)
isInt16 = true;
} else
ungetch();
#endif
} else if (ch == 'l' || ch == 'L') {
if (len < MaxTokenLength)
ppToken->name[len++] = (char)ch;
isInt64 = true;
#ifdef AMD_EXTENSIONS
} else if ((ch == 's' || ch == 'S') &&
pp->parseContext.intermediate.getSource() == EShSourceGlsl) {
if (len < MaxTokenLength)
@ -1061,8 +1093,17 @@ int TPpContext::tokenize(TPpToken& ppToken)
continue;
// expand macros
if (token == PpAtomIdentifier && MacroExpand(&ppToken, false, true) != 0)
continue;
if (token == PpAtomIdentifier) {
switch (MacroExpand(&ppToken, false, true)) {
case MacroExpandNotStarted:
break;
case MacroExpandError:
return EndOfInput;
case MacroExpandStarted:
case MacroExpandUndef:
continue;
}
}
switch (token) {
case PpAtomIdentifier:
@ -1089,7 +1130,7 @@ int TPpContext::tokenize(TPpToken& ppToken)
parseContext.ppError(ppToken.loc, "character literals not supported", "\'", "");
continue;
default:
strcpy(ppToken.name, atomStrings.getString(token));
snprintf(ppToken.name, sizeof(ppToken.name), "%s", atomStrings.getString(token));
break;
}
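The strcpy-to-snprintf substitution here (and again below in tokenPaste) relies on the same bounded-copy idiom. A tiny standalone sketch of why snprintf is the safer drop-in, using a hypothetical buffer rather than TPpToken::name:

#include <cstdio>
#include <cstring>

int main() {
    char name[8];                        // hypothetical fixed-size token buffer
    const char* text = "a_rather_long_identifier";

    // strcpy(name, text) would overflow; snprintf truncates and still terminates.
    std::snprintf(name, sizeof(name), "%s", text);
    std::printf("%s\n", name);           // prints "a_rathe"

    // Appending into the remaining tail of a buffer, as tokenPaste does:
    char pasted[16] = "foo";
    std::snprintf(pasted + std::strlen(pasted), sizeof(pasted) - std::strlen(pasted), "%s", "bar");
    std::printf("%s\n", pasted);         // prints "foobar"
    return 0;
}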
@ -1126,61 +1167,69 @@ int TPpContext::tokenPaste(int token, TPpToken& ppToken)
break;
}
// get the token after the ##
token = scanToken(&pastedPpToken);
// Get the token(s) after the ##.
// Because of "space" semantics, and prior tokenization, what
// appeared a single token, e.g. "3A", might have been tokenized
// into two tokens "3" and "A", but the "A" will have 'space' set to
// false. Accumulate all of these to recreate the original lexical
// appearing token.
do {
token = scanToken(&pastedPpToken);
// This covers end of argument expansion
if (token == tMarkerInput::marker) {
parseContext.ppError(ppToken.loc, "unexpected location; end of argument", "##", "");
break;
}
// This covers end of argument expansion
if (token == tMarkerInput::marker) {
parseContext.ppError(ppToken.loc, "unexpected location; end of argument", "##", "");
return resultToken;
}
// get the token text
switch (resultToken) {
case PpAtomIdentifier:
// already have the correct text in token.names
break;
case '=':
case '!':
case '-':
case '~':
case '+':
case '*':
case '/':
case '%':
case '<':
case '>':
case '|':
case '^':
case '&':
case PpAtomRight:
case PpAtomLeft:
case PpAtomAnd:
case PpAtomOr:
case PpAtomXor:
strcpy(ppToken.name, atomStrings.getString(resultToken));
strcpy(pastedPpToken.name, atomStrings.getString(token));
break;
default:
parseContext.ppError(ppToken.loc, "not supported for these tokens", "##", "");
return resultToken;
}
// get the token text
switch (resultToken) {
case PpAtomIdentifier:
// already have the correct text in token.names
break;
case '=':
case '!':
case '-':
case '~':
case '+':
case '*':
case '/':
case '%':
case '<':
case '>':
case '|':
case '^':
case '&':
case PpAtomRight:
case PpAtomLeft:
case PpAtomAnd:
case PpAtomOr:
case PpAtomXor:
snprintf(ppToken.name, sizeof(ppToken.name), "%s", atomStrings.getString(resultToken));
snprintf(pastedPpToken.name, sizeof(pastedPpToken.name), "%s", atomStrings.getString(token));
break;
default:
parseContext.ppError(ppToken.loc, "not supported for these tokens", "##", "");
return resultToken;
}
// combine the tokens
if (strlen(ppToken.name) + strlen(pastedPpToken.name) > MaxTokenLength) {
parseContext.ppError(ppToken.loc, "combined tokens are too long", "##", "");
return resultToken;
}
strncat(ppToken.name, pastedPpToken.name, MaxTokenLength - strlen(ppToken.name));
// combine the tokens
if (strlen(ppToken.name) + strlen(pastedPpToken.name) > MaxTokenLength) {
parseContext.ppError(ppToken.loc, "combined tokens are too long", "##", "");
return resultToken;
}
snprintf(&ppToken.name[0] + strlen(ppToken.name), sizeof(ppToken.name) - strlen(ppToken.name),
"%s", pastedPpToken.name);
// correct the kind of token we are making, if needed (identifiers stay identifiers)
if (resultToken != PpAtomIdentifier) {
int newToken = atomStrings.getAtom(ppToken.name);
if (newToken > 0)
resultToken = newToken;
else
parseContext.ppError(ppToken.loc, "combined token is invalid", "##", "");
}
// correct the kind of token we are making, if needed (identifiers stay identifiers)
if (resultToken != PpAtomIdentifier) {
int newToken = atomStrings.getAtom(ppToken.name);
if (newToken > 0)
resultToken = newToken;
else
parseContext.ppError(ppToken.loc, "combined token is invalid", "##", "");
}
} while (peekContinuedPasting(resultToken));
}
return resultToken;
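As the accumulation comment at the top of this hunk notes, something that reads as one token in the source may have been recorded as several non-spaced tokens. A minimal, hypothetical preprocessor example of the pasting the loop above reassembles (written as plain C/C++ preprocessor source; none of these macros come from this diff):

// Hypothetical macros, for illustration only.
#define PASTE(a, b) a ## b

int pasted = PASTE(12, 3);         // '12' ## '3' yields the single token 123
unsigned withSuffix = PASTE(7, u); // '7' ## 'u' yields the single token 7u
// A spelling like "3A" is recorded as "3" followed by a non-spaced "A";
// peekContinuedPasting() reports that pasting should continue, so the loop
// keeps concatenating until the original lexical token is rebuilt.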

View File

@ -1,6 +1,8 @@
//
// Copyright (C) 2002-2005 3Dlabs Inc. Ltd.
// Copyright (C) 2013 LunarG, Inc.
// Copyright (C) 2015-2018 Google, Inc.
//
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
@ -97,150 +99,34 @@ NVIDIA HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
namespace glslang {
namespace {
// When recording (and playing back) should the backing name string
// be saved (restored)?
bool SaveName(int atom)
{
switch (atom) {
case PpAtomIdentifier:
case PpAtomConstString:
case PpAtomConstInt:
case PpAtomConstUint:
case PpAtomConstInt64:
case PpAtomConstUint64:
#ifdef AMD_EXTENSIONS
case PpAtomConstInt16:
case PpAtomConstUint16:
#endif
case PpAtomConstFloat:
case PpAtomConstDouble:
case PpAtomConstFloat16:
return true;
default:
return false;
}
}
// When recording (and playing back) should the numeric value
// be saved (restored)?
bool SaveValue(int atom)
{
switch (atom) {
case PpAtomConstInt:
case PpAtomConstUint:
case PpAtomConstInt64:
case PpAtomConstUint64:
#ifdef AMD_EXTENSIONS
case PpAtomConstInt16:
case PpAtomConstUint16:
#endif
case PpAtomConstFloat:
case PpAtomConstDouble:
case PpAtomConstFloat16:
return true;
default:
return false;
}
}
}
// push onto back of stream
void TPpContext::TokenStream::putSubtoken(char subtoken)
{
data.push_back(static_cast<unsigned char>(subtoken));
}
// get the next token in stream
int TPpContext::TokenStream::getSubtoken()
{
if (current < data.size())
return data[current++];
else
return EndOfInput;
}
// back up one position in the stream
void TPpContext::TokenStream::ungetSubtoken()
{
if (current > 0)
--current;
}
// Add a complete token (including backing string) to the end of a list
// for later playback.
// Add a token (including backing string) to the end of a macro
// token stream, for later playback.
void TPpContext::TokenStream::putToken(int atom, TPpToken* ppToken)
{
// save the atom
assert((atom & ~0xff) == 0);
putSubtoken(static_cast<char>(atom));
// save the backing name string
if (SaveName(atom)) {
const char* s = ppToken->name;
while (*s)
putSubtoken(*s++);
putSubtoken(0);
}
// save the numeric value
if (SaveValue(atom)) {
const char* n = reinterpret_cast<const char*>(&ppToken->i64val);
for (int i = 0; i < sizeof(ppToken->i64val); ++i)
putSubtoken(*n++);
}
TokenStream::Token streamToken(atom, *ppToken);
stream.push_back(streamToken);
}
// Read the next token from a token stream.
// (Not the source stream, but a stream used to hold a tokenized macro).
// Read the next token from a macro token stream.
int TPpContext::TokenStream::getToken(TParseContextBase& parseContext, TPpToken *ppToken)
{
// get the atom
int atom = getSubtoken();
if (atom == EndOfInput)
return atom;
if (atEnd())
return EndOfInput;
// init the token
ppToken->clear();
int atom = stream[currentPos++].get(*ppToken);
ppToken->loc = parseContext.getCurrentLoc();
// get the backing name string
if (SaveName(atom)) {
int ch = getSubtoken();
int len = 0;
while (ch != 0 && ch != EndOfInput) {
if (len < MaxTokenLength) {
ppToken->name[len] = (char)ch;
len++;
ch = getSubtoken();
} else {
parseContext.error(ppToken->loc, "token too long", "", "");
break;
}
}
ppToken->name[len] = 0;
}
#ifndef GLSLANG_WEB
// Check for ##, unless the current # is the last character
if (atom == '#') {
if (current < data.size()) {
if (getSubtoken() == '#') {
parseContext.requireProfile(ppToken->loc, ~EEsProfile, "token pasting (##)");
parseContext.profileRequires(ppToken->loc, ~EEsProfile, 130, 0, "token pasting (##)");
atom = PpAtomPaste;
} else
ungetSubtoken();
if (peekToken('#')) {
parseContext.requireProfile(ppToken->loc, ~EEsProfile, "token pasting (##)");
parseContext.profileRequires(ppToken->loc, ~EEsProfile, 130, 0, "token pasting (##)");
currentPos++;
atom = PpAtomPaste;
}
}
// get the numeric value
if (SaveValue(atom)) {
char* n = reinterpret_cast<char*>(&ppToken->i64val);
for (int i = 0; i < sizeof(ppToken->i64val); ++i)
*n++ = getSubtoken();
}
#endif
return atom;
}
@ -254,15 +140,14 @@ bool TPpContext::TokenStream::peekTokenizedPasting(bool lastTokenPastes)
{
// 1. preceding ##?
size_t savePos = current;
int subtoken;
size_t savePos = currentPos;
// skip white space
do {
subtoken = getSubtoken();
} while (subtoken == ' ');
current = savePos;
if (subtoken == PpAtomPaste)
while (peekToken(' '))
++currentPos;
if (peekToken(PpAtomPaste)) {
currentPos = savePos;
return true;
}
// 2. last token and we've been told after this there will be a ##
@ -271,18 +156,18 @@ bool TPpContext::TokenStream::peekTokenizedPasting(bool lastTokenPastes)
// Getting here means the last token will be pasted, after this
// Are we at the last non-whitespace token?
savePos = current;
savePos = currentPos;
bool moreTokens = false;
do {
subtoken = getSubtoken();
if (subtoken == EndOfInput)
if (atEnd())
break;
if (subtoken != ' ') {
if (!peekToken(' ')) {
moreTokens = true;
break;
}
++currentPos;
} while (true);
current = savePos;
currentPos = savePos;
return !moreTokens;
}
@ -291,23 +176,21 @@ bool TPpContext::TokenStream::peekTokenizedPasting(bool lastTokenPastes)
bool TPpContext::TokenStream::peekUntokenizedPasting()
{
// don't return early, have to restore this
size_t savePos = current;
size_t savePos = currentPos;
// skip white-space
int subtoken;
do {
subtoken = getSubtoken();
} while (subtoken == ' ');
while (peekToken(' '))
++currentPos;
// check for ##
bool pasting = false;
if (subtoken == '#') {
subtoken = getSubtoken();
if (subtoken == '#')
if (peekToken('#')) {
++currentPos;
if (peekToken('#'))
pasting = true;
}
current = savePos;
currentPos = savePos;
return pasting;
}

View File

@ -37,6 +37,8 @@
// propagate the 'noContraction' qualifier.
//
#ifndef GLSLANG_WEB
#include "propagateNoContraction.h"
#include <cstdlib>
@ -79,7 +81,7 @@ typedef std::unordered_set<glslang::TIntermBranch*> ReturnBranchNodeSet;
// the node has 'noContraction' qualifier, otherwise false.
bool isPreciseObjectNode(glslang::TIntermTyped* node)
{
return node->getType().getQualifier().noContraction;
return node->getType().getQualifier().isNoContraction();
}
// Returns true if the opcode is a dereferencing one.
@ -864,3 +866,5 @@ void PropagateNoContraction(const glslang::TIntermediate& intermediate)
}
}
};
#endif // GLSLANG_WEB

View File

@ -33,6 +33,8 @@
// POSSIBILITY OF SUCH DAMAGE.
//
#ifndef GLSLANG_WEB
#include "../Include/Common.h"
#include "reflection.h"
#include "LiveTraverser.h"
@ -93,72 +95,133 @@ public:
            // Use a degenerate (empty) set of dereferences to immediately put us at the end of
            // the dereference chain expected by blowUpActiveAggregate.
TList<TIntermBinary*> derefs;
blowUpActiveAggregate(base.getType(), base.getName(), derefs, derefs.end(), -1, -1, 0);
blowUpActiveAggregate(base.getType(), base.getName(), derefs, derefs.end(), -1, -1, 0, 0,
base.getQualifier().storage, true);
}
}
void addAttribute(const TIntermSymbol& base)
void addPipeIOVariable(const TIntermSymbol& base)
{
if (processedDerefs.find(&base) == processedDerefs.end()) {
processedDerefs.insert(&base);
const TString &name = base.getName();
const TType &type = base.getType();
const bool input = base.getQualifier().isPipeInput();
TReflection::TNameToIndex::const_iterator it = reflection.nameToIndex.find(name);
if (it == reflection.nameToIndex.end()) {
reflection.nameToIndex[name] = (int)reflection.indexToAttribute.size();
reflection.indexToAttribute.push_back(TObjectReflection(name, type, 0, mapToGlType(type), 0, 0));
TReflection::TMapIndexToReflection &ioItems =
input ? reflection.indexToPipeInput : reflection.indexToPipeOutput;
TReflection::TNameToIndex &ioMapper =
input ? reflection.pipeInNameToIndex : reflection.pipeOutNameToIndex;
if (reflection.options & EShReflectionUnwrapIOBlocks) {
bool anonymous = IsAnonymous(name);
TString baseName;
if (type.getBasicType() == EbtBlock) {
baseName = anonymous ? TString() : type.getTypeName();
} else {
baseName = anonymous ? TString() : name;
}
// by convention if this is an arrayed block we ignore the array in the reflection
if (type.isArray() && type.getBasicType() == EbtBlock) {
blowUpIOAggregate(input, baseName, TType(type, 0));
} else {
blowUpIOAggregate(input, baseName, type);
}
} else {
TReflection::TNameToIndex::const_iterator it = ioMapper.find(name.c_str());
if (it == ioMapper.end()) {
                    // separate pipe i/o params from uniforms and blocks
                    // 'in' applies only to the first stage's inputs and 'out' only to the last stage's outputs; check the traversal in the call stack.
ioMapper[name.c_str()] = static_cast<int>(ioItems.size());
ioItems.push_back(
TObjectReflection(name.c_str(), type, 0, mapToGlType(type), mapToGlArraySize(type), 0));
EShLanguageMask& stages = ioItems.back().stages;
stages = static_cast<EShLanguageMask>(stages | 1 << intermediate.getStage());
} else {
EShLanguageMask& stages = ioItems[it->second].stages;
stages = static_cast<EShLanguageMask>(stages | 1 << intermediate.getStage());
}
}
}
}
// Lookup or calculate the offset of a block member, using the recursively
// Lookup or calculate the offset of all block members at once, using the recursively
// defined block offset rules.
int getOffset(const TType& type, int index)
void getOffsets(const TType& type, TVector<int>& offsets)
{
const TTypeList& memberList = *type.getStruct();
// Don't calculate offset if one is present, it could be user supplied
// and different than what would be calculated. That is, this is faster,
// but not just an optimization.
if (memberList[index].type->getQualifier().hasOffset())
return memberList[index].type->getQualifier().layoutOffset;
int memberSize;
int dummyStride;
int memberSize = 0;
int offset = 0;
for (int m = 0; m <= index; ++m) {
// modify just the children's view of matrix layout, if there is one for this member
TLayoutMatrix subMatrixLayout = memberList[m].type->getQualifier().layoutMatrix;
int memberAlignment = intermediate.getBaseAlignment(*memberList[m].type, memberSize, dummyStride,
type.getQualifier().layoutPacking == ElpStd140,
subMatrixLayout != ElmNone
? subMatrixLayout == ElmRowMajor
: type.getQualifier().layoutMatrix == ElmRowMajor);
RoundToPow2(offset, memberAlignment);
if (m < index)
offset += memberSize;
}
for (size_t m = 0; m < offsets.size(); ++m) {
// if the user supplied an offset, snap to it now
if (memberList[m].type->getQualifier().hasOffset())
offset = memberList[m].type->getQualifier().layoutOffset;
return offset;
// calculate the offset of the next member and align the current offset to this member
intermediate.updateOffset(type, *memberList[m].type, offset, memberSize);
// save the offset of this member
offsets[m] = offset;
// update for the next member
offset += memberSize;
}
}
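getOffsets above follows an align-record-advance pattern per member, delegating the actual alignment and size rules to intermediate.updateOffset. A standalone sketch of that pattern, with hand-supplied std140-style alignments and sizes for a hypothetical block (these numbers are my illustration, not values computed by glslang):

#include <cstdio>

// Round 'offset' up to the next multiple of a power-of-two 'alignment',
// mirroring the alignment step applied before each member.
static int roundToAlignment(int offset, int alignment) {
    return (offset + alignment - 1) & ~(alignment - 1);
}

int main() {
    // Hypothetical block: { vec3 a; float b; mat4 c; } with std140-like numbers.
    struct Member { const char* name; int alignment; int size; };
    const Member members[] = { { "a", 16, 12 }, { "b", 4, 4 }, { "c", 16, 64 } };

    int offset = 0;
    for (const Member& m : members) {
        offset = roundToAlignment(offset, m.alignment);  // align to this member
        std::printf("%s: offset %d\n", m.name, offset);  // a: 0, b: 12, c: 16
        offset += m.size;                                // advance past it
    }
    return 0;
}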
// Calculate the block data size.
// Block arrayness is not taken into account, each element is backed by a separate buffer.
int getBlockSize(const TType& blockType)
// Calculate the stride of an array type
int getArrayStride(const TType& baseType, const TType& type)
{
const TTypeList& memberList = *blockType.getStruct();
int lastIndex = (int)memberList.size() - 1;
int lastOffset = getOffset(blockType, lastIndex);
int dummySize;
int stride;
int lastMemberSize;
int dummyStride;
intermediate.getBaseAlignment(*memberList[lastIndex].type, lastMemberSize, dummyStride,
blockType.getQualifier().layoutPacking == ElpStd140,
blockType.getQualifier().layoutMatrix == ElmRowMajor);
// consider blocks to have 0 stride, so that all offsets are relative to the start of their block
if (type.getBasicType() == EbtBlock)
return 0;
return lastOffset + lastMemberSize;
TLayoutMatrix subMatrixLayout = type.getQualifier().layoutMatrix;
intermediate.getMemberAlignment(type, dummySize, stride,
baseType.getQualifier().layoutPacking,
subMatrixLayout != ElmNone
? subMatrixLayout == ElmRowMajor
: baseType.getQualifier().layoutMatrix == ElmRowMajor);
return stride;
}
// count the total number of leaf members from iterating out of a block type
int countAggregateMembers(const TType& parentType)
{
if (! parentType.isStruct())
return 1;
const bool strictArraySuffix = (reflection.options & EShReflectionStrictArraySuffix);
bool blockParent = (parentType.getBasicType() == EbtBlock && parentType.getQualifier().storage == EvqBuffer);
const TTypeList &memberList = *parentType.getStruct();
int ret = 0;
for (size_t i = 0; i < memberList.size(); i++)
{
const TType &memberType = *memberList[i].type;
int numMembers = countAggregateMembers(memberType);
// for sized arrays of structs, apply logic to expand out the same as we would below in
// blowUpActiveAggregate
if (memberType.isArray() && ! memberType.getArraySizes()->hasUnsized() && memberType.isStruct()) {
if (! strictArraySuffix || ! blockParent)
numMembers *= memberType.getArraySizes()->getCumulativeSize();
}
ret += numMembers;
}
return ret;
}
// Traverse the provided deref chain, including the base, and
@ -169,8 +232,19 @@ public:
// arraySize tracks, just for the final dereference in the chain, if there was a specific known size.
// A value of 0 for arraySize will mean to use the full array's size.
void blowUpActiveAggregate(const TType& baseType, const TString& baseName, const TList<TIntermBinary*>& derefs,
TList<TIntermBinary*>::const_iterator deref, int offset, int blockIndex, int arraySize)
TList<TIntermBinary*>::const_iterator deref, int offset, int blockIndex, int arraySize,
int topLevelArrayStride, TStorageQualifier baseStorage, bool active)
{
// when strictArraySuffix is enabled, we closely follow the rules from ARB_program_interface_query.
// Broadly:
// * arrays-of-structs always have a [x] suffix.
// * with array-of-struct variables in the root of a buffer block, only ever return [0].
// * otherwise, array suffixes are added whenever we iterate, even if that means expanding out an array.
const bool strictArraySuffix = (reflection.options & EShReflectionStrictArraySuffix);
// is this variable inside a buffer block. This flag is set back to false after we iterate inside the first array element.
bool blockParent = (baseType.getBasicType() == EbtBlock && baseType.getQualifier().storage == EvqBuffer);
// process the part of the dereference chain that was explicit in the shader
TString name = baseName;
const TType* terminalType = &baseType;
@ -179,29 +253,54 @@ public:
terminalType = &visitNode->getType();
int index;
switch (visitNode->getOp()) {
case EOpIndexIndirect:
case EOpIndexIndirect: {
int stride = getArrayStride(baseType, visitNode->getLeft()->getType());
if (topLevelArrayStride == 0)
topLevelArrayStride = stride;
// Visit all the indices of this array, and for each one add on the remaining dereferencing
for (int i = 0; i < std::max(visitNode->getLeft()->getType().getOuterArraySize(), 1); ++i) {
TString newBaseName = name;
if (baseType.getBasicType() != EbtBlock)
if (strictArraySuffix && blockParent)
newBaseName.append(TString("[0]"));
else if (strictArraySuffix || baseType.getBasicType() != EbtBlock)
newBaseName.append(TString("[") + String(i) + "]");
TList<TIntermBinary*>::const_iterator nextDeref = deref;
++nextDeref;
TType derefType(*terminalType, 0);
blowUpActiveAggregate(derefType, newBaseName, derefs, nextDeref, offset, blockIndex, arraySize);
blowUpActiveAggregate(*terminalType, newBaseName, derefs, nextDeref, offset, blockIndex, arraySize,
topLevelArrayStride, baseStorage, active);
if (offset >= 0)
offset += stride;
}
// it was all completed in the recursive calls above
return;
case EOpIndexDirect:
}
case EOpIndexDirect: {
int stride = getArrayStride(baseType, visitNode->getLeft()->getType());
index = visitNode->getRight()->getAsConstantUnion()->getConstArray()[0].getIConst();
if (baseType.getBasicType() != EbtBlock)
if (strictArraySuffix && blockParent) {
name.append(TString("[0]"));
} else if (strictArraySuffix || baseType.getBasicType() != EbtBlock) {
name.append(TString("[") + String(index) + "]");
if (offset >= 0)
offset += stride * index;
}
if (topLevelArrayStride == 0)
topLevelArrayStride = stride;
blockParent = false;
break;
}
case EOpIndexDirectStruct:
index = visitNode->getRight()->getAsConstantUnion()->getConstArray()[0].getIConst();
if (offset >= 0)
offset += getOffset(visitNode->getLeft()->getType(), index);
offset += intermediate.getOffset(visitNode->getLeft()->getType(), index);
if (name.size() > 0)
name.append(".");
name.append((*visitNode->getLeft()->getType().getStruct())[index].type->getFieldName());
@ -213,24 +312,65 @@ public:
// if the terminalType is still too coarse a granularity, this is still an aggregate to expand, expand it...
if (! isReflectionGranularity(*terminalType)) {
// the base offset of this node, that children are relative to
int baseOffset = offset;
if (terminalType->isArray()) {
// Visit all the indices of this array, and for each one,
// fully explode the remaining aggregate to dereference
for (int i = 0; i < std::max(terminalType->getOuterArraySize(), 1); ++i) {
int stride = 0;
if (offset >= 0)
stride = getArrayStride(baseType, *terminalType);
if (topLevelArrayStride == 0)
topLevelArrayStride = stride;
int arrayIterateSize = std::max(terminalType->getOuterArraySize(), 1);
// for top-level arrays in blocks, only expand [0] to avoid explosion of items
if (strictArraySuffix && blockParent)
arrayIterateSize = 1;
for (int i = 0; i < arrayIterateSize; ++i) {
TString newBaseName = name;
newBaseName.append(TString("[") + String(i) + "]");
TType derefType(*terminalType, 0);
blowUpActiveAggregate(derefType, newBaseName, derefs, derefs.end(), offset, blockIndex, 0);
if (offset >= 0)
offset = baseOffset + stride * i;
blowUpActiveAggregate(derefType, newBaseName, derefs, derefs.end(), offset, blockIndex, 0,
topLevelArrayStride, baseStorage, active);
}
} else {
// Visit all members of this aggregate, and for each one,
// fully explode the remaining aggregate to dereference
const TTypeList& typeList = *terminalType->getStruct();
TVector<int> memberOffsets;
if (baseOffset >= 0) {
memberOffsets.resize(typeList.size());
getOffsets(*terminalType, memberOffsets);
}
for (int i = 0; i < (int)typeList.size(); ++i) {
TString newBaseName = name;
newBaseName.append(TString(".") + typeList[i].type->getFieldName());
if (newBaseName.size() > 0)
newBaseName.append(".");
newBaseName.append(typeList[i].type->getFieldName());
TType derefType(*terminalType, i);
blowUpActiveAggregate(derefType, newBaseName, derefs, derefs.end(), offset, blockIndex, 0);
if (offset >= 0)
offset = baseOffset + memberOffsets[i];
int arrayStride = topLevelArrayStride;
if (terminalType->getBasicType() == EbtBlock && terminalType->getQualifier().storage == EvqBuffer &&
derefType.isArray()) {
arrayStride = getArrayStride(baseType, derefType);
}
blowUpActiveAggregate(derefType, newBaseName, derefs, derefs.end(), offset, blockIndex, 0,
arrayStride, baseStorage, active);
}
}
@ -238,6 +378,10 @@ public:
return;
}
if ((reflection.options & EShReflectionBasicArraySuffix) && terminalType->isArray()) {
name.append(TString("[0]"));
}
// Finally, add a full string to the reflection database, and update the array size if necessary.
// If the dereferenced entity to record is an array, compute the size and update the maximum size.
@ -245,15 +389,100 @@ public:
if (arraySize == 0)
arraySize = mapToGlArraySize(*terminalType);
TReflection::TNameToIndex::const_iterator it = reflection.nameToIndex.find(name);
TReflection::TMapIndexToReflection& variables = reflection.GetVariableMapForStorage(baseStorage);
TReflection::TNameToIndex::const_iterator it = reflection.nameToIndex.find(name.c_str());
if (it == reflection.nameToIndex.end()) {
reflection.nameToIndex[name] = (int)reflection.indexToUniform.size();
reflection.indexToUniform.push_back(TObjectReflection(name, *terminalType, offset,
mapToGlType(*terminalType),
arraySize, blockIndex));
} else if (arraySize > 1) {
int& reflectedArraySize = reflection.indexToUniform[it->second].size;
reflectedArraySize = std::max(arraySize, reflectedArraySize);
int uniformIndex = (int)variables.size();
reflection.nameToIndex[name.c_str()] = uniformIndex;
variables.push_back(TObjectReflection(name.c_str(), *terminalType, offset, mapToGlType(*terminalType),
arraySize, blockIndex));
if (terminalType->isArray()) {
variables.back().arrayStride = getArrayStride(baseType, *terminalType);
if (topLevelArrayStride == 0)
topLevelArrayStride = variables.back().arrayStride;
}
if ((reflection.options & EShReflectionSeparateBuffers) && terminalType->isAtomic())
reflection.atomicCounterUniformIndices.push_back(uniformIndex);
variables.back().topLevelArrayStride = topLevelArrayStride;
if ((reflection.options & EShReflectionAllBlockVariables) && active) {
EShLanguageMask& stages = variables.back().stages;
stages = static_cast<EShLanguageMask>(stages | 1 << intermediate.getStage());
}
} else {
if (arraySize > 1) {
int& reflectedArraySize = variables[it->second].size;
reflectedArraySize = std::max(arraySize, reflectedArraySize);
}
if ((reflection.options & EShReflectionAllBlockVariables) && active) {
EShLanguageMask& stages = variables[it->second].stages;
stages = static_cast<EShLanguageMask>(stages | 1 << intermediate.getStage());
}
}
}
// similar to blowUpActiveAggregate, but with simpler rules and no dereferences to follow.
void blowUpIOAggregate(bool input, const TString &baseName, const TType &type)
{
TString name = baseName;
// if the type is still too coarse a granularity, this is still an aggregate to expand, expand it...
if (! isReflectionGranularity(type)) {
if (type.isArray()) {
// Visit all the indices of this array, and for each one,
// fully explode the remaining aggregate to dereference
for (int i = 0; i < std::max(type.getOuterArraySize(), 1); ++i) {
TString newBaseName = name;
newBaseName.append(TString("[") + String(i) + "]");
TType derefType(type, 0);
blowUpIOAggregate(input, newBaseName, derefType);
}
} else {
// Visit all members of this aggregate, and for each one,
// fully explode the remaining aggregate to dereference
const TTypeList& typeList = *type.getStruct();
for (int i = 0; i < (int)typeList.size(); ++i) {
TString newBaseName = name;
if (newBaseName.size() > 0)
newBaseName.append(".");
newBaseName.append(typeList[i].type->getFieldName());
TType derefType(type, i);
blowUpIOAggregate(input, newBaseName, derefType);
}
}
// it was all completed in the recursive calls above
return;
}
if ((reflection.options & EShReflectionBasicArraySuffix) && type.isArray()) {
name.append(TString("[0]"));
}
TReflection::TMapIndexToReflection &ioItems =
input ? reflection.indexToPipeInput : reflection.indexToPipeOutput;
std::string namespacedName = input ? "in " : "out ";
namespacedName += name.c_str();
TReflection::TNameToIndex::const_iterator it = reflection.nameToIndex.find(namespacedName);
if (it == reflection.nameToIndex.end()) {
reflection.nameToIndex[namespacedName] = (int)ioItems.size();
ioItems.push_back(
TObjectReflection(name.c_str(), type, 0, mapToGlType(type), mapToGlArraySize(type), 0));
EShLanguageMask& stages = ioItems.back().stages;
stages = static_cast<EShLanguageMask>(stages | 1 << intermediate.getStage());
} else {
EShLanguageMask& stages = ioItems[it->second].stages;
stages = static_cast<EShLanguageMask>(stages | 1 << intermediate.getStage());
}
}
@ -303,6 +532,10 @@ public:
anonymous = IsAnonymous(base->getName());
const TString& blockName = base->getType().getTypeName();
TString baseName;
if (! anonymous)
baseName = blockName;
if (base->getType().isArray()) {
TType derefType(base->getType(), 0);
@ -310,9 +543,60 @@ public:
assert(! anonymous);
for (int e = 0; e < base->getType().getCumulativeArraySize(); ++e)
blockIndex = addBlockName(blockName + "[" + String(e) + "]", derefType,
getBlockSize(base->getType()));
intermediate.getBlockSize(base->getType()));
baseName.append(TString("[0]"));
} else
blockIndex = addBlockName(blockName, base->getType(), getBlockSize(base->getType()));
blockIndex = addBlockName(blockName, base->getType(), intermediate.getBlockSize(base->getType()));
if (reflection.options & EShReflectionAllBlockVariables) {
            // Use a degenerate (empty) set of dereferences to immediately put us at the end of
            // the dereference chain expected by blowUpActiveAggregate.
TList<TIntermBinary*> derefs;
// because we don't have any derefs, the first thing blowUpActiveAggregate will do is iterate over each
// member in the struct definition. This will lose any information about whether the parent was a buffer
// block. So if we're using strict array rules which don't expand the first child of a buffer block we
// instead iterate over the children here.
const bool strictArraySuffix = (reflection.options & EShReflectionStrictArraySuffix);
bool blockParent = (base->getType().getBasicType() == EbtBlock && base->getQualifier().storage == EvqBuffer);
if (strictArraySuffix && blockParent) {
TType structDerefType(base->getType(), 0);
const TType &structType = base->getType().isArray() ? structDerefType : base->getType();
const TTypeList& typeList = *structType.getStruct();
TVector<int> memberOffsets;
memberOffsets.resize(typeList.size());
getOffsets(structType, memberOffsets);
for (int i = 0; i < (int)typeList.size(); ++i) {
TType derefType(structType, i);
TString name = baseName;
if (name.size() > 0)
name.append(".");
name.append(typeList[i].type->getFieldName());
// if this member is an array, store the top-level array stride but start the explosion from
// the inner struct type.
if (derefType.isArray() && derefType.isStruct()) {
name.append("[0]");
blowUpActiveAggregate(TType(derefType, 0), name, derefs, derefs.end(), memberOffsets[i],
blockIndex, 0, getArrayStride(structType, derefType),
base->getQualifier().storage, false);
} else {
blowUpActiveAggregate(derefType, name, derefs, derefs.end(), memberOffsets[i], blockIndex,
0, 0, base->getQualifier().storage, false);
}
}
} else {
// otherwise - if we're not using strict array suffix rules, or this isn't a block so we are
// expanding root arrays anyway, just start the iteration from the base block type.
blowUpActiveAggregate(base->getType(), baseName, derefs, derefs.end(), 0, blockIndex, 0, 0,
base->getQualifier().storage, false);
}
}
}
// Process the dereference chain, backward, accumulating the pieces for later forward traversal.
@ -342,27 +626,39 @@ public:
else
baseName = base->getName();
}
blowUpActiveAggregate(base->getType(), baseName, derefs, derefs.begin(), offset, blockIndex, arraySize);
blowUpActiveAggregate(base->getType(), baseName, derefs, derefs.begin(), offset, blockIndex, arraySize, 0,
base->getQualifier().storage, true);
}
int addBlockName(const TString& name, const TType& type, int size)
{
TReflection::TMapIndexToReflection& blocks = reflection.GetBlockMapForStorage(type.getQualifier().storage);
int blockIndex;
TReflection::TNameToIndex::const_iterator it = reflection.nameToIndex.find(name);
if (reflection.nameToIndex.find(name) == reflection.nameToIndex.end()) {
blockIndex = (int)reflection.indexToUniformBlock.size();
reflection.nameToIndex[name] = blockIndex;
reflection.indexToUniformBlock.push_back(TObjectReflection(name, type, -1, -1, size, -1));
} else
TReflection::TNameToIndex::const_iterator it = reflection.nameToIndex.find(name.c_str());
if (reflection.nameToIndex.find(name.c_str()) == reflection.nameToIndex.end()) {
blockIndex = (int)blocks.size();
reflection.nameToIndex[name.c_str()] = blockIndex;
blocks.push_back(TObjectReflection(name.c_str(), type, -1, -1, size, -1));
blocks.back().numMembers = countAggregateMembers(type);
EShLanguageMask& stages = blocks.back().stages;
stages = static_cast<EShLanguageMask>(stages | 1 << intermediate.getStage());
} else {
blockIndex = it->second;
EShLanguageMask& stages = blocks[blockIndex].stages;
stages = static_cast<EShLanguageMask>(stages | 1 << intermediate.getStage());
}
return blockIndex;
}
// Are we at a level in a dereference chain at which individual active uniform queries are made?
bool isReflectionGranularity(const TType& type)
{
return type.getBasicType() != EbtBlock && type.getBasicType() != EbtStruct;
return type.getBasicType() != EbtBlock && type.getBasicType() != EbtStruct && !type.isArrayOfArrays();
}
// For a binary operation indexing into an aggregate, chase down the base of the aggregate.
@ -415,7 +711,6 @@ public:
case EsdBuffer:
return GL_SAMPLER_BUFFER;
}
#ifdef AMD_EXTENSIONS
case EbtFloat16:
switch ((int)sampler.dim) {
case Esd1D:
@ -444,7 +739,6 @@ public:
case EsdBuffer:
return GL_FLOAT16_SAMPLER_BUFFER_AMD;
}
#endif
case EbtInt:
switch ((int)sampler.dim) {
case Esd1D:
@ -507,7 +801,6 @@ public:
case EsdBuffer:
return GL_IMAGE_BUFFER;
}
#ifdef AMD_EXTENSIONS
case EbtFloat16:
switch ((int)sampler.dim) {
case Esd1D:
@ -526,7 +819,6 @@ public:
case EsdBuffer:
return GL_FLOAT16_IMAGE_BUFFER_AMD;
}
#endif
case EbtInt:
switch ((int)sampler.dim) {
case Esd1D:
@ -592,9 +884,7 @@ public:
switch (type.getBasicType()) {
case EbtFloat: return GL_FLOAT_VEC2 + offset;
case EbtDouble: return GL_DOUBLE_VEC2 + offset;
#ifdef AMD_EXTENSIONS
case EbtFloat16: return GL_FLOAT16_VEC2_NV + offset;
#endif
case EbtInt: return GL_INT_VEC2 + offset;
case EbtUint: return GL_UNSIGNED_INT_VEC2 + offset;
case EbtInt64: return GL_INT64_ARB + offset;
@ -654,7 +944,6 @@ public:
default: return 0;
}
}
#ifdef AMD_EXTENSIONS
case EbtFloat16:
switch (type.getMatrixCols()) {
case 2:
@ -679,7 +968,6 @@ public:
default: return 0;
}
}
#endif
default:
return 0;
}
@ -688,9 +976,7 @@ public:
switch (type.getBasicType()) {
case EbtFloat: return GL_FLOAT;
case EbtDouble: return GL_DOUBLE;
#ifdef AMD_EXTENSIONS
case EbtFloat16: return GL_FLOAT16_NV;
#endif
case EbtInt: return GL_INT;
case EbtUint: return GL_UNSIGNED_INT;
case EbtInt64: return GL_INT64_ARB;
@ -746,8 +1032,47 @@ void TReflectionTraverser::visitSymbol(TIntermSymbol* base)
if (base->getQualifier().storage == EvqUniform)
addUniform(*base);
if (intermediate.getStage() == EShLangVertex && base->getQualifier().isPipeInput())
addAttribute(*base);
if ((intermediate.getStage() == reflection.firstStage && base->getQualifier().isPipeInput()) ||
(intermediate.getStage() == reflection.lastStage && base->getQualifier().isPipeOutput()))
addPipeIOVariable(*base);
}
//
// Implement TObjectReflection methods.
//
TObjectReflection::TObjectReflection(const std::string &pName, const TType &pType, int pOffset, int pGLDefineType,
int pSize, int pIndex)
: name(pName), offset(pOffset), glDefineType(pGLDefineType), size(pSize), index(pIndex), counterIndex(-1),
numMembers(-1), arrayStride(0), topLevelArrayStride(0), stages(EShLanguageMask(0)), type(pType.clone())
{
}
int TObjectReflection::getBinding() const
{
if (type == nullptr || !type->getQualifier().hasBinding())
return -1;
return type->getQualifier().layoutBinding;
}
void TObjectReflection::dump() const
{
printf("%s: offset %d, type %x, size %d, index %d, binding %d, stages %d", name.c_str(), offset, glDefineType, size,
index, getBinding(), stages);
if (counterIndex != -1)
printf(", counter %d", counterIndex);
if (numMembers != -1)
printf(", numMembers %d", numMembers);
if (arrayStride != 0)
printf(", arrayStride %d", arrayStride);
if (topLevelArrayStride != 0)
printf(", topLevelArrayStride %d", topLevelArrayStride);
printf("\n");
}
//
@ -768,14 +1093,32 @@ void TReflection::buildAttributeReflection(EShLanguage stage, const TIntermediat
// build counter block index associations for buffers
void TReflection::buildCounterIndices(const TIntermediate& intermediate)
{
#ifdef ENABLE_HLSL
// search for ones that have counters
for (int i = 0; i < int(indexToUniformBlock.size()); ++i) {
const TString counterName(intermediate.addCounterBufferName(indexToUniformBlock[i].name));
const TString counterName(intermediate.addCounterBufferName(indexToUniformBlock[i].name).c_str());
const int index = getIndex(counterName);
if (index >= 0)
indexToUniformBlock[i].counterIndex = index;
}
#endif
}
// build Shader Stages mask for all uniforms
void TReflection::buildUniformStageMask(const TIntermediate& intermediate)
{
if (options & EShReflectionAllBlockVariables)
return;
for (int i = 0; i < int(indexToUniform.size()); ++i) {
indexToUniform[i].stages = static_cast<EShLanguageMask>(indexToUniform[i].stages | 1 << intermediate.getStage());
}
for (int i = 0; i < int(indexToBufferVariable.size()); ++i) {
indexToBufferVariable[i].stages =
static_cast<EShLanguageMask>(indexToBufferVariable[i].stages | 1 << intermediate.getStage());
}
}
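buildUniformStageMask, like the other 'stages' updates in this file, just ORs (1 << stage) into a bitmask. A small sketch of the mask arithmetic it relies on, assuming only the public EShLanguage/EShLanguageMask enums from ShaderLang.h (the include path may vary with how glslang is installed):

#include <cassert>
#include "glslang/Public/ShaderLang.h"   // EShLanguage / EShLanguageMask

// Each stage contributes the bit (1 << stage), which matches the
// EShLang*Mask values, so callers can test membership with a simple AND.
int main() {
    EShLanguageMask stages = EShLanguageMask(0);
    stages = static_cast<EShLanguageMask>(stages | (1 << EShLangVertex));
    stages = static_cast<EShLanguageMask>(stages | (1 << EShLangFragment));
    assert((stages & EShLangVertexMask) != 0);
    assert((stages & EShLangFragmentMask) != 0);
    assert((stages & EShLangComputeMask) == 0);
    return 0;
}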
// Merge live symbols from 'intermediate' into the existing reflection database.
@ -803,6 +1146,7 @@ bool TReflection::addStage(EShLanguage stage, const TIntermediate& intermediate)
}
buildCounterIndices(intermediate);
buildUniformStageMask(intermediate);
return true;
}
@ -819,9 +1163,24 @@ void TReflection::dump()
indexToUniformBlock[i].dump();
printf("\n");
printf("Vertex attribute reflection:\n");
for (size_t i = 0; i < indexToAttribute.size(); ++i)
indexToAttribute[i].dump();
printf("Buffer variable reflection:\n");
for (size_t i = 0; i < indexToBufferVariable.size(); ++i)
indexToBufferVariable[i].dump();
printf("\n");
printf("Buffer block reflection:\n");
for (size_t i = 0; i < indexToBufferBlock.size(); ++i)
indexToBufferBlock[i].dump();
printf("\n");
printf("Pipeline input reflection:\n");
for (size_t i = 0; i < indexToPipeInput.size(); ++i)
indexToPipeInput[i].dump();
printf("\n");
printf("Pipeline output reflection:\n");
for (size_t i = 0; i < indexToPipeOutput.size(); ++i)
indexToPipeOutput[i].dump();
printf("\n");
if (getLocalSize(0) > 1) {
@ -841,3 +1200,5 @@ void TReflection::dump()
}
} // end namespace glslang
#endif // GLSLANG_WEB

View File

@ -33,6 +33,8 @@
// POSSIBILITY OF SUCH DAMAGE.
//
#ifndef GLSLANG_WEB
#ifndef _REFLECTION_INCLUDED
#define _REFLECTION_INCLUDED
@ -52,49 +54,11 @@ class TIntermediate;
class TIntermAggregate;
class TReflectionTraverser;
// Data needed for just a single object at the granularity exchanged by the reflection API
class TObjectReflection {
public:
TObjectReflection(const TString& pName, const TType& pType, int pOffset, int pGLDefineType, int pSize, int pIndex) :
name(pName), offset(pOffset),
glDefineType(pGLDefineType), size(pSize), index(pIndex), counterIndex(-1), type(pType.clone()) { }
const TType* const getType() const { return type; }
int getBinding() const
{
if (type == nullptr || !type->getQualifier().hasBinding())
return -1;
return type->getQualifier().layoutBinding;
}
void dump() const
{
printf("%s: offset %d, type %x, size %d, index %d, binding %d",
name.c_str(), offset, glDefineType, size, index, getBinding() );
if (counterIndex != -1)
printf(", counter %d", counterIndex);
printf("\n");
}
static TObjectReflection badReflection() { return TObjectReflection(); }
TString name;
int offset;
int glDefineType;
int size; // data size in bytes for a block, array size for a (non-block) object that's an array
int index;
int counterIndex;
protected:
TObjectReflection() : offset(-1), glDefineType(-1), size(-1), index(-1), type(nullptr) { }
const TType* type;
};
// The full reflection database
class TReflection {
public:
TReflection() : badReflection(TObjectReflection::badReflection())
TReflection(EShReflectionOptions opts, EShLanguage first, EShLanguage last)
: options(opts), firstStage(first), lastStage(last), badReflection(TObjectReflection::badReflection())
{
for (int dim=0; dim<3; ++dim)
localSize[dim] = 0;
@ -125,17 +89,57 @@ public:
return badReflection;
}
// for mapping an attribute index to the attribute's description
int getNumAttributes() { return (int)indexToAttribute.size(); }
const TObjectReflection& getAttribute(int i) const
    // for mapping a pipeline input index to the input's description
int getNumPipeInputs() { return (int)indexToPipeInput.size(); }
const TObjectReflection& getPipeInput(int i) const
{
if (i >= 0 && i < (int)indexToAttribute.size())
return indexToAttribute[i];
if (i >= 0 && i < (int)indexToPipeInput.size())
return indexToPipeInput[i];
else
return badReflection;
}
// for mapping any name to its index (block names, uniform names and attribute names)
    // for mapping a pipeline output index to the output's description
int getNumPipeOutputs() { return (int)indexToPipeOutput.size(); }
const TObjectReflection& getPipeOutput(int i) const
{
if (i >= 0 && i < (int)indexToPipeOutput.size())
return indexToPipeOutput[i];
else
return badReflection;
}
// for mapping from an atomic counter to the uniform index
int getNumAtomicCounters() const { return (int)atomicCounterUniformIndices.size(); }
const TObjectReflection& getAtomicCounter(int i) const
{
if (i >= 0 && i < (int)atomicCounterUniformIndices.size())
return getUniform(atomicCounterUniformIndices[i]);
else
return badReflection;
}
// for mapping a buffer variable index to a buffer variable object's description
int getNumBufferVariables() { return (int)indexToBufferVariable.size(); }
const TObjectReflection& getBufferVariable(int i) const
{
if (i >= 0 && i < (int)indexToBufferVariable.size())
return indexToBufferVariable[i];
else
return badReflection;
}
// for mapping a storage block index to the storage block's description
int getNumStorageBuffers() const { return (int)indexToBufferBlock.size(); }
const TObjectReflection& getStorageBufferBlock(int i) const
{
if (i >= 0 && i < (int)indexToBufferBlock.size())
return indexToBufferBlock[i];
else
return badReflection;
}
// for mapping any name to its index (block names, uniform names and input/output names)
int getIndex(const char* name) const
{
TNameToIndex::const_iterator it = nameToIndex.find(name);
@ -148,6 +152,20 @@ public:
// see getIndex(const char*)
int getIndex(const TString& name) const { return getIndex(name.c_str()); }
// for mapping any name to its index (only pipe input/output names)
int getPipeIOIndex(const char* name, const bool inOrOut) const
{
TNameToIndex::const_iterator it = inOrOut ? pipeInNameToIndex.find(name) : pipeOutNameToIndex.find(name);
if (it == (inOrOut ? pipeInNameToIndex.end() : pipeOutNameToIndex.end()))
return -1;
else
return it->second;
}
// see getPipeIOIndex(const char*, const bool)
int getPipeIOIndex(const TString& name, const bool inOrOut) const { return getPipeIOIndex(name.c_str(), inOrOut); }
// Thread local size
unsigned getLocalSize(int dim) const { return dim <= 2 ? localSize[dim] : 0; }
@ -157,17 +175,43 @@ protected:
friend class glslang::TReflectionTraverser;
void buildCounterIndices(const TIntermediate&);
void buildUniformStageMask(const TIntermediate& intermediate);
void buildAttributeReflection(EShLanguage, const TIntermediate&);
// Need a TString hash: typedef std::unordered_map<TString, int> TNameToIndex;
typedef std::map<TString, int> TNameToIndex;
typedef std::map<std::string, int> TNameToIndex;
typedef std::vector<TObjectReflection> TMapIndexToReflection;
typedef std::vector<int> TIndices;
TMapIndexToReflection& GetBlockMapForStorage(TStorageQualifier storage)
{
if ((options & EShReflectionSeparateBuffers) && storage == EvqBuffer)
return indexToBufferBlock;
return indexToUniformBlock;
}
TMapIndexToReflection& GetVariableMapForStorage(TStorageQualifier storage)
{
if ((options & EShReflectionSeparateBuffers) && storage == EvqBuffer)
return indexToBufferVariable;
return indexToUniform;
}
EShReflectionOptions options;
EShLanguage firstStage;
EShLanguage lastStage;
TObjectReflection badReflection; // returned for queries of -1 or generally out-of-range values; holds the expected descriptions for this case
TNameToIndex nameToIndex; // maps names to indexes; can hold all types of data: uniform/buffer and which function names have been processed
TNameToIndex pipeInNameToIndex; // maps pipe in names to indexes, this is a fix to separate pipe I/O from uniforms and buffers.
TNameToIndex pipeOutNameToIndex; // maps pipe out names to indexes, this is a fix to separate pipe I/O from uniforms and buffers.
TMapIndexToReflection indexToUniform;
TMapIndexToReflection indexToUniformBlock;
TMapIndexToReflection indexToAttribute;
TMapIndexToReflection indexToBufferVariable;
TMapIndexToReflection indexToBufferBlock;
TMapIndexToReflection indexToPipeInput;
TMapIndexToReflection indexToPipeOutput;
TIndices atomicCounterUniformIndices;
unsigned int localSize[3];
};
@ -175,3 +219,5 @@ protected:
} // end namespace glslang
#endif // _REFLECTION_INCLUDED
#endif // GLSLANG_WEB
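As a brief sketch of how the new pipe-I/O tables above are meant to be queried (dumpPipeInputs is a hypothetical helper; the TReflection is assumed to have already been populated by the reflection traverser — applications normally reach this data through TProgram rather than this internal class):

// Hypothetical helper: list every reflected pipeline input and confirm that the
// name-based lookup added in this change finds the same entry. (Uses <cstdio>.)
void dumpPipeInputs(glslang::TReflection& reflection)
{
    for (int i = 0; i < reflection.getNumPipeInputs(); ++i) {
        const glslang::TObjectReflection& input = reflection.getPipeInput(i);
        input.dump();  // name, offset, type, size, index, binding
        // 'true' selects the pipe-input table, 'false' would select the outputs;
        // -1 means the name is unknown.
        if (reflection.getPipeIOIndex(input.name.c_str(), true) != i)
            printf("unexpected: name lookup for %s did not return %d\n", input.name.c_str(), i);
    }
}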


@ -2,7 +2,25 @@ add_library(OSDependent STATIC ossource.cpp ../osinclude.h)
set_property(TARGET OSDependent PROPERTY FOLDER glslang)
set_property(TARGET OSDependent PROPERTY POSITION_INDEPENDENT_CODE ON)
# Link pthread
set(CMAKE_THREAD_PREFER_PTHREAD ON)
if(${CMAKE_VERSION} VERSION_LESS "3.1.0" OR CMAKE_CROSSCOMPILING)
# Needed as long as we support CMake 2.8 for Ubuntu 14.04,
# which does not support the recommended Threads::Threads target.
# https://cmake.org/cmake/help/v2.8.12/cmake.html#module:FindThreads
# Also needed when cross-compiling to work around
# https://gitlab.kitware.com/cmake/cmake/issues/16920
find_package(Threads)
target_link_libraries(OSDependent ${CMAKE_THREAD_LIBS_INIT})
else()
# This is the recommended way, so we use it for 3.1+.
set(THREADS_PREFER_PTHREAD_FLAG ON)
find_package(Threads)
target_link_libraries(OSDependent Threads::Threads)
endif()
if(ENABLE_GLSLANG_INSTALL)
install(TARGETS OSDependent
install(TARGETS OSDependent EXPORT OSDependentTargets
ARCHIVE DESTINATION ${CMAKE_INSTALL_LIBDIR})
install(EXPORT OSDependentTargets DESTINATION ${CMAKE_INSTALL_LIBDIR}/cmake)
endif(ENABLE_GLSLANG_INSTALL)


@ -45,7 +45,10 @@
#include <stdint.h>
#include <cstdio>
#include <sys/time.h>
#if !defined(__Fuchsia__)
#include <sys/resource.h>
#endif
namespace glslang {
@ -70,7 +73,7 @@ static void DetachThreadLinux(void *)
//
void OS_CleanupThreadData(void)
{
#ifdef __ANDROID__
#if defined(__ANDROID__) || defined(__Fuchsia__)
DetachThreadLinux(NULL);
#else
int old_cancel_state, old_cancel_type;


@ -0,0 +1,24 @@
add_executable(glslang.js "glslang.js.cpp")
glslang_set_link_args(glslang.js)
target_link_libraries(glslang.js glslang SPIRV)
if(EMSCRIPTEN)
set_target_properties(glslang.js PROPERTIES
OUTPUT_NAME "glslang"
SUFFIX ".js")
em_link_pre_js(glslang.js "${CMAKE_CURRENT_SOURCE_DIR}/glslang.pre.js")
target_link_options(glslang.js PRIVATE
"SHELL:--bind -s MODULARIZE=1")
if(ENABLE_EMSCRIPTEN_ENVIRONMENT_NODE)
target_link_options(glslang.js PRIVATE
"SHELL:-s ENVIRONMENT=node -s BINARYEN_ASYNC_COMPILATION=0")
else()
target_link_options(glslang.js PRIVATE
"SHELL:-s ENVIRONMENT=web,worker")
endif()
if(NOT ENABLE_EMSCRIPTEN_ENVIRONMENT_NODE)
add_custom_command(TARGET glslang.js POST_BUILD
COMMAND cat ${CMAKE_CURRENT_SOURCE_DIR}/glslang.after.js >> ${CMAKE_CURRENT_BINARY_DIR}/glslang.js)
endif()
endif(EMSCRIPTEN)


@ -0,0 +1,26 @@
export default (() => {
const initialize = () => {
return new Promise(resolve => {
Module({
locateFile() {
const i = import.meta.url.lastIndexOf('/')
return import.meta.url.substring(0, i) + '/glslang.wasm';
},
onRuntimeInitialized() {
resolve({
compileGLSLZeroCopy: this.compileGLSLZeroCopy,
compileGLSL: this.compileGLSL,
});
},
});
});
};
let instance;
return () => {
if (!instance) {
instance = initialize();
}
return instance;
};
})();


@ -0,0 +1,269 @@
//
// Copyright (C) 2019 Google, Inc.
//
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
//
// Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
//
// Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
//
// Neither the name of 3Dlabs Inc. Ltd. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
// COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
// POSSIBILITY OF SUCH DAMAGE.
//
#include <cstdio>
#include <cstdint>
#include <memory>
#ifdef __EMSCRIPTEN__
#include <emscripten.h>
#endif
#include "../../../SPIRV/GlslangToSpv.h"
#include "../../../glslang/Public/ShaderLang.h"
#ifndef __EMSCRIPTEN__
#define EMSCRIPTEN_KEEPALIVE
#endif
const TBuiltInResource DefaultTBuiltInResource = {
/* .MaxLights = */ 32,
/* .MaxClipPlanes = */ 6,
/* .MaxTextureUnits = */ 32,
/* .MaxTextureCoords = */ 32,
/* .MaxVertexAttribs = */ 64,
/* .MaxVertexUniformComponents = */ 4096,
/* .MaxVaryingFloats = */ 64,
/* .MaxVertexTextureImageUnits = */ 32,
/* .MaxCombinedTextureImageUnits = */ 80,
/* .MaxTextureImageUnits = */ 32,
/* .MaxFragmentUniformComponents = */ 4096,
/* .MaxDrawBuffers = */ 32,
/* .MaxVertexUniformVectors = */ 128,
/* .MaxVaryingVectors = */ 8,
/* .MaxFragmentUniformVectors = */ 16,
/* .MaxVertexOutputVectors = */ 16,
/* .MaxFragmentInputVectors = */ 15,
/* .MinProgramTexelOffset = */ -8,
/* .MaxProgramTexelOffset = */ 7,
/* .MaxClipDistances = */ 8,
/* .MaxComputeWorkGroupCountX = */ 65535,
/* .MaxComputeWorkGroupCountY = */ 65535,
/* .MaxComputeWorkGroupCountZ = */ 65535,
/* .MaxComputeWorkGroupSizeX = */ 1024,
/* .MaxComputeWorkGroupSizeY = */ 1024,
/* .MaxComputeWorkGroupSizeZ = */ 64,
/* .MaxComputeUniformComponents = */ 1024,
/* .MaxComputeTextureImageUnits = */ 16,
/* .MaxComputeImageUniforms = */ 8,
/* .MaxComputeAtomicCounters = */ 8,
/* .MaxComputeAtomicCounterBuffers = */ 1,
/* .MaxVaryingComponents = */ 60,
/* .MaxVertexOutputComponents = */ 64,
/* .MaxGeometryInputComponents = */ 64,
/* .MaxGeometryOutputComponents = */ 128,
/* .MaxFragmentInputComponents = */ 128,
/* .MaxImageUnits = */ 8,
/* .MaxCombinedImageUnitsAndFragmentOutputs = */ 8,
/* .MaxCombinedShaderOutputResources = */ 8,
/* .MaxImageSamples = */ 0,
/* .MaxVertexImageUniforms = */ 0,
/* .MaxTessControlImageUniforms = */ 0,
/* .MaxTessEvaluationImageUniforms = */ 0,
/* .MaxGeometryImageUniforms = */ 0,
/* .MaxFragmentImageUniforms = */ 8,
/* .MaxCombinedImageUniforms = */ 8,
/* .MaxGeometryTextureImageUnits = */ 16,
/* .MaxGeometryOutputVertices = */ 256,
/* .MaxGeometryTotalOutputComponents = */ 1024,
/* .MaxGeometryUniformComponents = */ 1024,
/* .MaxGeometryVaryingComponents = */ 64,
/* .MaxTessControlInputComponents = */ 128,
/* .MaxTessControlOutputComponents = */ 128,
/* .MaxTessControlTextureImageUnits = */ 16,
/* .MaxTessControlUniformComponents = */ 1024,
/* .MaxTessControlTotalOutputComponents = */ 4096,
/* .MaxTessEvaluationInputComponents = */ 128,
/* .MaxTessEvaluationOutputComponents = */ 128,
/* .MaxTessEvaluationTextureImageUnits = */ 16,
/* .MaxTessEvaluationUniformComponents = */ 1024,
/* .MaxTessPatchComponents = */ 120,
/* .MaxPatchVertices = */ 32,
/* .MaxTessGenLevel = */ 64,
/* .MaxViewports = */ 16,
/* .MaxVertexAtomicCounters = */ 0,
/* .MaxTessControlAtomicCounters = */ 0,
/* .MaxTessEvaluationAtomicCounters = */ 0,
/* .MaxGeometryAtomicCounters = */ 0,
/* .MaxFragmentAtomicCounters = */ 8,
/* .MaxCombinedAtomicCounters = */ 8,
/* .MaxAtomicCounterBindings = */ 1,
/* .MaxVertexAtomicCounterBuffers = */ 0,
/* .MaxTessControlAtomicCounterBuffers = */ 0,
/* .MaxTessEvaluationAtomicCounterBuffers = */ 0,
/* .MaxGeometryAtomicCounterBuffers = */ 0,
/* .MaxFragmentAtomicCounterBuffers = */ 1,
/* .MaxCombinedAtomicCounterBuffers = */ 1,
/* .MaxAtomicCounterBufferSize = */ 16384,
/* .MaxTransformFeedbackBuffers = */ 4,
/* .MaxTransformFeedbackInterleavedComponents = */ 64,
/* .MaxCullDistances = */ 8,
/* .MaxCombinedClipAndCullDistances = */ 8,
/* .MaxSamples = */ 4,
/* .maxMeshOutputVerticesNV = */ 256,
/* .maxMeshOutputPrimitivesNV = */ 512,
/* .maxMeshWorkGroupSizeX_NV = */ 32,
/* .maxMeshWorkGroupSizeY_NV = */ 1,
/* .maxMeshWorkGroupSizeZ_NV = */ 1,
/* .maxTaskWorkGroupSizeX_NV = */ 32,
/* .maxTaskWorkGroupSizeY_NV = */ 1,
/* .maxTaskWorkGroupSizeZ_NV = */ 1,
/* .maxMeshViewCountNV = */ 4,
/* .limits = */ {
/* .nonInductiveForLoops = */ 1,
/* .whileLoops = */ 1,
/* .doWhileLoops = */ 1,
/* .generalUniformIndexing = */ 1,
/* .generalAttributeMatrixVectorIndexing = */ 1,
/* .generalVaryingIndexing = */ 1,
/* .generalSamplerIndexing = */ 1,
/* .generalVariableIndexing = */ 1,
/* .generalConstantMatrixVectorIndexing = */ 1,
}};
static bool initialized = false;
extern "C" {
/*
* Takes in a GLSL shader as a string and converts it to SPIR-V in binary form.
*
* |glsl| Null-terminated string containing the shader to be converted.
* |stage_int| Magic number indicating the type of shader being processed.
* Legal values are as follows:
* Vertex = 0
* Fragment = 4
* Compute = 5
* |gen_debug| Flag to indicate if debug information should be generated.
* |spirv| Output parameter for a pointer to the resulting SPIR-V data.
* |spirv_len| Output parameter for the length of the output binary buffer.
*
* Returns a void* pointer which, if not null, must be destroyed by
* destroy_output_buffer(). (This is not the same pointer returned in |spirv|.)
* If null, the compilation failed.
*/
EMSCRIPTEN_KEEPALIVE
void* convert_glsl_to_spirv(const char* glsl, int stage_int, bool gen_debug, uint32_t** spirv, size_t* spirv_len)
{
if (glsl == nullptr) {
fprintf(stderr, "Input pointer null\n");
return nullptr;
}
if (spirv == nullptr || spirv_len == nullptr) {
fprintf(stderr, "Output pointer null\n");
return nullptr;
}
*spirv = nullptr;
*spirv_len = 0;
if (stage_int != 0 && stage_int != 4 && stage_int != 5) {
fprintf(stderr, "Invalid shader stage\n");
return nullptr;
}
EShLanguage stage = static_cast<EShLanguage>(stage_int);
if (!initialized) {
glslang::InitializeProcess();
initialized = true;
}
glslang::TShader shader(stage);
shader.setStrings(&glsl, 1);
shader.setEnvInput(glslang::EShSourceGlsl, stage, glslang::EShClientVulkan, 100);
shader.setEnvClient(glslang::EShClientVulkan, glslang::EShTargetVulkan_1_1);
shader.setEnvTarget(glslang::EShTargetSpv, glslang::EShTargetSpv_1_3);
if (!shader.parse(&DefaultTBuiltInResource, 100, true, EShMsgDefault)) {
fprintf(stderr, "Parse failed\n");
fprintf(stderr, "%s\n", shader.getInfoLog());
return nullptr;
}
glslang::TProgram program;
program.addShader(&shader);
if (!program.link(EShMsgDefault)) {
fprintf(stderr, "Link failed\n");
fprintf(stderr, "%s\n", program.getInfoLog());
return nullptr;
}
glslang::SpvOptions spvOptions;
spvOptions.generateDebugInfo = gen_debug;
spvOptions.optimizeSize = false;
spvOptions.disassemble = false;
spvOptions.validate = false;
std::vector<uint32_t>* output = new std::vector<uint32_t>;
glslang::GlslangToSpv(*program.getIntermediate(stage), *output, nullptr, &spvOptions);
*spirv_len = output->size();
*spirv = output->data();
return output;
}
/*
* Destroys a buffer created by convert_glsl_to_spirv
*/
EMSCRIPTEN_KEEPALIVE
void destroy_output_buffer(void* p)
{
delete static_cast<std::vector<uint32_t>*>(p);
}
} // extern "C"
/*
* For non-Emscripten builds we supply a generic main, so that the glslang.js
* build target can generate an executable with a trivial use case instead of
* generating a WASM binary. This is done so that there is a target that can be
* built and output analyzed using desktop tools, since WASM binaries are
* specific to the Emscripten toolchain.
*/
#ifndef __EMSCRIPTEN__
int main() {
const char* input = R"(#version 310 es
void main() { })";
uint32_t* output;
size_t output_len;
void* id = convert_glsl_to_spirv(input, 4, false, &output, &output_len);
assert(output != nullptr);
assert(output_len != 0);
destroy_output_buffer(id);
return 0;
}
#endif // ifndef __EMSCRIPTEN__
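To make the buffer-ownership contract of convert_glsl_to_spirv() explicit — a hedged sketch only, not part of this file; compileToWords is a hypothetical helper — a native caller would copy the SPIR-V words out before releasing the internal buffer:

// Hypothetical helper: compile GLSL and return an owned copy of the SPIR-V words,
// releasing the buffer that convert_glsl_to_spirv() allocated internally.
// (Assumes <vector> and <cstdint> are available, as elsewhere in this file.)
static std::vector<uint32_t> compileToWords(const char* glsl, int stage, bool debug)
{
    uint32_t* words = nullptr;
    size_t count = 0;
    void* handle = convert_glsl_to_spirv(glsl, stage, debug, &words, &count);
    std::vector<uint32_t> copy;
    if (handle != nullptr) {
        copy.assign(words, words + count);  // copy while the buffer is still alive
        destroy_output_buffer(handle);      // 'words' must not be used after this
    }
    return copy;
}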


@ -0,0 +1,45 @@
Module['compileGLSLZeroCopy'] = function(glsl, shader_stage, gen_debug) {
gen_debug = !!gen_debug;
var shader_stage_int;
if (shader_stage === 'vertex') {
shader_stage_int = 0;
} else if (shader_stage === 'fragment') {
shader_stage_int = 4;
} else if (shader_stage === 'compute') {
shader_stage_int = 5;
} else {
throw new Error("shader_stage must be 'vertex', 'fragment', or 'compute'");
}
var p_output = Module['_malloc'](4);
var p_output_len = Module['_malloc'](4);
var id = ccall('convert_glsl_to_spirv',
'number',
['string', 'number', 'boolean', 'number', 'number'],
[glsl, shader_stage_int, gen_debug, p_output, p_output_len]);
var output = getValue(p_output, 'i32');
var output_len = getValue(p_output_len, 'i32');
Module['_free'](p_output);
Module['_free'](p_output_len);
if (id === 0) {
throw new Error('GLSL compilation failed');
}
var ret = {};
var outputIndexU32 = output / 4;
ret['data'] = Module['HEAPU32'].subarray(outputIndexU32, outputIndexU32 + output_len);
ret['free'] = function() {
Module['_destroy_output_buffer'](id);
};
return ret;
};
Module['compileGLSL'] = function(glsl, shader_stage, gen_debug) {
var compiled = Module['compileGLSLZeroCopy'](glsl, shader_stage, gen_debug);
var ret = compiled['data'].slice()
compiled['free']();
return ret;
};


@ -15,6 +15,7 @@ if(WIN32)
endif(WIN32)
if(ENABLE_GLSLANG_INSTALL)
install(TARGETS OSDependent
install(TARGETS OSDependent EXPORT OSDependentTargets
ARCHIVE DESTINATION ${CMAKE_INSTALL_LIBDIR})
install(EXPORT OSDependentTargets DESTINATION ${CMAKE_INSTALL_LIBDIR}/cmake)
endif(ENABLE_GLSLANG_INSTALL)

Externals/glslang/glslang/Public/ShaderLang.h vendored Normal file → Executable file

@ -1,6 +1,7 @@
//
// Copyright (C) 2002-2005 3Dlabs Inc. Ltd.
// Copyright (C) 2013-2016 LunarG, Inc.
// Copyright (C) 2015-2018 Google, Inc.
//
// All rights reserved.
//
@ -52,9 +53,6 @@
#define SH_IMPORT_EXPORT
#else
#define SH_IMPORT_EXPORT
#ifndef __fastcall
#define __fastcall
#endif
#define C_DECL
#endif
@ -70,7 +68,7 @@
// This should always increase, as some paths do not consume
// a more major number.
// It should increment by one when new functionality is added.
#define GLSLANG_MINOR_VERSION 7
#define GLSLANG_MINOR_VERSION 13
//
// Call before doing any other compiler/linker operations.
@ -82,7 +80,7 @@ SH_IMPORT_EXPORT int ShInitialize();
//
// Call this at process shutdown to clean up memory.
//
SH_IMPORT_EXPORT int __fastcall ShFinalize();
SH_IMPORT_EXPORT int ShFinalize();
//
// Types of languages the compiler can consume.
@ -94,6 +92,14 @@ typedef enum {
EShLangGeometry,
EShLangFragment,
EShLangCompute,
EShLangRayGenNV,
EShLangIntersectNV,
EShLangAnyHitNV,
EShLangClosestHitNV,
EShLangMissNV,
EShLangCallableNV,
EShLangTaskNV,
EShLangMeshNV,
EShLangCount,
} EShLanguage; // would be better as stage, but this is ancient now
@ -104,6 +110,14 @@ typedef enum {
EShLangGeometryMask = (1 << EShLangGeometry),
EShLangFragmentMask = (1 << EShLangFragment),
EShLangComputeMask = (1 << EShLangCompute),
EShLangRayGenNVMask = (1 << EShLangRayGenNV),
EShLangIntersectNVMask = (1 << EShLangIntersectNV),
EShLangAnyHitNVMask = (1 << EShLangAnyHitNV),
EShLangClosestHitNVMask = (1 << EShLangClosestHitNV),
EShLangMissNVMask = (1 << EShLangMissNV),
EShLangCallableNVMask = (1 << EShLangCallableNV),
EShLangTaskNVMask = (1 << EShLangTaskNV),
EShLangMeshNVMask = (1 << EShLangMeshNV),
} EShLanguageMask;
namespace glslang {
@ -112,33 +126,38 @@ class TType;
typedef enum {
EShSourceNone,
EShSourceGlsl,
EShSourceHlsl,
} EShSource; // if EShLanguage were EShStage, this could be EShLanguage instead
EShSourceGlsl, // GLSL, includes ESSL (OpenGL ES GLSL)
EShSourceHlsl, // HLSL
} EShSource; // if EShLanguage were EShStage, this could be EShLanguage instead
typedef enum {
EShClientNone,
EShClientNone, // use when there is no client, e.g. for validation
EShClientVulkan,
EShClientOpenGL,
} EShClient;
typedef enum {
EShTargetNone,
EShTargetSpv, // preferred spelling
EShTargetSpv, // SPIR-V (preferred spelling)
EshTargetSpv = EShTargetSpv, // legacy spelling
} EShTargetLanguage;
typedef enum {
EShTargetVulkan_1_0 = (1 << 22),
EShTargetVulkan_1_1 = (1 << 22) | (1 << 12),
EShTargetOpenGL_450 = 450,
EShTargetVulkan_1_0 = (1 << 22), // Vulkan 1.0
EShTargetVulkan_1_1 = (1 << 22) | (1 << 12), // Vulkan 1.1
EShTargetVulkan_1_2 = (1 << 22) | (2 << 12), // Vulkan 1.2
EShTargetOpenGL_450 = 450, // OpenGL
} EShTargetClientVersion;
typedef EShTargetClientVersion EshTargetClientVersion;
typedef enum {
EShTargetSpv_1_0 = (1 << 16),
EShTargetSpv_1_3 = (1 << 16) | (3 << 8),
EShTargetSpv_1_0 = (1 << 16), // SPIR-V 1.0
EShTargetSpv_1_1 = (1 << 16) | (1 << 8), // SPIR-V 1.1
EShTargetSpv_1_2 = (1 << 16) | (2 << 8), // SPIR-V 1.2
EShTargetSpv_1_3 = (1 << 16) | (3 << 8), // SPIR-V 1.3
EShTargetSpv_1_4 = (1 << 16) | (4 << 8), // SPIR-V 1.4
EShTargetSpv_1_5 = (1 << 16) | (5 << 8), // SPIR-V 1.5
} EShTargetLanguageVersion;
struct TInputLanguage {
@ -216,8 +235,23 @@ enum EShMessages {
EShMsgDebugInfo = (1 << 10), // save debug information
EShMsgHlslEnable16BitTypes = (1 << 11), // enable use of 16-bit types in SPIR-V for HLSL
EShMsgHlslLegalization = (1 << 12), // enable HLSL Legalization messages
EShMsgHlslDX9Compatible = (1 << 13), // enable HLSL DX9 compatible mode (right now only for samplers)
EShMsgBuiltinSymbolTable = (1 << 14), // print the builtin symbol table
};
//
// Options for building reflection
//
typedef enum {
EShReflectionDefault = 0, // default is original behaviour before options were added
EShReflectionStrictArraySuffix = (1 << 0), // reflection will follow stricter rules for array-of-structs suffixes
EShReflectionBasicArraySuffix = (1 << 1), // arrays of basic types will be appended with [0] as in GL reflection
EShReflectionIntermediateIO = (1 << 2), // reflect inputs and outputs to program, even with no vertex shader
EShReflectionSeparateBuffers = (1 << 3), // buffer variables and buffer blocks are reflected separately
EShReflectionAllBlockVariables = (1 << 4), // reflect all variables in blocks, even if they are inactive
EShReflectionUnwrapIOBlocks = (1 << 5), // unwrap input/output blocks the same as with uniform blocks
} EShReflectionOptions;
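These are bit flags, so several can be combined; as a small sketch (assuming 'program' is a hypothetical, already-linked glslang::TProgram), they are OR'd together and handed to the buildReflection(int opts) overload declared further down in this header:

// Sketch: reflect buffer blocks separately from uniform blocks and also include
// inactive block members.
int reflectionOptions = EShReflectionSeparateBuffers | EShReflectionAllBlockVariables;
program.buildReflection(reflectionOptions);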
//
// Build a table for bindings. This can be used for locating
// attributes, uniforms, globals, etc., as needed.
@ -397,16 +431,45 @@ public:
void setResourceSetBinding(const std::vector<std::string>& base);
void setAutoMapBindings(bool map);
void setAutoMapLocations(bool map);
void addUniformLocationOverride(const char* name, int loc);
void setUniformLocationBase(int base);
void setInvertY(bool invert);
#ifdef ENABLE_HLSL
void setHlslIoMapping(bool hlslIoMap);
void setFlattenUniformArrays(bool flatten);
#endif
void setNoStorageFormat(bool useUnknownFormat);
void setNanMinMaxClamp(bool nanMinMaxClamp);
void setTextureSamplerTransformMode(EShTextureSamplerTransformMode mode);
// For setting up the environment (cleared to nothingness in the constructor).
// These must be called so that parsing is done for the right source language and
// target environment, either indirectly through TranslateEnvironment() based on
// EShMessages et al., or directly by the user.
//
// setEnvInput: The input source language and stage. If generating code for a
// specific client, the input client semantics to use and the
// version of that client's input semantics to use; otherwise
// use EShClientNone and version of 0, e.g. for validation mode.
// Note 'version' does not describe the target environment,
// just the version of the source dialect to compile under.
//
// See the definitions of TEnvironment, EShSource, EShLanguage,
// and EShClient for choices and more detail.
//
// setEnvClient: The client that will be hosting the execution, and its version.
// Note 'version' is not the version of the languages involved, but
// the version of the client environment.
// Use EShClientNone and version of 0 if there is no client, e.g.
// for validation mode.
//
// See EShTargetClientVersion for choices.
//
// setEnvTarget: The language to translate to when generating code, and that
// language's version.
// Use EShTargetNone and version of 0 if there is no client, e.g.
// for validation mode.
//
void setEnvInput(EShSource lang, EShLanguage envStage, EShClient client, int version)
{
environment.input.languageFamily = lang;
@ -424,8 +487,15 @@ public:
environment.target.language = lang;
environment.target.version = version;
}
void getStrings(const char* const* &s, int& n) { s = strings; n = numStrings; }
#ifdef ENABLE_HLSL
void setEnvTargetHlslFunctionality1() { environment.target.hlslFunctionality1 = true; }
bool getEnvTargetHlslFunctionality1() const { return environment.target.hlslFunctionality1; }
#else
bool getEnvTargetHlslFunctionality1() const { return false; }
#endif
// Interface to #include handlers.
//
@ -532,6 +602,8 @@ public:
return parse(builtInResources, defaultVersion, ENoProfile, false, forwardCompatible, messages, includer);
}
// NOTE: Doing just preprocessing to obtain a correct preprocessed shader string
// is not an officially supported or fully working path.
bool preprocess(const TBuiltInResource* builtInResources,
int defaultVersion, EProfile defaultProfile, bool forceDefaultVersionAndProfile,
bool forwardCompatible, EShMessages message, std::string* outputString,
@ -574,8 +646,46 @@ private:
TShader& operator=(TShader&);
};
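To tie the setEnvInput / setEnvClient / setEnvTarget comments above together — a sketch only, not part of this header; 'source' stands for an application-provided GLSL string — a Vulkan 1.1 / SPIR-V 1.3 configuration looks roughly like this (mirroring the web wrapper earlier in this diff):

// Sketch: GLSL input compiled under Vulkan client semantics, targeting SPIR-V 1.3.
glslang::TShader shader(EShLangFragment);
const char* strings[] = { source };            // 'source' is a hypothetical shader string
shader.setStrings(strings, 1);
shader.setEnvInput(glslang::EShSourceGlsl, EShLangFragment, glslang::EShClientVulkan, 100);
shader.setEnvClient(glslang::EShClientVulkan, glslang::EShTargetVulkan_1_1);
shader.setEnvTarget(glslang::EShTargetSpv, glslang::EShTargetSpv_1_3);
// The '100' is the dialect version of the input source, not the version of the
// target environment.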
class TReflection;
class TIoMapper;
#ifndef GLSLANG_WEB
//
// A reflection database and its interface, consistent with the OpenGL API reflection queries.
//
// Data needed for just a single object at the granularity exchanged by the reflection API
class TObjectReflection {
public:
TObjectReflection(const std::string& pName, const TType& pType, int pOffset, int pGLDefineType, int pSize, int pIndex);
const TType* getType() const { return type; }
int getBinding() const;
void dump() const;
static TObjectReflection badReflection() { return TObjectReflection(); }
std::string name;
int offset;
int glDefineType;
int size; // data size in bytes for a block, array size for a (non-block) object that's an array
int index;
int counterIndex;
int numMembers;
int arrayStride; // stride of an array variable
int topLevelArrayStride; // stride of the top-level variable in a storage buffer member
EShLanguageMask stages;
protected:
TObjectReflection()
: offset(-1), glDefineType(-1), size(-1), index(-1), counterIndex(-1), numMembers(-1), arrayStride(0),
topLevelArrayStride(0), stages(EShLanguageMask(0)), type(nullptr)
{
}
const TType* type;
};
class TReflection;
class TIoMapper;
struct TVarEntryInfo;
// Allows customizing the binding layout after linking.
// All used uniform variables will invoke at least validateBinding.
@ -596,53 +706,65 @@ class TIoMapper;
// notify callbacks; this phase ends with a call to endNotifications.
// Phase two starts directly after the call to endNotifications
// and calls all other callbacks to validate and to get the
// bindings, sets, locations, component and color indices.
// bindings, sets, locations, component and color indices.
//
// NOTE: limit checks are still applied to bindings and sets
// and may result in an error.
class TIoMapResolver
{
public:
virtual ~TIoMapResolver() {}
virtual ~TIoMapResolver() {}
// Should return true if the resulting/current binding would be okay.
// Basic idea is to do aliasing binding checks with this.
virtual bool validateBinding(EShLanguage stage, const char* name, const TType& type, bool is_live) = 0;
// Should return a value >= 0 if the current binding should be overridden.
// Return -1 if the current binding (including no binding) should be kept.
virtual int resolveBinding(EShLanguage stage, const char* name, const TType& type, bool is_live) = 0;
// Should return a value >= 0 if the current set should be overridden.
// Return -1 if the current set (including no set) should be kept.
virtual int resolveSet(EShLanguage stage, const char* name, const TType& type, bool is_live) = 0;
// Should return a value >= 0 if the current location should be overridden.
// Return -1 if the current location (including no location) should be kept.
virtual int resolveUniformLocation(EShLanguage stage, const char* name, const TType& type, bool is_live) = 0;
// Should return true if the resulting/current setup would be okay.
// Basic idea is to do aliasing checks and reject invalid semantic names.
virtual bool validateInOut(EShLanguage stage, const char* name, const TType& type, bool is_live) = 0;
// Should return a value >= 0 if the current location should be overridden.
// Return -1 if the current location (including no location) should be kept.
virtual int resolveInOutLocation(EShLanguage stage, const char* name, const TType& type, bool is_live) = 0;
// Should return a value >= 0 if the current component index should be overridden.
// Return -1 if the current component index (including no index) should be kept.
virtual int resolveInOutComponent(EShLanguage stage, const char* name, const TType& type, bool is_live) = 0;
// Should return a value >= 0 if the current color index should be overridden.
// Return -1 if the current color index (including no index) should be kept.
virtual int resolveInOutIndex(EShLanguage stage, const char* name, const TType& type, bool is_live) = 0;
// Notification of a uniform variable
virtual void notifyBinding(EShLanguage stage, const char* name, const TType& type, bool is_live) = 0;
// Notification of an in or out variable
virtual void notifyInOut(EShLanguage stage, const char* name, const TType& type, bool is_live) = 0;
// Called by mapIO when it has finished the notify pass
virtual void endNotifications(EShLanguage stage) = 0;
// Called by mapIO when it starts its notify pass for the given stage
virtual void beginNotifications(EShLanguage stage) = 0;
// Called by mapIO when it starts its resolve pass for the given stage
virtual void beginResolve(EShLanguage stage) = 0;
// Called by mapIO when it has finished the resolve pass
virtual void endResolve(EShLanguage stage) = 0;
// Should return true if the resulting/current binding would be okay.
// Basic idea is to do aliasing binding checks with this.
virtual bool validateBinding(EShLanguage stage, TVarEntryInfo& ent) = 0;
// Should return a value >= 0 if the current binding should be overridden.
// Return -1 if the current binding (including no binding) should be kept.
virtual int resolveBinding(EShLanguage stage, TVarEntryInfo& ent) = 0;
// Should return a value >= 0 if the current set should be overridden.
// Return -1 if the current set (including no set) should be kept.
virtual int resolveSet(EShLanguage stage, TVarEntryInfo& ent) = 0;
// Should return a value >= 0 if the current location should be overridden.
// Return -1 if the current location (including no location) should be kept.
virtual int resolveUniformLocation(EShLanguage stage, TVarEntryInfo& ent) = 0;
// Should return true if the resulting/current setup would be okay.
// Basic idea is to do aliasing checks and reject invalid semantic names.
virtual bool validateInOut(EShLanguage stage, TVarEntryInfo& ent) = 0;
// Should return a value >= 0 if the current location should be overridden.
// Return -1 if the current location (including no location) should be kept.
virtual int resolveInOutLocation(EShLanguage stage, TVarEntryInfo& ent) = 0;
// Should return a value >= 0 if the current component index should be overridden.
// Return -1 if the current component index (including no index) should be kept.
virtual int resolveInOutComponent(EShLanguage stage, TVarEntryInfo& ent) = 0;
// Should return a value >= 0 if the current color index should be overridden.
// Return -1 if the current color index (including no index) should be kept.
virtual int resolveInOutIndex(EShLanguage stage, TVarEntryInfo& ent) = 0;
// Notification of a uniform variable
virtual void notifyBinding(EShLanguage stage, TVarEntryInfo& ent) = 0;
// Notification of an in or out variable
virtual void notifyInOut(EShLanguage stage, TVarEntryInfo& ent) = 0;
// Called by mapIO when it starts its notify pass for the given stage
virtual void beginNotifications(EShLanguage stage) = 0;
// Called by mapIO when it has finished the notify pass
virtual void endNotifications(EShLanguage stage) = 0;
// Called by mapIO when it starts its resolve pass for the given stage
virtual void beginResolve(EShLanguage stage) = 0;
// Called by mapIO when it has finished the resolve pass
virtual void endResolve(EShLanguage stage) = 0;
// Called by mapIO.addStage when it starts its symbol collect for the given stage
virtual void beginCollect(EShLanguage stage) = 0;
// Called by mapIO when it has finished the symbol collect
virtual void endCollect(EShLanguage stage) = 0;
// Called by TSlotCollector to resolve storage locations or bindings
virtual void reserverStorageSlot(TVarEntryInfo& ent, TInfoSink& infoSink) = 0;
// Called by TSlotCollector to resolve resource locations or bindings
virtual void reserverResourceSlot(TVarEntryInfo& ent, TInfoSink& infoSink) = 0;
// Called by mapIO.addStage to set shader stage mask to mark a stage be added to this pipeline
virtual void addStage(EShLanguage stage) = 0;
};
#endif // GLSLANG_WEB
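For illustration only — a minimal sketch, assuming it is compiled in the same context as this header so that TVarEntryInfo and TInfoSink are visible — a pass-through resolver for the reworked, TVarEntryInfo-based interface overrides every hook and keeps the compiler's own choices:

// Sketch: a do-nothing resolver. resolve* hooks return -1 ("keep the current
// value"), validate* hooks accept everything, and the phase callbacks are no-ops.
class TPassThroughResolver : public TIoMapResolver {
public:
    bool validateBinding(EShLanguage, TVarEntryInfo&) override { return true; }
    int resolveBinding(EShLanguage, TVarEntryInfo&) override { return -1; }
    int resolveSet(EShLanguage, TVarEntryInfo&) override { return -1; }
    int resolveUniformLocation(EShLanguage, TVarEntryInfo&) override { return -1; }
    bool validateInOut(EShLanguage, TVarEntryInfo&) override { return true; }
    int resolveInOutLocation(EShLanguage, TVarEntryInfo&) override { return -1; }
    int resolveInOutComponent(EShLanguage, TVarEntryInfo&) override { return -1; }
    int resolveInOutIndex(EShLanguage, TVarEntryInfo&) override { return -1; }
    void notifyBinding(EShLanguage, TVarEntryInfo&) override { }
    void notifyInOut(EShLanguage, TVarEntryInfo&) override { }
    void beginNotifications(EShLanguage) override { }
    void endNotifications(EShLanguage) override { }
    void beginResolve(EShLanguage) override { }
    void endResolve(EShLanguage) override { }
    void beginCollect(EShLanguage) override { }
    void endCollect(EShLanguage) override { }
    void reserverStorageSlot(TVarEntryInfo&, TInfoSink&) override { }
    void reserverResourceSlot(TVarEntryInfo&, TInfoSink&) override { }
    void addStage(EShLanguage) override { }
};
// Such a resolver would then be handed to TProgram::mapIO() in place of the default one.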
// Make one TProgram per set of shaders that will get linked together. Add all
// the shaders that are to be linked together. After calling shader.parse()
// for all shaders, call link().
@ -654,7 +776,7 @@ public:
TProgram();
virtual ~TProgram();
void addShader(TShader* shader) { stages[shader->stage].push_back(shader); }
std::list<TShader*>& getShaders(EShLanguage stage) { return stages[stage]; }
// Link Validation interface
bool link(EShMessages);
const char* getInfoLog();
@ -662,35 +784,101 @@ public:
TIntermediate* getIntermediate(EShLanguage stage) const { return intermediate[stage]; }
#ifndef GLSLANG_WEB
// Reflection Interface
bool buildReflection(); // call first, to do liveness analysis, index mapping, etc.; returns false on failure
int getNumLiveUniformVariables() const; // can be used for glGetProgramiv(GL_ACTIVE_UNIFORMS)
int getNumLiveUniformBlocks() const; // can be used for glGetProgramiv(GL_ACTIVE_UNIFORM_BLOCKS)
const char* getUniformName(int index) const; // can be used for "name" part of glGetActiveUniform()
const char* getUniformBlockName(int blockIndex) const; // can be used for glGetActiveUniformBlockName()
int getUniformBlockSize(int blockIndex) const; // can be used for glGetActiveUniformBlockiv(UNIFORM_BLOCK_DATA_SIZE)
int getUniformIndex(const char* name) const; // can be used for glGetUniformIndices()
int getUniformBinding(int index) const; // returns the binding number
int getUniformBlockBinding(int index) const; // returns the block binding number
int getUniformBlockIndex(int index) const; // can be used for glGetActiveUniformsiv(GL_UNIFORM_BLOCK_INDEX)
int getUniformBlockCounterIndex(int index) const; // returns block index of associated counter.
int getUniformType(int index) const; // can be used for glGetActiveUniformsiv(GL_UNIFORM_TYPE)
int getUniformBufferOffset(int index) const; // can be used for glGetActiveUniformsiv(GL_UNIFORM_OFFSET)
int getUniformArraySize(int index) const; // can be used for glGetActiveUniformsiv(GL_UNIFORM_SIZE)
int getNumLiveAttributes() const; // can be used for glGetProgramiv(GL_ACTIVE_ATTRIBUTES)
// call first, to do liveness analysis, index mapping, etc.; returns false on failure
bool buildReflection(int opts = EShReflectionDefault);
unsigned getLocalSize(int dim) const; // return dim'th local size
const char *getAttributeName(int index) const; // can be used for glGetActiveAttrib()
int getAttributeType(int index) const; // can be used for glGetActiveAttrib()
const TType* getUniformTType(int index) const; // returns a TType*
const TType* getUniformBlockTType(int index) const; // returns a TType*
const TType* getAttributeTType(int index) const; // returns a TType*
int getReflectionIndex(const char *name) const;
int getReflectionPipeIOIndex(const char* name, const bool inOrOut) const;
int getNumUniformVariables() const;
const TObjectReflection& getUniform(int index) const;
int getNumUniformBlocks() const;
const TObjectReflection& getUniformBlock(int index) const;
int getNumPipeInputs() const;
const TObjectReflection& getPipeInput(int index) const;
int getNumPipeOutputs() const;
const TObjectReflection& getPipeOutput(int index) const;
int getNumBufferVariables() const;
const TObjectReflection& getBufferVariable(int index) const;
int getNumBufferBlocks() const;
const TObjectReflection& getBufferBlock(int index) const;
int getNumAtomicCounters() const;
const TObjectReflection& getAtomicCounter(int index) const;
// Legacy Reflection Interface - expressed in terms of above interface
// can be used for glGetProgramiv(GL_ACTIVE_UNIFORMS)
int getNumLiveUniformVariables() const { return getNumUniformVariables(); }
// can be used for glGetProgramiv(GL_ACTIVE_UNIFORM_BLOCKS)
int getNumLiveUniformBlocks() const { return getNumUniformBlocks(); }
// can be used for glGetProgramiv(GL_ACTIVE_ATTRIBUTES)
int getNumLiveAttributes() const { return getNumPipeInputs(); }
// can be used for glGetUniformIndices()
int getUniformIndex(const char *name) const { return getReflectionIndex(name); }
int getPipeIOIndex(const char *name, const bool inOrOut) const
{ return getReflectionPipeIOIndex(name, inOrOut); }
// can be used for "name" part of glGetActiveUniform()
const char *getUniformName(int index) const { return getUniform(index).name.c_str(); }
// returns the binding number
int getUniformBinding(int index) const { return getUniform(index).getBinding(); }
// returns the shader stages where a uniform is present
EShLanguageMask getUniformStages(int index) const { return getUniform(index).stages; }
// can be used for glGetActiveUniformsiv(GL_UNIFORM_BLOCK_INDEX)
int getUniformBlockIndex(int index) const { return getUniform(index).index; }
// can be used for glGetActiveUniformsiv(GL_UNIFORM_TYPE)
int getUniformType(int index) const { return getUniform(index).glDefineType; }
// can be used for glGetActiveUniformsiv(GL_UNIFORM_OFFSET)
int getUniformBufferOffset(int index) const { return getUniform(index).offset; }
// can be used for glGetActiveUniformsiv(GL_UNIFORM_SIZE)
int getUniformArraySize(int index) const { return getUniform(index).size; }
// returns a TType*
const TType *getUniformTType(int index) const { return getUniform(index).getType(); }
// can be used for glGetActiveUniformBlockName()
const char *getUniformBlockName(int index) const { return getUniformBlock(index).name.c_str(); }
// can be used for glGetActiveUniformBlockiv(UNIFORM_BLOCK_DATA_SIZE)
int getUniformBlockSize(int index) const { return getUniformBlock(index).size; }
// returns the block binding number
int getUniformBlockBinding(int index) const { return getUniformBlock(index).getBinding(); }
// returns block index of associated counter.
int getUniformBlockCounterIndex(int index) const { return getUniformBlock(index).counterIndex; }
// returns a TType*
const TType *getUniformBlockTType(int index) const { return getUniformBlock(index).getType(); }
// can be used for glGetActiveAttrib()
const char *getAttributeName(int index) const { return getPipeInput(index).name.c_str(); }
// can be used for glGetActiveAttrib()
int getAttributeType(int index) const { return getPipeInput(index).glDefineType; }
// returns a TType*
const TType *getAttributeTType(int index) const { return getPipeInput(index).getType(); }
void dumpReflection();
// I/O mapping: apply base offsets and map live unbound variables
// If a resolver is not provided, it uses the previous approach
// and respects auto assignment and offsets.
bool mapIO(TIoMapResolver* resolver = NULL);
bool mapIO(TIoMapResolver* pResolver = nullptr, TIoMapper* pIoMapper = nullptr);
#endif
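As a usage sketch for the reworked reflection interface above — not part of this header; 'program' is assumed to be a glslang::TProgram that has already been linked — the legacy-style getters keep working on top of the new TObjectReflection-based queries:

// Sketch: build reflection after linking, then walk live uniforms and pipe inputs.
// (Uses <cstdio> for printf.)
program.buildReflection(EShReflectionDefault);
for (int i = 0; i < program.getNumLiveUniformVariables(); ++i) {
    printf("uniform %s: type %d, offset %d, binding %d\n",
           program.getUniformName(i),
           program.getUniformType(i),
           program.getUniformBufferOffset(i),
           program.getUniformBinding(i));
}
for (int i = 0; i < program.getNumPipeInputs(); ++i)
    program.getPipeInput(i).dump();   // prints name, offset, type, size, index, binding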
protected:
bool linkStage(EShLanguage, EShMessages);
@ -700,8 +888,9 @@ protected:
TIntermediate* intermediate[EShLangCount];
bool newedIntermediate[EShLangCount]; // track which intermediate were "new" versus reusing a singleton unit in a stage
TInfoSink* infoSink;
#ifndef GLSLANG_WEB
TReflection* reflection;
TIoMapper* ioMapper;
#endif
bool linked;
private:


@ -1,3 +1,16 @@
#!/usr/bin/env bash
#!/bin/bash
if [ "$1" = 'web' ]
then
m4 -P -DGLSLANG_WEB MachineIndependent/glslang.m4 > MachineIndependent/glslang.y
elif [ "$#" -eq 0 ]
then
m4 -P MachineIndependent/glslang.m4 > MachineIndependent/glslang.y
else
echo usage:
echo $0 web
echo $0
exit
fi
bison --defines=MachineIndependent/glslang_tab.cpp.h -t MachineIndependent/glslang.y -o MachineIndependent/glslang_tab.cpp

Externals/glslang/gtests/AST.FromFile.cpp vendored Executable file → Normal file

@ -41,26 +41,22 @@ namespace {
using CompileToAstTest = GlslangTest<::testing::TestWithParam<std::string>>;
#ifdef NV_EXTENSIONS
using CompileToAstTestNV = GlslangTest<::testing::TestWithParam<std::string>>;
#endif
TEST_P(CompileToAstTest, FromFile)
{
loadFileCompileAndCheck(GlobalTestSettings.testRoot, GetParam(),
Source::GLSL, Semantics::OpenGL, glslang::EShTargetVulkan_1_0,
Source::GLSL, Semantics::OpenGL, glslang::EShTargetVulkan_1_0, glslang::EShTargetSpv_1_0,
Target::AST);
}
#ifdef NV_EXTENSIONS
// Compiling GLSL to SPIR-V under OpenGL semantics (NV extensions enabled).
TEST_P(CompileToAstTestNV, FromFile)
{
loadFileCompileAndCheck(GlobalTestSettings.testRoot, GetParam(),
Source::GLSL, Semantics::OpenGL, glslang::EShTargetVulkan_1_0,
Source::GLSL, Semantics::OpenGL, glslang::EShTargetVulkan_1_0, glslang::EShTargetSpv_1_0,
Target::AST);
}
#endif
// clang-format off
INSTANTIATE_TEST_CASE_P(
@ -93,9 +89,13 @@ INSTANTIATE_TEST_CASE_P(
"cppSimple.vert",
"cppIndent.vert",
"cppIntMinOverNegativeOne.frag",
"cppMerge.frag",
"cppNest.vert",
"cppBad.vert",
"cppBad2.vert",
"cppBad3.vert",
"cppBad4.vert",
"cppBad5.vert",
"cppComplexExpr.vert",
"cppDeepNest.frag",
"cppPassMacroName.frag",
@ -113,6 +113,7 @@ INSTANTIATE_TEST_CASE_P(
"300operations.frag",
"300block.frag",
"300samplerExternal.frag",
"300samplerExternalYUV.frag",
"310.comp",
"310.vert",
"310.geom",
@ -120,6 +121,7 @@ INSTANTIATE_TEST_CASE_P(
"310.tesc",
"310.tese",
"310implicitSizeArrayError.vert",
"310.inheritMemory.frag",
"310AofA.vert",
"310runtimeArray.vert",
"320.comp",
@ -231,11 +233,51 @@ INSTANTIATE_TEST_CASE_P(
"precise_struct_block.vert",
"maxClipDistances.vert",
"findFunction.frag",
"constantUnaryConversion.comp",
"glsl.450.subgroup.frag",
"glsl.450.subgroup.geom",
"glsl.450.subgroup.tesc",
"glsl.450.subgroup.tese",
"glsl.450.subgroup.vert",
"glsl.450.subgroupArithmetic.comp",
"glsl.450.subgroupBasic.comp",
"glsl.450.subgroupBallot.comp",
"glsl.450.subgroupBallotNeg.comp",
"glsl.450.subgroupClustered.comp",
"glsl.450.subgroupClusteredNeg.comp",
"glsl.450.subgroupPartitioned.comp",
"glsl.450.subgroupShuffle.comp",
"glsl.450.subgroupShuffleRelative.comp",
"glsl.450.subgroupQuad.comp",
"glsl.450.subgroupVote.comp",
"glsl.460.subgroup.mesh",
"glsl.460.subgroup.task",
"glsl.460.subgroup.rahit",
"glsl.460.subgroup.rcall",
"glsl.460.subgroup.rchit",
"glsl.460.subgroup.rgen",
"glsl.460.subgroup.rint",
"glsl.460.subgroup.rmiss",
"glsl.es320.subgroup.frag",
"glsl.es320.subgroup.geom",
"glsl.es320.subgroup.tesc",
"glsl.es320.subgroup.tese",
"glsl.es320.subgroup.vert",
"glsl.es320.subgroupArithmetic.comp",
"glsl.es320.subgroupBasic.comp",
"glsl.es320.subgroupBallot.comp",
"glsl.es320.subgroupBallotNeg.comp",
"glsl.es320.subgroupClustered.comp",
"glsl.es320.subgroupClusteredNeg.comp",
"glsl.es320.subgroupPartitioned.comp",
"glsl.es320.subgroupShuffle.comp",
"glsl.es320.subgroupShuffleRelative.comp",
"glsl.es320.subgroupQuad.comp",
"glsl.es320.subgroupVote.comp",
})),
FileNameAsCustomTestSuffix
);
#ifdef NV_EXTENSIONS
INSTANTIATE_TEST_CASE_P(
Glsl, CompileToAstTestNV,
::testing::ValuesIn(std::vector<std::string>({
@ -243,7 +285,6 @@ INSTANTIATE_TEST_CASE_P(
})),
FileNameAsCustomTestSuffix
);
#endif
// clang-format on
} // anonymous namespace


@ -20,17 +20,22 @@ if(BUILD_TESTING)
${CMAKE_CURRENT_SOURCE_DIR}/Link.FromFile.cpp
${CMAKE_CURRENT_SOURCE_DIR}/Link.FromFile.Vk.cpp
${CMAKE_CURRENT_SOURCE_DIR}/Pp.FromFile.cpp
${CMAKE_CURRENT_SOURCE_DIR}/Spv.FromFile.cpp
${CMAKE_CURRENT_SOURCE_DIR}/Spv.FromFile.cpp)
# -- Remapper tests
${CMAKE_CURRENT_SOURCE_DIR}/Remap.FromFile.cpp)
if(ENABLE_SPVREMAPPER)
set(TEST_SOURCES ${TEST_SOURCES}
${CMAKE_CURRENT_SOURCE_DIR}/Remap.FromFile.cpp)
endif()
glslang_pch(TEST_SOURCES ${CMAKE_CURRENT_SOURCE_DIR}/pch.cpp)
add_executable(glslangtests ${TEST_SOURCES})
set_property(TARGET glslangtests PROPERTY FOLDER tests)
glslang_set_link_args(glslangtests)
if(ENABLE_GLSLANG_INSTALL)
install(TARGETS glslangtests
install(TARGETS glslangtests EXPORT glslangtestsTargets
RUNTIME DESTINATION ${CMAKE_INSTALL_BINDIR})
install(EXPORT glslangtestsTargets DESTINATION ${CMAKE_INSTALL_LIBDIR}/cmake)
endif(ENABLE_GLSLANG_INSTALL)
set(GLSLANG_TEST_DIRECTORY "${CMAKE_CURRENT_SOURCE_DIR}/../Test")
@ -46,8 +51,13 @@ if(BUILD_TESTING)
${gtest_SOURCE_DIR}/include)
set(LIBRARIES
SPVRemapper glslang OSDependent OGLCompiler glslang
glslang OSDependent OGLCompiler glslang
SPIRV glslang-default-resource-limits)
if(ENABLE_SPVREMAPPER)
set(LIBRARIES ${LIBRARIES} SPVRemapper)
endif()
if(ENABLE_HLSL)
set(LIBRARIES ${LIBRARIES} HLSL)
endif(ENABLE_HLSL)


@ -51,6 +51,7 @@ TEST_P(ConfigTest, FromFile)
{
TestCaseSpec testCase = GetParam();
GlslangResult result;
result.validationResult = true;
// Get the contents for input shader and limit configurations.
std::string shaderContents, configContents;
@ -99,7 +100,7 @@ INSTANTIATE_TEST_CASE_P(
::testing::ValuesIn(std::vector<TestCaseSpec>({
{"specExamples.vert", "baseResults/test.conf", "specExamplesConf.vert.out", (EShMessages)(EShMsgAST | EShMsgCascadingErrors)},
{"100Limits.vert", "100.conf", "100LimitsConf.vert.out", EShMsgCascadingErrors},
})),
}))
);
// clang-format on


@ -127,7 +127,7 @@ INSTANTIATE_TEST_CASE_P(
{float(ldexp(1.0, -127) / 2.0 + (ldexp(1.0, -127) / 4.0f)),
"0x1.8p-128"},
})),);
})));
INSTANTIATE_TEST_CASE_P(
Float32NanTests, HexFloatTest,
@ -145,7 +145,7 @@ INSTANTIATE_TEST_CASE_P(
{uint32_t(0x7f800c00), "0x1.0018p+128"}, // +nan
{uint32_t(0x7F80F000), "0x1.01ep+128"}, // +nan
{uint32_t(0x7FFFFFFF), "0x1.fffffep+128"}, // +nan
})),);
})));
INSTANTIATE_TEST_CASE_P(
Float64Tests, HexDoubleTest,
@ -218,7 +218,7 @@ INSTANTIATE_TEST_CASE_P(
{ldexp(1.0, -1023) / 2.0 + (ldexp(1.0, -1023) / 4.0),
"0x1.8p-1024"},
})),);
})));
INSTANTIATE_TEST_CASE_P(
Float64NanTests, HexDoubleTest,
@ -237,7 +237,7 @@ INSTANTIATE_TEST_CASE_P(
{uint64_t(0x7FF0000000000001LL), "0x1.0000000000001p+1024"}, // -nan
{uint64_t(0x7FF0000300000000LL), "0x1.00003p+1024"}, // -nan
{uint64_t(0x7FFFFFFFFFFFFFFFLL), "0x1.fffffffffffffp+1024"}, // -nan
})),);
})));
TEST(HexFloatStreamTest, OperatorLeftShiftPreservesFloatAndFill) {
std::stringstream s;
@ -282,7 +282,7 @@ INSTANTIATE_TEST_CASE_P(
{"0xFFp+0", 255.f},
{"0x0.8p+0", 0.5f},
{"0x0.4p+0", 0.25f},
})),);
})));
INSTANTIATE_TEST_CASE_P(
Float32DecodeInfTests, DecodeHexFloatTest,
@ -292,7 +292,7 @@ INSTANTIATE_TEST_CASE_P(
{"0x32p+127", uint32_t(0x7F800000)}, // inf
{"0x32p+500", uint32_t(0x7F800000)}, // inf
{"-0x32p+127", uint32_t(0xFF800000)}, // -inf
})),);
})));
INSTANTIATE_TEST_CASE_P(
Float64DecodeTests, DecodeHexDoubleTest,
@ -315,7 +315,7 @@ INSTANTIATE_TEST_CASE_P(
{"0xFFp+0", 255.},
{"0x0.8p+0", 0.5},
{"0x0.4p+0", 0.25},
})),);
})));
INSTANTIATE_TEST_CASE_P(
Float64DecodeInfTests, DecodeHexDoubleTest,
@ -326,7 +326,7 @@ INSTANTIATE_TEST_CASE_P(
{"0x32p+1023", uint64_t(0x7FF0000000000000)}, // inf
{"0x32p+5000", uint64_t(0x7FF0000000000000)}, // inf
{"-0x32p+1023", uint64_t(0xFFF0000000000000)}, // -inf
})),);
})));
TEST(FloatProxy, ValidConversion) {
EXPECT_THAT(FloatProxy<float>(1.f).getAsFloat(), Eq(1.0f));
@ -495,7 +495,7 @@ INSTANTIATE_TEST_CASE_P(
{std::numeric_limits<float>::infinity(), "0x1p+128"},
{-std::numeric_limits<float>::infinity(), "-0x1p+128"},
})),);
})));
INSTANTIATE_TEST_CASE_P(
Float64Tests, FloatProxyDoubleTest,
@ -532,7 +532,7 @@ INSTANTIATE_TEST_CASE_P(
{std::numeric_limits<double>::infinity(), "0x1p+1024"},
{-std::numeric_limits<double>::infinity(), "-0x1p+1024"},
})),);
})));
// double is used so that unbiased_exponent can be used with the output
// of ldexp directly.
@ -793,7 +793,7 @@ INSTANTIATE_TEST_CASE_P(F32ToF16, HexFloatRoundTest,
{static_cast<float>(ldexp(float_fractions({0, 1, 11, 13}), -129)), std::make_pair(half_bits_set({0, 9}), false), spvutils::kRoundToPositiveInfinity},
{static_cast<float>(ldexp(float_fractions({0, 1, 11, 13}), -131)), std::make_pair(half_bits_set({0}), false), spvutils::kRoundToNegativeInfinity},
{static_cast<float>(ldexp(float_fractions({0, 1, 11, 13}), -130)), std::make_pair(half_bits_set({0, 9}), false), spvutils::kRoundToNearestEven},
})),);
})));
// clang-format on
struct UpCastSignificandCase {
@ -837,7 +837,7 @@ INSTANTIATE_TEST_CASE_P(F16toF32, HexFloatRoundUpSignificandTest,
{0x0F00, 0x600000},
{0x0F01, 0x602000},
{0x0FFF, 0x7FE000},
})),);
})));
struct DownCastTest {
float source_float;
@ -914,7 +914,7 @@ INSTANTIATE_TEST_CASE_P(F32ToF16, HexFloatFP32To16Tests,
{-std::numeric_limits<float>::infinity(), negative_infinity, {spvutils::kRoundToZero, spvutils::kRoundToPositiveInfinity, spvutils::kRoundToNegativeInfinity, spvutils::kRoundToNearestEven}},
// Nans are below because we cannot test for equality.
})),);
})));
struct UpCastCase{
uint16_t source_half;
@ -965,7 +965,7 @@ INSTANTIATE_TEST_CASE_P(F16ToF32, HexFloatFP16To32Tests,
// inf
{0x7C00, std::numeric_limits<float>::infinity()},
{0xFC00, -std::numeric_limits<float>::infinity()},
})),);
})));
TEST(HexFloatOperationTests, NanTests) {
using HF = spvutils::HexFloat<spvutils::FloatProxy<float>>;
@ -1071,7 +1071,7 @@ INSTANTIATE_TEST_CASE_P(
// We can't have -1e40 and negate_value == true since
// that represents an original case of "--1e40" which
// is invalid.
}),);
}));
using ParseNormalFloat16Test =
::testing::TestWithParam<FloatParseCase<Float16>>;
@ -1114,7 +1114,7 @@ INSTANTIATE_TEST_CASE_P(
BadFloatParseCase<Float16>("-2.0", true, uint16_t{0}),
BadFloatParseCase<Float16>("+0.0", true, uint16_t{0}),
BadFloatParseCase<Float16>("+2.0", true, uint16_t{0}),
}),);
}));
// A test case for detecting infinities.
template <typename T>
@ -1149,7 +1149,7 @@ INSTANTIATE_TEST_CASE_P(
{"-1e40", false, -FLT_MAX},
{"1e400", false, FLT_MAX},
{"-1e400", false, -FLT_MAX},
})),);
})));
using FloatProxyParseOverflowDoubleTest =
::testing::TestWithParam<OverflowParseCase<double>>;
@ -1176,7 +1176,7 @@ INSTANTIATE_TEST_CASE_P(
{"-1e40", true, -1e40},
{"1e400", false, DBL_MAX},
{"-1e400", false, -DBL_MAX},
})),);
})));
using FloatProxyParseOverflowFloat16Test =
::testing::TestWithParam<OverflowParseCase<uint16_t>>;
@ -1207,7 +1207,7 @@ INSTANTIATE_TEST_CASE_P(
{"-1e38", false, uint16_t{0xfbff}},
{"-1e40", false, uint16_t{0xfbff}},
{"-1e400", false, uint16_t{0xfbff}},
})),);
})));
TEST(FloatProxy, Max) {
EXPECT_THAT(FloatProxy<Float16>::max().getAsFloat().get_value(),

Externals/glslang/gtests/Hlsl.FromFile.cpp vendored Normal file → Executable file

@ -61,20 +61,23 @@ using HlslCompileTest = GlslangTest<::testing::TestWithParam<FileNameEntryPointP
using HlslVulkan1_1CompileTest = GlslangTest<::testing::TestWithParam<FileNameEntryPointPair>>;
using HlslCompileAndFlattenTest = GlslangTest<::testing::TestWithParam<FileNameEntryPointPair>>;
using HlslLegalizeTest = GlslangTest<::testing::TestWithParam<FileNameEntryPointPair>>;
using HlslDebugTest = GlslangTest<::testing::TestWithParam<FileNameEntryPointPair>>;
using HlslDX9CompatibleTest = GlslangTest<::testing::TestWithParam<FileNameEntryPointPair>>;
using HlslLegalDebugTest = GlslangTest<::testing::TestWithParam<FileNameEntryPointPair>>;
// Compiling HLSL to pre-legalized SPIR-V under Vulkan semantics. Expected
// to successfully generate both AST and SPIR-V.
TEST_P(HlslCompileTest, FromFile)
{
loadFileCompileAndCheck(GlobalTestSettings.testRoot, GetParam().fileName,
Source::HLSL, Semantics::Vulkan, glslang::EShTargetVulkan_1_0,
Source::HLSL, Semantics::Vulkan, glslang::EShTargetVulkan_1_0, glslang::EShTargetSpv_1_0,
Target::BothASTAndSpv, true, GetParam().entryPoint);
}
TEST_P(HlslVulkan1_1CompileTest, FromFile)
{
loadFileCompileAndCheck(GlobalTestSettings.testRoot, GetParam().fileName,
Source::HLSL, Semantics::Vulkan, glslang::EShTargetVulkan_1_1,
Source::HLSL, Semantics::Vulkan, glslang::EShTargetVulkan_1_1, glslang::EShTargetSpv_1_3,
Target::BothASTAndSpv, true, GetParam().entryPoint);
}
@ -90,11 +93,41 @@ TEST_P(HlslCompileAndFlattenTest, FromFile)
TEST_P(HlslLegalizeTest, FromFile)
{
loadFileCompileAndCheck(GlobalTestSettings.testRoot, GetParam().fileName,
Source::HLSL, Semantics::Vulkan, glslang::EShTargetVulkan_1_0,
Source::HLSL, Semantics::Vulkan, glslang::EShTargetVulkan_1_0, glslang::EShTargetSpv_1_0,
Target::Spv, true, GetParam().entryPoint,
"/baseLegalResults/", true);
}
// Compiling HLSL to pre-legalized SPIR-V. Expected to successfully generate
// SPIR-V with debug instructions, particularly line info.
TEST_P(HlslDebugTest, FromFile)
{
loadFileCompileAndCheck(GlobalTestSettings.testRoot, GetParam().fileName,
Source::HLSL, Semantics::Vulkan, glslang::EShTargetVulkan_1_0, glslang::EShTargetSpv_1_0,
Target::Spv, true, GetParam().entryPoint,
"/baseResults/", false, true);
}
TEST_P(HlslDX9CompatibleTest, FromFile)
{
loadFileCompileAndCheckWithOptions(GlobalTestSettings.testRoot, GetParam().fileName, Source::HLSL,
Semantics::Vulkan, glslang::EShTargetVulkan_1_0, glslang::EShTargetSpv_1_0,
Target::BothASTAndSpv, true,
GetParam().entryPoint, "/baseResults/",
EShMessages::EShMsgHlslDX9Compatible);
}
// Compiling HLSL to legalized SPIR-V with debug instructions. Expected to
// successfully generate SPIR-V with debug instructions preserved through
// legalization, particularly line info.
TEST_P(HlslLegalDebugTest, FromFile)
{
loadFileCompileAndCheck(GlobalTestSettings.testRoot, GetParam().fileName,
Source::HLSL, Semantics::Vulkan, glslang::EShTargetVulkan_1_0, glslang::EShTargetSpv_1_0,
Target::Spv, true, GetParam().entryPoint,
"/baseResults/", true, true);
}
// clang-format off
INSTANTIATE_TEST_CASE_P(
ToSpirv, HlslCompileTest,
@ -153,6 +186,7 @@ INSTANTIATE_TEST_CASE_P(
{"hlsl.depthLess.frag", "PixelShaderFunction"},
{"hlsl.discard.frag", "PixelShaderFunction"},
{"hlsl.doLoop.frag", "PixelShaderFunction"},
{"hlsl.earlydepthstencil.frag", "main"},
{"hlsl.emptystructreturn.frag", "main"},
{"hlsl.emptystructreturn.vert", "main"},
{"hlsl.emptystruct.init.vert", "main"},
@ -196,6 +230,7 @@ INSTANTIATE_TEST_CASE_P(
{"hlsl.hull.void.tesc", "main"},
{"hlsl.hull.ctrlpt-1.tesc", "main"},
{"hlsl.hull.ctrlpt-2.tesc", "main"},
{"hlsl.format.rwtexture.frag", "main"},
{"hlsl.groupid.comp", "main"},
{"hlsl.identifier.sample.frag", "main"},
{"hlsl.if.frag", "PixelShaderFunction"},
@ -317,7 +352,9 @@ INSTANTIATE_TEST_CASE_P(
{"hlsl.shapeConvRet.frag", "main"},
{"hlsl.self_cast.frag", "main"},
{"hlsl.snorm.uav.comp", "main"},
{"hlsl.specConstant.frag", "main"},
{"hlsl.staticMemberFunction.frag", "main"},
{"hlsl.staticFuncInit.frag", "main"},
{"hlsl.store.rwbyteaddressbuffer.type.comp", "main"},
{"hlsl.stringtoken.frag", "main"},
{"hlsl.string.frag", "main"},
@ -381,7 +418,8 @@ INSTANTIATE_TEST_CASE_P(
{"hlsl.typeGraphCopy.vert", "main"},
{"hlsl.typedef.frag", "PixelShaderFunction"},
{"hlsl.whileLoop.frag", "PixelShaderFunction"},
{"hlsl.void.frag", "PixelShaderFunction"}
{"hlsl.void.frag", "PixelShaderFunction"},
{"hlsl.type.type.conversion.all.frag", "main"}
}),
FileNameAsCustomTestSuffix
);
@ -398,6 +436,8 @@ INSTANTIATE_TEST_CASE_P(
{"hlsl.wavequery.frag", "PixelShaderFunction"},
{"hlsl.wavereduction.comp", "CSMain"},
{"hlsl.wavevote.comp", "CSMain"},
{ "hlsl.type.type.conversion.valid.frag", "main" },
{"hlsl.int.dot.frag", "main"}
}),
FileNameAsCustomTestSuffix
);
@ -433,5 +473,34 @@ INSTANTIATE_TEST_CASE_P(
// clang-format on
#endif
// clang-format off
INSTANTIATE_TEST_CASE_P(
ToSpirv, HlslDebugTest,
::testing::ValuesIn(std::vector<FileNameEntryPointPair>{
{"hlsl.pp.line2.frag", "MainPs"}
}),
FileNameAsCustomTestSuffix
);
INSTANTIATE_TEST_CASE_P(
ToSpirv, HlslDX9CompatibleTest,
::testing::ValuesIn(std::vector<FileNameEntryPointPair>{
{"hlsl.sample.dx9.frag", "main"},
{"hlsl.sample.dx9.vert", "main"},
}),
FileNameAsCustomTestSuffix
);
// clang-format off
INSTANTIATE_TEST_CASE_P(
ToSpirv, HlslLegalDebugTest,
::testing::ValuesIn(std::vector<FileNameEntryPointPair>{
{"hlsl.pp.line4.frag", "MainPs"}
}),
FileNameAsCustomTestSuffix
);
// clang-format on
} // anonymous namespace
} // namespace glslangtest


@ -77,16 +77,16 @@ TEST_P(LinkTestVulkan, FromFile)
if (success && (controls & EShMsgSpvRules)) {
spv::SpvBuildLogger logger;
std::vector<uint32_t> spirv_binary;
glslang::SpvOptions options;
options.disableOptimizer = true;
options().disableOptimizer = true;
glslang::GlslangToSpv(*program.getIntermediate(shaders.front()->getStage()),
spirv_binary, &logger, &options);
spirv_binary, &logger, &options());
std::ostringstream disassembly_stream;
spv::Parameterize();
spv::Disassemble(disassembly_stream, spirv_binary);
result.spirvWarningsErrors = logger.getAllMessages();
result.spirv = disassembly_stream.str();
result.validationResult = !options().validate || logger.getAllMessages().empty();
}
std::ostringstream stream;
@ -98,7 +98,8 @@ TEST_P(LinkTestVulkan, FromFile)
std::string expectedOutput;
tryLoadFile(expectedOutputFname, "expected output", &expectedOutput);
checkEqAndUpdateIfRequested(expectedOutput, stream.str(), expectedOutputFname);
checkEqAndUpdateIfRequested(expectedOutput, stream.str(), expectedOutputFname,
result.spirvWarningsErrors);
}
// clang-format off
@ -106,7 +107,8 @@ INSTANTIATE_TEST_CASE_P(
Glsl, LinkTestVulkan,
::testing::ValuesIn(std::vector<std::vector<std::string>>({
{"link1.vk.frag", "link2.vk.frag"},
})),
{"spv.unit1.frag", "spv.unit2.frag", "spv.unit3.frag"},
}))
);
// clang-format on


@ -50,6 +50,7 @@ TEST_P(LinkTest, FromFile)
const size_t fileCount = fileNames.size();
const EShMessages controls = DeriveOptions(Source::GLSL, Semantics::OpenGL, Target::AST);
GlslangResult result;
result.validationResult = true;
// Compile each input shader file.
std::vector<std::unique_ptr<glslang::TShader>> shaders;
@ -100,7 +101,7 @@ INSTANTIATE_TEST_CASE_P(
{"max_vertices_0.geom"},
{"es-link1.frag", "es-link2.frag"},
{"missingBodies.vert"}
})),
}))
);
// clang-format on

Some files were not shown because too many files have changed in this diff.