diff --git a/Makefile.common b/Makefile.common index 4b0b631674..bc041798fb 100644 --- a/Makefile.common +++ b/Makefile.common @@ -129,7 +129,6 @@ OBJ += frontend/frontend.o \ runloop.o \ libretro-common/algorithms/mismatch.o \ libretro-common/queues/task_queue.o \ - tasks/tasks_internal.o \ tasks/task_content.o \ tasks/task_save_ram.o \ tasks/task_save_state.o \ @@ -150,6 +149,7 @@ OBJ += frontend/frontend.o \ libretro-common/file/nbio/nbio_stdio.o \ libretro-common/file/file_path.o \ file_path_special.o \ + file_path_str.o \ libretro-common/hash/rhash.o \ audio/audio_driver.o \ input/input_driver.o \ @@ -486,20 +486,8 @@ ifeq ($(HAVE_MENU_COMMON), 1) PSEUDO_NS := endif -ifeq ($(HAVE_LANGEXTRA), 1) -OBJ += menu/intl/menu_hash_de.o \ - menu/intl/menu_hash_es.o \ - menu/intl/menu_hash_eo.o \ - menu/intl/menu_hash_fr.o \ - menu/intl/menu_hash_it.o \ - menu/intl/menu_hash_nl.o \ - menu/intl/menu_hash_pl.o \ - menu/intl/menu_hash_pt.o -endif - OBJ += menu/menu_hash.o \ - menu/menu_driver.o \ + OBJ += menu/menu_driver.o \ menu/menu_content.o \ - menu/intl/menu_hash_us$(PSEUDO_NS).o \ menu/menu_input.o \ menu/menu_entry.o \ menu/menu_entries.o \ @@ -728,6 +716,16 @@ ifeq ($(HAVE_GL_CONTEXT), 1) endif endif +ifeq ($(HAVE_FFMPEG), 1) +ifneq ($(C89_BUILD), 1) +ifneq ($(C90_BUILD), 1) +ifneq ($(HAVE_GLES), 1) + OBJ += cores/libretro-ffmpeg/fft/fft.o + DEFINES += -Ideps -DHAVE_GL_FFT +endif +endif +endif +endif ifeq ($(HAVE_GLES), 1) LIBS += $(GLES_LIBS) @@ -1087,6 +1085,8 @@ endif ifeq ($(HAVE_FFMPEG), 1) OBJ += record/drivers/record_ffmpeg.o \ cores/libretro-ffmpeg/ffmpeg_core.o + + LIBS += $(AVCODEC_LIBS) $(AVFORMAT_LIBS) $(AVUTIL_LIBS) $(SWSCALE_LIBS) $(SWRESAMPLE_LIBS) $(FFMPEG_LIBS) DEFINES += $(AVCODEC_CFLAGS) $(AVFORMAT_CFLAGS) $(AVUTIL_CFLAGS) $(SWSCALE_CFLAGS) $(SWRESAMPLE_CFLAGS) DEFINES += -Wno-deprecated-declarations -DHAVE_FFMPEG -Iffmpeg diff --git a/Makefile.ps3.salamander b/Makefile.ps3.salamander index 1fea230bbe..3abf7340c1 100644 --- a/Makefile.ps3.salamander +++ b/Makefile.ps3.salamander @@ -33,6 +33,7 @@ PPU_SRCS = frontend/frontend_salamander.c \ libretro-common/compat/compat_strl.c \ libretro-common/streams/file_stream.c \ libretro-common/file/config_file.c \ + file_path_str.c \ verbosity.c ifeq ($(HAVE_LOGGER), 1) diff --git a/Makefile.psp1.salamander b/Makefile.psp1.salamander index 37c1f3f24d..fe26f85c52 100644 --- a/Makefile.psp1.salamander +++ b/Makefile.psp1.salamander @@ -45,6 +45,7 @@ OBJS = frontend/frontend_salamander.o \ libretro-common/streams/file_stream.o \ libretro-common/file/retro_stat.o \ libretro-common/hash/rhash.o \ + file_path_str.o \ verbosity.o \ bootstrap/psp1/kernel_functions.o diff --git a/audio/audio_filters/fft/fft.h b/audio/audio_filters/fft/fft.h index 4952e8e04a..64df4cc042 100644 --- a/audio/audio_filters/fft/fft.h +++ b/audio/audio_filters/fft/fft.h @@ -17,61 +17,10 @@ #define RARCH_FFT_H__ #include +#include typedef struct fft fft_t; -// C99 would be nice. 
-typedef struct -{ - float real; - float imag; -} fft_complex_t; - -static INLINE fft_complex_t fft_complex_mul(fft_complex_t a, - fft_complex_t b) -{ - fft_complex_t out = { - a.real * b.real - a.imag * b.imag, - a.imag * b.real + a.real * b.imag, - }; - - return out; - -} - -static INLINE fft_complex_t fft_complex_add(fft_complex_t a, - fft_complex_t b) -{ - fft_complex_t out = { - a.real + b.real, - a.imag + b.imag, - }; - - return out; - -} - -static INLINE fft_complex_t fft_complex_sub(fft_complex_t a, - fft_complex_t b) -{ - fft_complex_t out = { - a.real - b.real, - a.imag - b.imag, - }; - - return out; - -} - -static INLINE fft_complex_t fft_complex_conj(fft_complex_t a) -{ - fft_complex_t out = { - a.real, -a.imag, - }; - - return out; -} - fft_t *fft_new(unsigned block_size_log2); void fft_free(fft_t *fft); diff --git a/cheevos.c b/cheevos.c index b13872e08a..2c79a59e58 100644 --- a/cheevos.c +++ b/cheevos.c @@ -2075,8 +2075,8 @@ bool cheevos_load(const void *data) cheevos_locals.loaded = 0; - /* Just return OK if cheevos are disabled, the core doesn't support cheevos, or info is NULL. */ - if (!settings->cheevos.enable || !cheevos_locals.core_supports || !info) + /* Just return OK if the core doesn't support cheevos, or info is NULL. */ + if (!cheevos_locals.core_supports || !info) return true; cheevos_locals.meminfo[0].id = RETRO_MEMORY_SYSTEM_RAM; @@ -2091,8 +2091,11 @@ bool cheevos_load(const void *data) cheevos_locals.meminfo[3].id = RETRO_MEMORY_RTC; core_get_memory(&cheevos_locals.meminfo[3]); - /* The the supported extensions as a hint to what method we should use. */ + /* Bail out if cheevos are disabled. But set the above anyways, command_read_ram needs it. */ + if (!settings->cheevos.enable) + return true; + /* Use the supported extensions as a hint to what method we should use. 
*/ core_get_system_info(&sysinfo); for (i = 0; i < sizeof(finders) / sizeof(finders[0]); i++) @@ -2185,9 +2188,13 @@ void cheevos_populate_menu(void *data) settings_t *settings = config_get_ptr(); menu_displaylist_info_t *info = (menu_displaylist_info_t*)data; - menu_entries_add(info->list, "Unlocked Achievements:", - "", MENU_SETTINGS_CHEEVOS_NONE, 0, 0); - menu_entries_add(info->list, "", "", MENU_SETTINGS_CHEEVOS_NONE, 0, 0); + menu_entries_add_enum(info->list, + msg_hash_to_str(MENU_ENUM_LABEL_VALUE_CHEEVOS_UNLOCKED_ACHIEVEMENTS), + msg_hash_to_str(MENU_ENUM_LABEL_CHEEVOS_UNLOCKED_ACHIEVEMENTS), + MENU_ENUM_LABEL_CHEEVOS_UNLOCKED_ACHIEVEMENTS, + MENU_SETTINGS_CHEEVOS_NONE, 0, 0); + menu_entries_add_enum(info->list, "", "", MSG_UNKNOWN, + MENU_SETTINGS_CHEEVOS_NONE, 0, 0); cheevo = cheevos_locals.core.cheevos; end = cheevos_locals.core.cheevos + cheevos_locals.core.count; @@ -2195,8 +2202,9 @@ void cheevos_populate_menu(void *data) for (i = 0; cheevo < end; i++, cheevo++) { if (!cheevo->active) - menu_entries_add(info->list, cheevo->title, - cheevo->description, MENU_SETTINGS_CHEEVOS_START + i, 0, 0); + menu_entries_add_enum(info->list, cheevo->title, + cheevo->description, MSG_UNKNOWN, + MENU_SETTINGS_CHEEVOS_START + i, 0, 0); } if (settings->cheevos.test_unofficial) @@ -2208,15 +2216,21 @@ void cheevos_populate_menu(void *data) for (i = cheevos_locals.core.count; cheevo < end; i++, cheevo++) { if (!cheevo->active) - menu_entries_add(info->list, cheevo->title, - cheevo->description, MENU_SETTINGS_CHEEVOS_START + i, 0, 0); + menu_entries_add_enum(info->list, cheevo->title, + cheevo->description, MSG_UNKNOWN, + MENU_SETTINGS_CHEEVOS_START + i, 0, 0); } } - menu_entries_add(info->list, "", "", MENU_SETTINGS_CHEEVOS_NONE, 0, 0); - menu_entries_add(info->list, "Locked Achievements:", "", + menu_entries_add_enum(info->list, "", "", MSG_UNKNOWN, + MENU_SETTINGS_CHEEVOS_NONE, 0, 0); + menu_entries_add_enum(info->list, + msg_hash_to_str(MENU_ENUM_LABEL_VALUE_CHEEVOS_LOCKED_ACHIEVEMENTS), + msg_hash_to_str(MENU_ENUM_LABEL_CHEEVOS_LOCKED_ACHIEVEMENTS), + MENU_ENUM_LABEL_CHEEVOS_LOCKED_ACHIEVEMENTS, + MENU_SETTINGS_CHEEVOS_NONE, 0, 0); + menu_entries_add_enum(info->list, "", "", MSG_UNKNOWN, MENU_SETTINGS_CHEEVOS_NONE, 0, 0); - menu_entries_add(info->list, "", "", MENU_SETTINGS_CHEEVOS_NONE, 0, 0); cheevo = cheevos_locals.core.cheevos; end = cheevos_locals.core.cheevos + cheevos_locals.core.count; @@ -2224,8 +2238,9 @@ void cheevos_populate_menu(void *data) for (i = 0; cheevo < end; i++, cheevo++) { if (cheevo->active) - menu_entries_add(info->list, cheevo->title, - cheevo->description, MENU_SETTINGS_CHEEVOS_START + i, 0, 0); + menu_entries_add_enum(info->list, cheevo->title, + cheevo->description, MSG_UNKNOWN, + MENU_SETTINGS_CHEEVOS_START + i, 0, 0); } if (settings->cheevos.test_unofficial) @@ -2237,8 +2252,9 @@ void cheevos_populate_menu(void *data) for (i = cheevos_locals.core.count; cheevo < end; i++, cheevo++) { if (cheevo->active) - menu_entries_add(info->list, cheevo->title, - cheevo->description, MENU_SETTINGS_CHEEVOS_START + i, 0, 0); + menu_entries_add_enum(info->list, cheevo->title, + cheevo->description, MSG_UNKNOWN, + MENU_SETTINGS_CHEEVOS_START + i, 0, 0); } } #endif diff --git a/command.c b/command.c index f0f71f53ad..4459b7f87c 100644 --- a/command.c +++ b/command.c @@ -149,32 +149,23 @@ struct cmd_action_map }; #ifdef HAVE_COMMAND -#define COMMAND_EXT_GLSL 0x7c976537U -#define COMMAND_EXT_GLSLP 0x0f840c87U -#define COMMAND_EXT_CG 0x0059776fU -#define COMMAND_EXT_CGP 0x0b8865bfU 
-#define COMMAND_EXT_SLANG 0x105ce63aU -#define COMMAND_EXT_SLANGP 0x1bf9adeaU - static bool command_set_shader(const char *arg) { char msg[256]; enum rarch_shader_type type = RARCH_SHADER_NONE; - const char *ext = path_get_extension(arg); - uint32_t ext_hash = msg_hash_calculate(ext); - switch (ext_hash) + switch (msg_hash_to_file_type(msg_hash_calculate(path_get_extension(arg)))) { - case COMMAND_EXT_GLSL: - case COMMAND_EXT_GLSLP: + case FILE_TYPE_SHADER_GLSL: + case FILE_TYPE_SHADER_PRESET_GLSLP: type = RARCH_SHADER_GLSL; break; - case COMMAND_EXT_CG: - case COMMAND_EXT_CGP: + case FILE_TYPE_SHADER_CG: + case FILE_TYPE_SHADER_PRESET_CGP: type = RARCH_SHADER_CG; break; - case COMMAND_EXT_SLANG: - case COMMAND_EXT_SLANGP: + case FILE_TYPE_SHADER_SLANG: + case FILE_TYPE_SHADER_PRESET_SLANGP: type = RARCH_SHADER_SLANG; break; default: @@ -197,7 +188,7 @@ static bool command_read_ram(const char *arg) cheevos_var_t var; const uint8_t * data; unsigned nbytes; - int i; + unsigned i; char reply[256]; char *reply_at = NULL; @@ -225,7 +216,6 @@ static bool command_read_ram(const char *arg) command_reply(reply, reply_at+strlen(" -1\n") - reply); } - return true; } diff --git a/config.def.h b/config.def.h index d99c529eb9..22350ae7f1 100644 --- a/config.def.h +++ b/config.def.h @@ -411,6 +411,8 @@ static const bool disable_composition = false; /* Video VSYNC (recommended) */ static const bool vsync = true; +static const unsigned max_swapchain_images = 3; + /* Attempts to hard-synchronize CPU and GPU. * Can reduce latency at cost of performance. */ static const bool hard_sync = false; diff --git a/configuration.c b/configuration.c index bcb8b65bcb..57fa933edb 100644 --- a/configuration.c +++ b/configuration.c @@ -495,6 +495,7 @@ static void config_set_defaults(void) settings->video.fullscreen_y = fullscreen_y; settings->video.disable_composition = disable_composition; settings->video.vsync = vsync; + settings->video.max_swapchain_images = max_swapchain_images; settings->video.hard_sync = hard_sync; settings->video.hard_sync_frames = hard_sync_frames; settings->video.frame_delay = frame_delay; @@ -941,7 +942,7 @@ static config_file_t *open_default_config_file(void) #if defined(_WIN32) && !defined(_XBOX) fill_pathname_application_path(app_path, sizeof(app_path)); fill_pathname_resolve_relative(conf_path, app_path, - "retroarch.cfg", sizeof(conf_path)); + file_path_str(FILE_PATH_MAIN_CONFIG), sizeof(conf_path)); conf = config_file_new(conf_path); @@ -951,7 +952,7 @@ static config_file_t *open_default_config_file(void) sizeof(application_data))) { fill_pathname_join(conf_path, application_data, - "retroarch.cfg", sizeof(conf_path)); + file_path_str(FILE_PATH_MAIN_CONFIG), sizeof(conf_path)); conf = config_file_new(conf_path); } } @@ -969,7 +970,7 @@ static config_file_t *open_default_config_file(void) /* Since this is a clean config file, we can * safely use config_save_on_exit. 
*/ fill_pathname_resolve_relative(conf_path, app_path, - "retroarch.cfg", sizeof(conf_path)); + file_path_str(FILE_PATH_MAIN_CONFIG), sizeof(conf_path)); config_set_bool(conf, "config_save_on_exit", true); saved = config_file_write(conf, conf_path); } @@ -993,7 +994,7 @@ static config_file_t *open_default_config_file(void) path_mkdir(application_data); fill_pathname_join(conf_path, application_data, - "retroarch.cfg", sizeof(conf_path)); + file_path_str(FILE_PATH_MAIN_CONFIG), sizeof(conf_path)); conf = config_file_new(conf_path); if (!conf) @@ -1027,7 +1028,7 @@ static config_file_t *open_default_config_file(void) if (has_application_data) { fill_pathname_join(conf_path, application_data, - "retroarch.cfg", sizeof(conf_path)); + file_path_str(FILE_PATH_MAIN_CONFIG), sizeof(conf_path)); RARCH_LOG("Looking for config in: \"%s\".\n", conf_path); conf = config_file_new(conf_path); } @@ -1051,7 +1052,7 @@ static config_file_t *open_default_config_file(void) fill_pathname_basedir(basedir, conf_path, sizeof(basedir)); - fill_pathname_join(conf_path, conf_path, "retroarch.cfg", sizeof(conf_path)); + fill_pathname_join(conf_path, conf_path, file_path_str(FILE_PATH_MAIN_CONFIG), sizeof(conf_path)); if (path_mkdir(basedir)) { @@ -1059,7 +1060,7 @@ static config_file_t *open_default_config_file(void) char skeleton_conf[PATH_MAX_LENGTH] = {0}; fill_pathname_join(skeleton_conf, GLOBAL_CONFIG_DIR, - "retroarch.cfg", sizeof(skeleton_conf)); + file_path_str(FILE_PATH_MAIN_CONFIG), sizeof(skeleton_conf)); conf = config_file_new(skeleton_conf); if (conf) RARCH_WARN("Config: using skeleton config \"%s\" as base for a new config file.\n", skeleton_conf); @@ -1291,6 +1292,7 @@ static bool config_load_file(const char *path, bool set_defaults) CONFIG_GET_INT_BASE (conf, settings, video.monitor_index, "video_monitor_index"); CONFIG_GET_BOOL_BASE(conf, settings, video.disable_composition, "video_disable_composition"); CONFIG_GET_BOOL_BASE(conf, settings, video.vsync, "video_vsync"); + CONFIG_GET_INT_BASE(conf, settings, video.max_swapchain_images, "video_max_swapchain_images"); CONFIG_GET_BOOL_BASE(conf, settings, video.hard_sync, "video_hard_sync"); #ifdef HAVE_MENU @@ -1559,14 +1561,14 @@ static bool config_load_file(const char *path, bool set_defaults) fill_pathname_resolve_relative( settings->path.content_history, global->path.config, - "content_history.lpl", + file_path_str(FILE_PATH_CONTENT_HISTORY), sizeof(settings->path.content_history)); } else { fill_pathname_join(settings->path.content_history, settings->directory.content_history, - "content_history.lpl", + file_path_str(FILE_PATH_CONTENT_HISTORY), sizeof(settings->path.content_history)); } } @@ -2658,6 +2660,8 @@ bool config_save_file(const char *path) config_set_bool(conf, "video_vsync", settings->video.vsync); + config_set_int(conf, "video_max_swapchain_images", + settings->video.max_swapchain_images); config_set_bool(conf, "video_hard_sync", settings->video.hard_sync); config_set_int(conf, "video_hard_sync_frames", settings->video.hard_sync_frames); diff --git a/configuration.h b/configuration.h index 95fa5e9571..2cfbcb1fb9 100644 --- a/configuration.h +++ b/configuration.h @@ -55,6 +55,7 @@ typedef struct settings bool vsync; bool hard_sync; bool black_frame_insertion; + unsigned max_swapchain_images; unsigned swap_interval; unsigned hard_sync_frames; unsigned frame_delay; diff --git a/core_impl.c b/core_impl.c index 51cca53350..07cf7a99d0 100644 --- a/core_impl.c +++ b/core_impl.c @@ -378,8 +378,12 @@ bool core_load(void) bool 
core_verify_api_version(void) { unsigned api_version = core.retro_api_version(); - RARCH_LOG("Version of libretro API: %u\n", api_version); - RARCH_LOG("Compiled against API: %u\n", RETRO_API_VERSION); + RARCH_LOG("%s: %u\n", + msg_hash_to_str(MSG_VERSION_OF_LIBRETRO_API), + api_version); + RARCH_LOG("%s: %u\n", + msg_hash_to_str(MSG_COMPILED_AGAINST_API), + RETRO_API_VERSION); if (api_version != RETRO_API_VERSION) { diff --git a/core_info.c b/core_info.c index 42389247db..c35ac5598b 100644 --- a/core_info.c +++ b/core_info.c @@ -103,7 +103,7 @@ static void core_info_list_resolve_all_firmware( snprintf(path_key, sizeof(path_key), "firmware%u_path", c); snprintf(desc_key, sizeof(desc_key), "firmware%u_desc", c); - snprintf(opt_key, sizeof(opt_key), "firmware%u_opt", c); + snprintf(opt_key, sizeof(opt_key), "firmware%u_opt", c); config_get_string(config, path_key, &info->firmware[c].path); config_get_string(config, desc_key, &info->firmware[c].desc); diff --git a/cores/libretro-ffmpeg/Makefile b/cores/libretro-ffmpeg/Makefile index 0d8f0412ec..5a07733917 100644 --- a/cores/libretro-ffmpeg/Makefile +++ b/cores/libretro-ffmpeg/Makefile @@ -232,12 +232,13 @@ ifeq ($(HAVE_SSA), 1) endif LIBRETROCOMM_DIR := ../../libretro-common +DEPS_DIR := ../../deps LIBAVCODEC_SOURCE := LIBAVFORMAT_SOURCE := LIBAVUTIL_SOURCE := LIBRETRO_SOURCE := ffmpeg_core.c \ - $(LIBRETROCOMM_DIR)/queues/fifo_buffer.c \ + $(LIBRETROCOMM_DIR)/queues/fifo_queue.c \ $(LIBRETROCOMM_DIR)/rthreads/rthreads.c DEPS_SOURCE := @@ -2165,7 +2166,7 @@ else CFLAGS += -DHAVE_INTRINSICS_NEON=0 endif -CFLAGS += -Ilibretro-common/include +CFLAGS += -I$(LIBRETROCOMM_DIR)/include -I$(DEPS_DIR) OBJECTS := $(DEPS_SOURCE:.c=.o) $(LIBRETRO_SOURCE:.c=.o) $(FFT_SOURCE:.cpp=.o) $(LIBAVUTIL_SOURCE:.c=.o) $(LIBAVFORMAT_SOURCE:.c=.o) $(LIBSWSCALE_SOURCE:.c=.o) $(LIBAVCODEC_SOURCE:.c=.o) $(LIBSWRESAMPLE_SOURCE:.c=.o) diff --git a/cores/libretro-ffmpeg/fft/fft.cpp b/cores/libretro-ffmpeg/fft/fft.cpp new file mode 100644 index 0000000000..d73f5684da --- /dev/null +++ b/cores/libretro-ffmpeg/fft/fft.cpp @@ -0,0 +1,733 @@ +#include +#include +#include +#include + +#include +#include + +#include +#include +#include + +#define GLM_SWIZZLE +#define GLM_FORCE_RADIANS +#include +#include +#include +using namespace glm; + +#define GL_DEBUG 0 +#if GL_DEBUG +#define GL_CHECK_ERROR() do { \ + if (glGetError() != GL_NO_ERROR) \ + { \ + log_cb(RETRO_LOG_ERROR, "GL error at line: %d\n", __LINE__); \ + abort(); \ + } \ +} while(0) +#else +#define GL_CHECK_ERROR() +#endif + +#ifndef M_HALF_PI +#define M_HALF_PI 1.57079632679489661923132169163975144 +#endif + +extern retro_log_printf_t log_cb; + +struct target +{ + GLuint tex; + GLuint fbo; +}; + +struct Pass +{ + struct target target; + GLuint parameter_tex; +}; + +typedef struct GLFFT +{ + GLuint ms_rb_color; + GLuint ms_rb_ds; + GLuint ms_fbo; + + Pass *passes; + unsigned passes_size; + + GLuint input_tex; + GLuint window_tex; + GLuint prog_real; + GLuint prog_complex; + GLuint prog_resolve; + GLuint prog_blur; + + GLuint quad; + GLuint vao; + + unsigned output_ptr; + + struct target output, resolve, blur; + + struct Block + { + GLuint prog; + GLuint vao; + GLuint vbo; + GLuint ibo; + unsigned elems; + } block; + + GLuint pbo; + GLshort *sliding; + unsigned sliding_size; + + unsigned steps; + unsigned size; + unsigned block_size; + unsigned depth; +} glfft_t; + +#include "gl_shaders/fft_heightmap.glsl.vert.h" +#include "gl_shaders/fft_heightmap.glsl.frag.h" +#include "gl_shaders/fft_vertex_program.glsl.vert.h" +#include 
"gl_shaders/fft_fragment_program_resolve.glsl.frag.h" +#include "gl_shaders/fft_fragment_program_real.glsl.frag.h" +#include "gl_shaders/fft_fragment_program_complex.glsl.frag.h" +#include "gl_shaders/fft_fragment_program_blur.glsl.frag.h" + +static GLuint fft_compile_shader(glfft_t *fft, GLenum type, const char *source) +{ + GLint status = 0; + GLuint shader = glCreateShader(type); + + glShaderSource(shader, 1, (const GLchar**)&source, NULL); + glCompileShader(shader); + + glGetShaderiv(shader, GL_COMPILE_STATUS, &status); + + if (!status) + { + char log_info[8 * 1024]; + GLsizei log_len; + + log_cb(RETRO_LOG_ERROR, "Failed to compile.\n"); + glGetShaderInfoLog(shader, sizeof(log_info), &log_len, log_info); + log_cb(RETRO_LOG_ERROR, "ERROR: %s\n", log_info); + return 0; + } + + return shader; +} + +static GLuint fft_compile_program(glfft_t *fft, + const char *vertex_source, const char *fragment_source) +{ + GLint status = 0; + GLuint prog = glCreateProgram(); + GLuint vert = fft_compile_shader(fft, GL_VERTEX_SHADER, vertex_source); + GLuint frag = fft_compile_shader(fft, GL_FRAGMENT_SHADER, fragment_source); + + glAttachShader(prog, vert); + glAttachShader(prog, frag); + glLinkProgram(prog); + + glGetProgramiv(prog, GL_LINK_STATUS, &status); + + if (!status) + { + char log_info[8 * 1024]; + GLsizei log_len; + + log_cb(RETRO_LOG_ERROR, "Failed to link.\n"); + glGetProgramInfoLog(prog, sizeof(log_info), &log_len, log_info); + log_cb(RETRO_LOG_ERROR, "ERROR: %s\n", log_info); + } + + glDeleteShader(vert); + glDeleteShader(frag); + return prog; +} + +static void fft_render(glfft_t *fft, GLuint backbuffer, unsigned width, unsigned height) +{ + mat4 mvp; + + /* Render scene. */ + glBindFramebuffer(GL_FRAMEBUFFER, fft->ms_fbo ? fft->ms_fbo : backbuffer); + glViewport(0, 0, width, height); + glClearColor(0.1f, 0.15f, 0.1f, 1.0f); + glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT | GL_STENCIL_BUFFER_BIT); + + vec3 eye(0, 80, -60); + mvp = perspective((float)M_HALF_PI, (float)width / height, 1.0f, 500.0f) * + lookAt(eye, eye + vec3(0.0f, 0.0f, 1.0f), vec3(0.0f, 1.0f, 0.0f)); + + glUseProgram(fft->block.prog); + glUniformMatrix4fv(glGetUniformLocation(fft->block.prog, "uMVP"), + 1, GL_FALSE, value_ptr(mvp)); + glUniform2i(glGetUniformLocation(fft->block.prog, "uOffset"), + (-int(fft->block_size) + 1) / 2, fft->output_ptr); + glUniform4f(glGetUniformLocation(fft->block.prog, "uHeightmapParams"), + -(fft->block_size - 1.0f) / 2.0f, 0.0f, 3.0f, 2.0f); + glUniform1f(glGetUniformLocation(fft->block.prog, "uAngleScale"), + M_PI / ((fft->block_size - 1) / 2)); + + glBindVertexArray(fft->block.vao); + glBindTexture(GL_TEXTURE_2D, fft->blur.tex); + glDrawElements(GL_TRIANGLE_STRIP, fft->block.elems, GL_UNSIGNED_INT, NULL); + glBindVertexArray(0); + glUseProgram(0); + + if (fft->ms_fbo) + { + glBindFramebuffer(GL_READ_FRAMEBUFFER, fft->ms_fbo); + glBindFramebuffer(GL_DRAW_FRAMEBUFFER, backbuffer); + glBlitFramebuffer(0, 0, width, height, 0, 0, width, height, + GL_COLOR_BUFFER_BIT, GL_NEAREST); + + static const GLenum attachments[] = { GL_COLOR_ATTACHMENT0, GL_DEPTH_STENCIL_ATTACHMENT }; + glBindFramebuffer(GL_FRAMEBUFFER, fft->ms_fbo); + glInvalidateFramebuffer(GL_FRAMEBUFFER, 2, attachments); + GL_CHECK_ERROR(); + } + + glBindFramebuffer(GL_FRAMEBUFFER, 0); + GL_CHECK_ERROR(); +} + +static void fft_step(glfft_t *fft, + const GLshort *audio_buffer, unsigned frames) +{ + unsigned i; + GLshort *buffer = NULL; + GLshort *slide = (GLshort*)&fft->sliding[0]; + + glEnable(GL_DEPTH_TEST); + glEnable(GL_CULL_FACE); 
+ glBindVertexArray(fft->vao); + + glActiveTexture(GL_TEXTURE2); + glBindTexture(GL_TEXTURE_2D, fft->window_tex); + + glActiveTexture(GL_TEXTURE0); + glBindTexture(GL_TEXTURE_2D, fft->input_tex); + glUseProgram(fft->prog_real); + + memmove(slide, slide + frames * 2, + (fft->sliding_size - 2 * frames) * sizeof(GLshort)); + memcpy(slide + fft->sliding_size - frames * 2, audio_buffer, + 2 * frames * sizeof(GLshort)); + + /* Upload audio data to GPU. */ + glBindBuffer(GL_PIXEL_UNPACK_BUFFER, fft->pbo); + + buffer = (GLshort*)(glMapBufferRange(GL_PIXEL_UNPACK_BUFFER, 0, + 2 * fft->size * sizeof(GLshort), + GL_MAP_WRITE_BIT | GL_MAP_INVALIDATE_BUFFER_BIT)); + + if (buffer) + { + memcpy(buffer, slide, fft->sliding_size * sizeof(GLshort)); + glUnmapBuffer(GL_PIXEL_UNPACK_BUFFER); + } + glTexSubImage2D(GL_TEXTURE_2D, 0, 0, 0, fft->size, 1, + GL_RG_INTEGER, GL_SHORT, NULL); + glBindBuffer(GL_PIXEL_UNPACK_BUFFER, 0); + + /* Perform FFT of new block. */ + glViewport(0, 0, fft->size, 1); + + for (i = 0; i < fft->steps; i++) + { + if (i == fft->steps - 1) + { + glBindFramebuffer(GL_FRAMEBUFFER, fft->output.fbo); + glUniform1i(glGetUniformLocation(i == 0 + ? fft->prog_real : fft->prog_complex, "uViewportOffset"), + fft->output_ptr); + glViewport(0, fft->output_ptr, fft->size, 1); + } + else + { + glUniform1i(glGetUniformLocation(i == 0 + ? fft->prog_real : fft->prog_complex, "uViewportOffset"), 0); + glBindFramebuffer(GL_FRAMEBUFFER, fft->passes[i].target.fbo); + glClear(GL_COLOR_BUFFER_BIT); + } + + glActiveTexture(GL_TEXTURE1); + glBindTexture(GL_TEXTURE_2D, fft->passes[i].parameter_tex); + + glDrawArrays(GL_TRIANGLE_STRIP, 0, 4); + + glActiveTexture(GL_TEXTURE0); + glBindTexture(GL_TEXTURE_2D, fft->passes[i].target.tex); + + if (i == 0) + glUseProgram(fft->prog_complex); + } + glActiveTexture(GL_TEXTURE0); + + /* Resolve new chunk to heightmap. */ + glViewport(0, fft->output_ptr, fft->size, 1); + glUseProgram(fft->prog_resolve); + glBindFramebuffer(GL_FRAMEBUFFER, fft->resolve.fbo); + const GLfloat resolve_offset[] = { 0.0f, float(fft->output_ptr) / fft->depth, + 1.0f, 1.0f / fft->depth }; + glUniform4fv(glGetUniformLocation(fft->prog_resolve, "uOffsetScale"), + 1, resolve_offset); + glBindTexture(GL_TEXTURE_2D, fft->output.tex); + glDrawArrays(GL_TRIANGLE_STRIP, 0, 4); + + /* Re-blur damaged regions of heightmap. */ + glUseProgram(fft->prog_blur); + glBindTexture(GL_TEXTURE_2D, fft->resolve.tex); + glBindFramebuffer(GL_FRAMEBUFFER, fft->blur.fbo); + glUniform4fv(glGetUniformLocation(fft->prog_blur, "uOffsetScale"), + 1, resolve_offset); + glDrawArrays(GL_TRIANGLE_STRIP, 0, 4); + + /* Mipmap the heightmap. 
*/ + glBindTexture(GL_TEXTURE_2D, fft->blur.tex); + glGenerateMipmap(GL_TEXTURE_2D); + glBindTexture(GL_TEXTURE_2D, 0); + + fft->output_ptr++; + fft->output_ptr &= fft->depth - 1; + + glBindVertexArray(0); + glUseProgram(0); + GL_CHECK_ERROR(); +} + +static inline unsigned log2i(unsigned x) +{ + unsigned res; + for (res = 0; x; x >>= 1) + res++; + return res - 1; +} + +static inline unsigned bitinverse(unsigned x, unsigned size) +{ + unsigned i; + unsigned size_log2 = log2i(size); + unsigned ret = 0; + + for (i = 0; i < size_log2; i++) + ret |= ((x >> i) & 0x1) << (size_log2 - 1 - i); + return ret; +} + +static fft_complex_t exp_imag(float phase) +{ + fft_complex_t out = { cosf(phase), sinf(phase) }; + return out; +} + +void fft_build_params(glfft_t *fft, GLuint *buffer, + unsigned step, unsigned size) +{ + unsigned i, j; + unsigned step_size = 1 << step; + + for (i = 0; i < size; i += step_size << 1) + { + for (j = i; j < i + step_size; j++) + { + int s = j - i; + float phase = -1.0f * (float)s / step_size; + unsigned a = j; + unsigned b = j + step_size; + fft_complex_t twiddle = exp_imag(M_PI * phase); + + unsigned read_a = (step == 0) ? bitinverse(a, size) : a; + unsigned read_b = (step == 0) ? bitinverse(b, size) : b; + vec2 tmp = vec2(twiddle.real, twiddle.imag); + + buffer[2 * a + 0] = (read_a << 16) | read_b; + buffer[2 * a + 1] = packHalf2x16(tmp); + buffer[2 * b + 0] = (read_a << 16) | read_b; + buffer[2 * b + 1] = packHalf2x16(-tmp); + } + } +} + +static void fft_init_quad_vao(glfft_t *fft) +{ + static const GLbyte quad_buffer[] = { + -1, -1, 1, -1, -1, 1, 1, 1, + 0, 0, 1, 0, 0, 1, 1, 1, + }; + glGenBuffers(1, &fft->quad); + glBindBuffer(GL_ARRAY_BUFFER, fft->quad); + glBufferData(GL_ARRAY_BUFFER, + sizeof(quad_buffer), quad_buffer, GL_STATIC_DRAW); + glBindBuffer(GL_ARRAY_BUFFER, 0); + + glGenVertexArrays(1, &fft->vao); + glBindVertexArray(fft->vao); + glBindBuffer(GL_ARRAY_BUFFER, fft->quad); + glEnableVertexAttribArray(0); + glEnableVertexAttribArray(1); + glVertexAttribPointer(0, 2, GL_BYTE, GL_FALSE, 0, 0); + glVertexAttribPointer(1, 2, GL_BYTE, GL_FALSE, 0, + reinterpret_cast(uintptr_t(8))); + glBindVertexArray(0); + glBindBuffer(GL_ARRAY_BUFFER, 0); +} + +static void fft_init_texture(glfft_t *fft, GLuint *tex, GLenum format, + unsigned width, unsigned height, unsigned levels, GLenum mag, GLenum min) +{ + glGenTextures(1, tex); + glBindTexture(GL_TEXTURE_2D, *tex); + glTexStorage2D(GL_TEXTURE_2D, levels, format, width, height); + glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, mag); + glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, min); + glBindTexture(GL_TEXTURE_2D, 0); +} + +static void fft_init_target(glfft_t *fft, struct target *target, GLenum format, + unsigned width, unsigned height, unsigned levels, GLenum mag, GLenum min) +{ + fft_init_texture(fft, &target->tex, format, width, height, levels, mag, min); + glGenFramebuffers(1, &target->fbo); + glBindFramebuffer(GL_FRAMEBUFFER, target->fbo); + + glFramebufferTexture2D(GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, GL_TEXTURE_2D, + target->tex, 0); + + if (format == GL_RGBA8) + { + glClearColor(0, 0, 0, 0); + glClear(GL_COLOR_BUFFER_BIT); + } + else if (format == GL_RG16I) + { + static const GLint v[] = { 0, 0, 0, 0 }; + glClearBufferiv(GL_COLOR, 0, v); + } + else + { + static const GLuint v[] = { 0, 0, 0, 0 }; + glClearBufferuiv(GL_COLOR, 0, v); + } + glBindFramebuffer(GL_FRAMEBUFFER, 0); +} + +#define KAISER_BETA 12.0 + +static void fft_init(glfft_t *fft) +{ + unsigned i; + double window_mod; + GLushort *window; 
+ static const GLfloat unity[] = { 0.0f, 0.0f, 1.0f, 1.0f }; + + fft->prog_real = fft_compile_program(fft, fft_vertex_program, fft_fragment_program_real); + fft->prog_complex = fft_compile_program(fft, fft_vertex_program, fft_fragment_program_complex); + fft->prog_resolve = fft_compile_program(fft, fft_vertex_program, fft_fragment_program_resolve); + fft->prog_blur = fft_compile_program(fft, fft_vertex_program, fft_fragment_program_blur); + GL_CHECK_ERROR(); + + glUseProgram(fft->prog_real); + glUniform1i(glGetUniformLocation(fft->prog_real, "sTexture"), 0); + glUniform1i(glGetUniformLocation(fft->prog_real, "sParameterTexture"), 1); + glUniform1i(glGetUniformLocation(fft->prog_real, "sWindow"), 2); + glUniform4fv(glGetUniformLocation(fft->prog_real, "uOffsetScale"), 1, unity); + + glUseProgram(fft->prog_complex); + glUniform1i(glGetUniformLocation(fft->prog_complex, "sTexture"), 0); + glUniform1i(glGetUniformLocation(fft->prog_complex, "sParameterTexture"), 1); + glUniform4fv(glGetUniformLocation(fft->prog_complex, "uOffsetScale"), 1, unity); + + glUseProgram(fft->prog_resolve); + glUniform1i(glGetUniformLocation(fft->prog_resolve, "sFFT"), 0); + glUniform4fv(glGetUniformLocation(fft->prog_resolve, "uOffsetScale"), 1, unity); + + glUseProgram(fft->prog_blur); + glUniform1i(glGetUniformLocation(fft->prog_blur, "sHeight"), 0); + glUniform4fv(glGetUniformLocation(fft->prog_blur, "uOffsetScale"), 1, unity); + + GL_CHECK_ERROR(); + + fft_init_texture(fft, &fft->window_tex, GL_R16UI, + fft->size, 1, 1, GL_NEAREST, GL_NEAREST); + GL_CHECK_ERROR(); + + window = (GLushort*)calloc(fft->size, sizeof(GLushort)); + + window_mod = 1.0 / kaiser_window_function(0.0, KAISER_BETA); + + for (i = 0; i < fft->size; i++) + { + double phase = (double)(i - int(fft->size) / 2) / (int(fft->size) / 2); + double w = kaiser_window_function(phase, KAISER_BETA); + window[i] = round(0xffff * w * window_mod); + } + glBindTexture(GL_TEXTURE_2D, fft->window_tex); + glTexSubImage2D(GL_TEXTURE_2D, 0, 0, 0, + fft->size, 1, GL_RED_INTEGER, GL_UNSIGNED_SHORT, &window[0]); + glBindTexture(GL_TEXTURE_2D, 0); + + GL_CHECK_ERROR(); + fft_init_texture(fft, &fft->input_tex, GL_RG16I, + fft->size, 1, 1, GL_NEAREST, GL_NEAREST); + fft_init_target(fft, &fft->output, GL_RG32UI, + fft->size, fft->depth, 1, GL_NEAREST, GL_NEAREST); + fft_init_target(fft, &fft->resolve, GL_RGBA8, + fft->size, fft->depth, 1, GL_NEAREST, GL_NEAREST); + fft_init_target(fft, &fft->blur, GL_RGBA8, + fft->size, fft->depth, + log2i(MAX(fft->size, fft->depth)) + 1, + GL_NEAREST, GL_LINEAR_MIPMAP_LINEAR); + + GL_CHECK_ERROR(); + + for (i = 0; i < fft->steps; i++) + { + GLuint *param_buffer = NULL; + fft_init_target(fft, &fft->passes[i].target, + GL_RG32UI, fft->size, 1, 1, GL_NEAREST, GL_NEAREST); + fft_init_texture(fft, &fft->passes[i].parameter_tex, + GL_RG32UI, fft->size, 1, 1, GL_NEAREST, GL_NEAREST); + + param_buffer = (GLuint*)calloc(2 * fft->size, sizeof(GLuint)); + + fft_build_params(fft, ¶m_buffer[0], i, fft->size); + + glBindTexture(GL_TEXTURE_2D, fft->passes[i].parameter_tex); + glTexSubImage2D(GL_TEXTURE_2D, 0, 0, 0, + fft->size, 1, GL_RG_INTEGER, GL_UNSIGNED_INT, ¶m_buffer[0]); + glBindTexture(GL_TEXTURE_2D, 0); + + free(param_buffer); + } + + GL_CHECK_ERROR(); + glGenBuffers(1, &fft->pbo); + glBindBuffer(GL_PIXEL_UNPACK_BUFFER, fft->pbo); + glBufferData(GL_PIXEL_UNPACK_BUFFER, + fft->size * 2 * sizeof(GLshort), 0, GL_DYNAMIC_DRAW); + glBindBuffer(GL_PIXEL_UNPACK_BUFFER, 0); + + free(window); +} + +static void fft_init_block(glfft_t *fft) +{ + unsigned 
x, y; + GLuint *bp; + GLushort *block_vertices; + GLuint *block_indices; + unsigned block_vertices_size; + unsigned block_indices_size; + int pos = 0; + + fft->block.prog = fft_compile_program(fft, + fft_vertex_program_heightmap, fft_fragment_program_heightmap); + glUseProgram(fft->block.prog); + glUniform1i(glGetUniformLocation(fft->block.prog, "sHeight"), 0); + + block_vertices_size = 2 * fft->block_size * fft->depth; + block_vertices = (GLushort*)calloc(block_vertices_size, sizeof(GLushort)); + + for (y = 0; y < fft->depth; y++) + { + for (x = 0; x < fft->block_size; x++) + { + block_vertices[2 * (y * fft->block_size + x) + 0] = x; + block_vertices[2 * (y * fft->block_size + x) + 1] = y; + } + } + glGenBuffers(1, &fft->block.vbo); + glBindBuffer(GL_ARRAY_BUFFER, fft->block.vbo); + glBufferData(GL_ARRAY_BUFFER, + block_vertices_size * sizeof(GLushort), + &block_vertices[0], GL_STATIC_DRAW); + + fft->block.elems = (2 * fft->block_size - 1) * (fft->depth - 1) + 1; + + block_indices_size = fft->block.elems; + block_indices = (GLuint*)calloc(block_indices_size, sizeof(GLuint)); + + bp = &block_indices[0]; + + for (y = 0; y < fft->depth - 1; y++) + { + int x; + int step_odd = -int(fft->block_size) + ((y & 1) ? -1 : 1); + int step_even = fft->block_size; + + for (x = 0; x < 2 * int(fft->block_size) - 1; x++) + { + *bp++ = pos; + pos += (x & 1) ? step_odd : step_even; + } + } + *bp++ = pos; + + glGenVertexArrays(1, &fft->block.vao); + glBindVertexArray(fft->block.vao); + + glGenBuffers(1, &fft->block.ibo); + glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, fft->block.ibo); + glBufferData(GL_ELEMENT_ARRAY_BUFFER, + block_indices_size * sizeof(GLuint), + &block_indices[0], GL_STATIC_DRAW); + + glEnableVertexAttribArray(0); + glVertexAttribPointer(0, 2, GL_UNSIGNED_SHORT, GL_FALSE, 0, 0); + glBindVertexArray(0); + + glBindBuffer(GL_ARRAY_BUFFER, 0); + glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, 0); + + free(block_vertices); + free(block_indices); +} + +static bool fft_context_reset(glfft_t *fft, unsigned fft_steps, + rglgen_proc_address_t proc, unsigned fft_depth) +{ + rglgen_resolve_symbols(proc); + + fft->steps = fft_steps; + fft->depth = fft_depth; + fft->size = 1 << fft_steps; + fft->block_size = fft->size / 4 + 1; + + fft->passes_size = fft_steps; + fft->passes = (Pass*)calloc(fft->passes_size, sizeof(Pass)); + + if (!fft->passes) + return false; + + fft->sliding_size = 2 * fft->size; + fft->sliding = (GLshort*)calloc(fft->sliding_size, sizeof(GLshort)); + + if (!fft->sliding) + return false; + + GL_CHECK_ERROR(); + fft_init_quad_vao(fft); + GL_CHECK_ERROR(); + fft_init(fft); + GL_CHECK_ERROR(); + fft_init_block(fft); + GL_CHECK_ERROR(); + + return true; +} + +/* GLFFT requires either GLES3 or + * desktop GL with ES3_compat (supported by MESA on Linux) extension. 
*/ +extern "C" glfft_t *glfft_new(unsigned fft_steps, rglgen_proc_address_t proc) +{ +#ifdef HAVE_OPENGLES3 + const char *ver = (const char*)(glGetString(GL_VERSION)); + if (ver) + { + unsigned major, minor; + if (sscanf(ver, "OpenGL ES %u.%u", &major, &minor) != 2 || major < 3) + return NULL; + } + else + return NULL; +#else + const char *exts = (const char*)(glGetString(GL_EXTENSIONS)); + if (!exts || !strstr(exts, "ARB_ES3_compatibility")) + return NULL; +#endif + + glfft_t *fft = (glfft_t*)calloc(1, sizeof(*fft)); + if (!fft) + return NULL; + + if (!fft_context_reset(fft, fft_steps, proc, 256)) + goto error; + + return fft; + +error: + if (fft) + free(fft); + return NULL; +} + +extern "C" void glfft_init_multisample(glfft_t *fft, + unsigned width, unsigned height, unsigned samples) +{ + if (fft->ms_rb_color) + glDeleteRenderbuffers(1, &fft->ms_rb_color); + fft->ms_rb_color = 0; + if (fft->ms_rb_ds) + glDeleteRenderbuffers(1, &fft->ms_rb_ds); + fft->ms_rb_ds = 0; + if (fft->ms_fbo) + glDeleteFramebuffers(1, &fft->ms_fbo); + fft->ms_fbo = 0; + + if (samples > 1) + { + glGenRenderbuffers(1, &fft->ms_rb_color); + glGenRenderbuffers(1, &fft->ms_rb_ds); + glGenFramebuffers (1, &fft->ms_fbo); + + glBindRenderbuffer(GL_RENDERBUFFER, fft->ms_rb_color); + glRenderbufferStorageMultisample(GL_RENDERBUFFER, samples, + GL_RGBA8, width, height); + glBindRenderbuffer(GL_RENDERBUFFER, fft->ms_rb_ds); + glRenderbufferStorageMultisample(GL_RENDERBUFFER, samples, + GL_DEPTH24_STENCIL8, width, height); + glBindRenderbuffer(GL_RENDERBUFFER, 0); + + glBindFramebuffer(GL_FRAMEBUFFER, fft->ms_fbo); + glFramebufferRenderbuffer(GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, + GL_RENDERBUFFER, fft->ms_rb_color); + glFramebufferRenderbuffer(GL_FRAMEBUFFER, GL_DEPTH_STENCIL_ATTACHMENT, + GL_RENDERBUFFER, fft->ms_rb_ds); + if (glCheckFramebufferStatus(GL_FRAMEBUFFER) != GL_FRAMEBUFFER_COMPLETE) + glfft_init_multisample(fft, 0, 0, 0); + } + + glBindFramebuffer(GL_FRAMEBUFFER, 0); +} + +static void fft_context_destroy(glfft_t *fft) +{ + glfft_init_multisample(fft, 0, 0, 0); + if (fft->passes) + free(fft->passes); + fft->passes = NULL; + if (fft->sliding) + free(fft->sliding); + fft->sliding = NULL; +} + +extern "C" void glfft_free(glfft_t *fft) +{ + if (!fft) + return; + + fft_context_destroy(fft); + if (fft) + free(fft); + fft = NULL; +} + +extern "C" void glfft_step_fft(glfft_t *fft, + const GLshort *buffer, unsigned frames) +{ + fft_step(fft, buffer, frames); +} + +extern "C" void glfft_render(glfft_t *fft, GLuint backbuffer, + unsigned width, unsigned height) +{ + fft_render(fft, backbuffer, width, height); +} diff --git a/cores/libretro-ffmpeg/fft/fft.h b/cores/libretro-ffmpeg/fft/fft.h new file mode 100644 index 0000000000..e788530e62 --- /dev/null +++ b/cores/libretro-ffmpeg/fft/fft.h @@ -0,0 +1,26 @@ +#ifndef FFT_H__ +#define FFT_H__ + +#include + +#ifdef __cplusplus +extern "C" { +#endif + +typedef struct glfft glfft_t; + +glfft_t *glfft_new(unsigned fft_steps, rglgen_proc_address_t proc); + +void glfft_free(glfft_t *fft); + +void glfft_init_multisample(glfft_t *fft, unsigned width, unsigned height, unsigned samples); + +void glfft_step_fft(glfft_t *fft, const GLshort *buffer, unsigned frames); +void glfft_render(glfft_t *fft, GLuint backbuffer, unsigned width, unsigned height); + +#ifdef __cplusplus +} +#endif + +#endif + diff --git a/cores/libretro-ffmpeg/fft/gl_shaders/fft_fragment_program_blur.glsl.frag.h b/cores/libretro-ffmpeg/fft/gl_shaders/fft_fragment_program_blur.glsl.frag.h new file mode 100644 index 
0000000000..6cb153f41e --- /dev/null +++ b/cores/libretro-ffmpeg/fft/gl_shaders/fft_fragment_program_blur.glsl.frag.h @@ -0,0 +1,28 @@ +#include "shaders_common.h" + +static const char *fft_fragment_program_blur = GLSL_300( + precision mediump float; + precision highp int; + precision highp usampler2D; + precision highp isampler2D; + in vec2 vTex; + out vec4 FragColor; + uniform sampler2D sHeight; + + void main() { + float k = 0.0; + float t; + vec4 res = vec4(0.0); + \n#define kernel(x, y) t = exp(-0.35 * float((x) * (x) + (y) * (y))); k += t; res += t * textureLodOffset(sHeight, vTex, 0.0, ivec2(x, y))\n + kernel(-1, -2); + kernel(-1, -1); + kernel(-1, 0); + kernel( 0, -2); + kernel( 0, -1); + kernel( 0, 0); + kernel( 1, -2); + kernel( 1, -1); + kernel( 1, 0); + FragColor = res / k; + } +); diff --git a/cores/libretro-ffmpeg/fft/gl_shaders/fft_fragment_program_complex.glsl.frag.h b/cores/libretro-ffmpeg/fft/gl_shaders/fft_fragment_program_complex.glsl.frag.h new file mode 100644 index 0000000000..dc85949ea1 --- /dev/null +++ b/cores/libretro-ffmpeg/fft/gl_shaders/fft_fragment_program_complex.glsl.frag.h @@ -0,0 +1,33 @@ +#include "shaders_common.h" + +static const char *fft_fragment_program_complex = GLSL_300( + precision mediump float; + precision highp int; + precision highp usampler2D; + precision highp isampler2D; + + in vec2 vTex; + uniform usampler2D sTexture; + uniform usampler2D sParameterTexture; + uniform int uViewportOffset; + out uvec2 FragColor; + + vec2 compMul(vec2 a, vec2 b) { return vec2(a.x * b.x - a.y * b.y, a.x * b.y + a.y * b.x); } + + void main() { + uvec2 params = texture(sParameterTexture, vec2(vTex.x, 0.5)).rg; + uvec2 coord = uvec2((params.x >> 16u) & 0xffffu, params.x & 0xffffu); + int ycoord = int(gl_FragCoord.y) - uViewportOffset; + vec2 twiddle = unpackHalf2x16(params.y); + + uvec2 x = texelFetch(sTexture, ivec2(int(coord.x), ycoord), 0).rg; + uvec2 y = texelFetch(sTexture, ivec2(int(coord.y), ycoord), 0).rg; + vec4 a = vec4(unpackHalf2x16(x.x), unpackHalf2x16(x.y)); + vec4 b = vec4(unpackHalf2x16(y.x), unpackHalf2x16(y.y)); + b.xy = compMul(b.xy, twiddle); + b.zw = compMul(b.zw, twiddle); + + vec4 res = a + b; + FragColor = uvec2(packHalf2x16(res.xy), packHalf2x16(res.zw)); + } +); diff --git a/cores/libretro-ffmpeg/fft/gl_shaders/fft_fragment_program_real.glsl.frag.h b/cores/libretro-ffmpeg/fft/gl_shaders/fft_fragment_program_real.glsl.frag.h new file mode 100644 index 0000000000..53aa03afcf --- /dev/null +++ b/cores/libretro-ffmpeg/fft/gl_shaders/fft_fragment_program_real.glsl.frag.h @@ -0,0 +1,40 @@ +#include "shaders_common.h" + +static const char *fft_fragment_program_real = GLSL_300( + precision mediump float; + precision highp int; + precision highp usampler2D; + precision highp isampler2D; + + in vec2 vTex; + uniform isampler2D sTexture; + uniform usampler2D sParameterTexture; + uniform usampler2D sWindow; + uniform int uViewportOffset; + out uvec2 FragColor; + + vec2 compMul(vec2 a, vec2 b) { return vec2(a.x * b.x - a.y * b.y, a.x * b.y + a.y * b.x); } + + void main() { + uvec2 params = texture(sParameterTexture, vec2(vTex.x, 0.5)).rg; + uvec2 coord = uvec2((params.x >> 16u) & 0xffffu, params.x & 0xffffu); + int ycoord = int(gl_FragCoord.y) - uViewportOffset; + vec2 twiddle = unpackHalf2x16(params.y); + + float window_a = float(texelFetch(sWindow, ivec2(coord.x, 0), 0).r) / float(0x10000); + float window_b = float(texelFetch(sWindow, ivec2(coord.y, 0), 0).r) / float(0x10000); + + vec2 a = window_a * vec2(texelFetch(sTexture, ivec2(int(coord.x), 
ycoord), 0).rg) / vec2(0x8000); + vec2 a_l = vec2(a.x, 0.0); + vec2 a_r = vec2(a.y, 0.0); + vec2 b = window_b * vec2(texelFetch(sTexture, ivec2(int(coord.y), ycoord), 0).rg) / vec2(0x8000); + vec2 b_l = vec2(b.x, 0.0); + vec2 b_r = vec2(b.y, 0.0); + b_l = compMul(b_l, twiddle); + b_r = compMul(b_r, twiddle); + + vec2 res_l = a_l + b_l; + vec2 res_r = a_r + b_r; + FragColor = uvec2(packHalf2x16(res_l), packHalf2x16(res_r)); + } +); diff --git a/cores/libretro-ffmpeg/fft/gl_shaders/fft_fragment_program_resolve.glsl.frag.h b/cores/libretro-ffmpeg/fft/gl_shaders/fft_fragment_program_resolve.glsl.frag.h new file mode 100644 index 0000000000..1c26428b78 --- /dev/null +++ b/cores/libretro-ffmpeg/fft/gl_shaders/fft_fragment_program_resolve.glsl.frag.h @@ -0,0 +1,30 @@ +#include "shaders_common.h" + +static const char *fft_fragment_program_resolve = GLSL_300( + precision mediump float; + precision highp int; + precision highp usampler2D; + precision highp isampler2D; + in vec2 vTex; + out vec4 FragColor; + uniform usampler2D sFFT; + + vec4 get_heights(highp uvec2 h) { + vec2 l = unpackHalf2x16(h.x); + vec2 r = unpackHalf2x16(h.y); + vec2 channels[4] = vec2[4]( + l, 0.5 * (l + r), r, 0.5 * (l - r)); + vec4 amps; + for (int i = 0; i < 4; i++) + amps[i] = dot(channels[i], channels[i]); + + return 9.0 * log(amps + 0.0001) - 22.0; + } + + void main() { + uvec2 h = textureLod(sFFT, vTex, 0.0).rg; + vec4 height = get_heights(h); + height = (height + 40.0) / 80.0; + FragColor = height; + } +); diff --git a/cores/libretro-ffmpeg/fft/gl_shaders/fft_heightmap.glsl.frag.h b/cores/libretro-ffmpeg/fft/gl_shaders/fft_heightmap.glsl.frag.h new file mode 100644 index 0000000000..8915e3d61e --- /dev/null +++ b/cores/libretro-ffmpeg/fft/gl_shaders/fft_heightmap.glsl.frag.h @@ -0,0 +1,18 @@ +#include "shaders_common.h" + +static const char *fft_fragment_program_heightmap = GLSL_300( + precision mediump float; + out vec4 FragColor; + in vec3 vWorldPos; + in vec3 vHeight; + + vec3 colormap(vec3 height) { + return 1.0 / (1.0 + exp(-0.08 * height)); + } + + void main() { + vec3 color = mix(vec3(1.0, 0.7, 0.7) * colormap(vHeight), vec3(0.1, 0.15, 0.1), clamp(vWorldPos.z / 400.0, 0.0, 1.0)); + color = mix(color, vec3(0.1, 0.15, 0.1), clamp(1.0 - vWorldPos.z / 2.0, 0.0, 1.0)); + FragColor = vec4(color, 1.0); + } +); diff --git a/cores/libretro-ffmpeg/fft/gl_shaders/fft_heightmap.glsl.vert.h b/cores/libretro-ffmpeg/fft/gl_shaders/fft_heightmap.glsl.vert.h new file mode 100644 index 0000000000..a5ea0adb2e --- /dev/null +++ b/cores/libretro-ffmpeg/fft/gl_shaders/fft_heightmap.glsl.vert.h @@ -0,0 +1,43 @@ +#include "shaders_common.h" + +static const char *fft_vertex_program_heightmap = GLSL_300( + layout(location = 0) in vec2 aVertex; + uniform sampler2D sHeight; + uniform mat4 uMVP; + uniform ivec2 uOffset; + uniform vec4 uHeightmapParams; + uniform float uAngleScale; + out vec3 vWorldPos; + out vec3 vHeight; + + void main() { + vec2 tex_coord = vec2(aVertex.x + float(uOffset.x) + 0.5, -aVertex.y + float(uOffset.y) + 0.5) / vec2(textureSize(sHeight, 0)); + + vec3 world_pos = vec3(aVertex.x, 0.0, aVertex.y); + world_pos.xz += uHeightmapParams.xy; + + float angle = world_pos.x * uAngleScale; + world_pos.xz *= uHeightmapParams.zw; + + float lod = log2(world_pos.z + 1.0) - 6.0; + vec4 heights = textureLod(sHeight, tex_coord, lod); + + float cangle = cos(angle); + float sangle = sin(angle); + + int c = int(-sign(world_pos.x) + 1.0); + float height = mix(heights[c], heights[1], abs(angle) / 3.141592653); + height = height * 80.0 - 
40.0; + + vec3 up = vec3(-sangle, cangle, 0.0); + + float base_y = 80.0 - 80.0 * cangle; + float base_x = 80.0 * sangle; + world_pos.xy = vec2(base_x, base_y); + world_pos += up * height; + + vWorldPos = world_pos; + vHeight = vec3(height, heights.yw * 80.0 - 40.0); + gl_Position = uMVP * vec4(world_pos, 1.0); + } +); diff --git a/cores/libretro-ffmpeg/fft/gl_shaders/fft_vertex_program.glsl.vert.h b/cores/libretro-ffmpeg/fft/gl_shaders/fft_vertex_program.glsl.vert.h new file mode 100644 index 0000000000..9bc86b7f79 --- /dev/null +++ b/cores/libretro-ffmpeg/fft/gl_shaders/fft_vertex_program.glsl.vert.h @@ -0,0 +1,13 @@ +#include "shaders_common.h" + +static const char *fft_vertex_program = GLSL_300( + precision mediump float; + layout(location = 0) in vec2 aVertex; + layout(location = 1) in vec2 aTexCoord; + uniform vec4 uOffsetScale; + out vec2 vTex; + void main() { + vTex = uOffsetScale.xy + aTexCoord * uOffsetScale.zw; + gl_Position = vec4(aVertex, 0.0, 1.0); + } +); diff --git a/cores/libretro-ffmpeg/fft/gl_shaders/shaders_common.h b/cores/libretro-ffmpeg/fft/gl_shaders/shaders_common.h new file mode 100644 index 0000000000..61b5ac1006 --- /dev/null +++ b/cores/libretro-ffmpeg/fft/gl_shaders/shaders_common.h @@ -0,0 +1,14 @@ +#ifndef _SHADERS_COMMON +#define _SHADERS_COMMON + +#if defined(HAVE_OPENGLES) +#define CG(src) "" #src +#define GLSL(src) "precision mediump float;\n" #src +#define GLSL_300(src) "#version 300 es\n" #src +#else +#define CG(src) "" #src +#define GLSL(src) "" #src +#define GLSL_300(src) "#version 300 es\n" #src +#endif + +#endif diff --git a/cores/libretro-imageviewer/image_core.c b/cores/libretro-imageviewer/image_core.c index 547f90a8ea..be78b7747e 100644 --- a/cores/libretro-imageviewer/image_core.c +++ b/cores/libretro-imageviewer/image_core.c @@ -9,7 +9,9 @@ #include #include +#if 0 #define HAVE_STB_IMAGE +#endif #ifdef HAVE_STB_IMAGE #define STB_IMAGE_IMPLEMENTATION @@ -394,9 +396,10 @@ void IMAGE_CORE_PREFIX(retro_run)(void) /* RGBA > XRGB8888 */ struct retro_system_av_info info; - uint32_t *buf = &image_buffer[0]; +#ifdef HAVE_STB_IMAGE int x, y; - + uint32_t *buf = &image_buffer[0]; + for (y = 0; y < image_height; y++) { for (x = 0; x < image_width; x++, buf++) @@ -421,6 +424,7 @@ void IMAGE_CORE_PREFIX(retro_run)(void) } } } +#endif IMAGE_CORE_PREFIX(retro_get_system_av_info)(&info); diff --git a/cores/libretro-test-vulkan/libretro-test.c b/cores/libretro-test-vulkan/libretro-test.c index 144661742e..c2ec3b7ef7 100644 --- a/cores/libretro-test-vulkan/libretro-test.c +++ b/cores/libretro-test-vulkan/libretro-test.c @@ -4,7 +4,7 @@ #include #include -#include "../../libretro_vulkan.h" +#include #include "shaders/triangle.vert.inc" #include "shaders/triangle.frag.inc" diff --git a/deps/glm/common.hpp b/deps/glm/common.hpp new file mode 100644 index 0000000000..b8bb14e72d --- /dev/null +++ b/deps/glm/common.hpp @@ -0,0 +1,34 @@ +/////////////////////////////////////////////////////////////////////////////////// +/// OpenGL Mathematics (glm.g-truc.net) +/// +/// Copyright (c) 2005 - 2014 G-Truc Creation (www.g-truc.net) +/// Permission is hereby granted, free of charge, to any person obtaining a copy +/// of this software and associated documentation files (the "Software"), to deal +/// in the Software without restriction, including without limitation the rights +/// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +/// copies of the Software, and to permit persons to whom the Software is +/// furnished to do so, subject to the 
following conditions: +/// +/// The above copyright notice and this permission notice shall be included in +/// all copies or substantial portions of the Software. +/// +/// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +/// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +/// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +/// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +/// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +/// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +/// THE SOFTWARE. +/// +/// @ref core +/// @file glm/common.hpp +/// @date 2013-12-24 / 2013-12-24 +/// @author Christophe Riccio +/////////////////////////////////////////////////////////////////////////////////// + +#ifndef GLM_COMMON_INCLUDED +#define GLM_COMMON_INCLUDED + +#include "detail/func_common.hpp" + +#endif//GLM_COMMON_INCLUDED diff --git a/deps/glm/detail/_features.hpp b/deps/glm/detail/_features.hpp new file mode 100644 index 0000000000..f11876acd5 --- /dev/null +++ b/deps/glm/detail/_features.hpp @@ -0,0 +1,427 @@ +/////////////////////////////////////////////////////////////////////////////////// +/// OpenGL Mathematics (glm.g-truc.net) +/// +/// Copyright (c) 2005 - 2014 G-Truc Creation (www.g-truc.net) +/// Permission is hereby granted, free of charge, to any person obtaining a copy +/// of this software and associated documentation files (the "Software"), to deal +/// in the Software without restriction, including without limitation the rights +/// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +/// copies of the Software, and to permit persons to whom the Software is +/// furnished to do so, subject to the following conditions: +/// +/// The above copyright notice and this permission notice shall be included in +/// all copies or substantial portions of the Software. +/// +/// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +/// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +/// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +/// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +/// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +/// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +/// THE SOFTWARE. 
+/// +/// @ref core +/// @file glm/core/_features.hpp +/// @date 2013-02-20 / 2013-02-20 +/// @author Christophe Riccio +/////////////////////////////////////////////////////////////////////////////////// + +#ifndef glm_core_features +#define glm_core_features + +// #define GLM_CXX98_EXCEPTIONS +// #define GLM_CXX98_RTTI + +// #define GLM_CXX11_RVALUE_REFERENCES +// Rvalue references - GCC 4.3 +// http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2006/n2118.html + +// GLM_CXX11_TRAILING_RETURN +// Rvalue references for *this - GCC not supported +// http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2007/n2439.htm + +// GLM_CXX11_NONSTATIC_MEMBER_INIT +// Initialization of class objects by rvalues - GCC any +// http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2004/n1610.html + +// GLM_CXX11_NONSTATIC_MEMBER_INIT +// Non-static data member initializers - GCC 4.7 +// http://www.open-std.org/JTC1/SC22/WG21/docs/papers/2008/n2756.htm + +// #define GLM_CXX11_VARIADIC_TEMPLATE +// Variadic templates - GCC 4.3 +// http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2007/n2242.pdf + +// +// Extending variadic template template parameters - GCC 4.4 +// http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2008/n2555.pdf + +// #define GLM_CXX11_GENERALIZED_INITIALIZERS +// Initializer lists - GCC 4.4 +// http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2008/n2672.htm + +// #define GLM_CXX11_STATIC_ASSERT +// Static assertions - GCC 4.3 +// http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2004/n1720.html + +// #define GLM_CXX11_AUTO_TYPE +// auto-typed variables - GCC 4.4 +// http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2006/n1984.pdf + +// #define GLM_CXX11_AUTO_TYPE +// Multi-declarator auto - GCC 4.4 +// http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2004/n1737.pdf + +// #define GLM_CXX11_AUTO_TYPE +// Removal of auto as a storage-class specifier - GCC 4.4 +// http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2008/n2546.htm + +// #define GLM_CXX11_AUTO_TYPE +// New function declarator syntax - GCC 4.4 +// http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2008/n2541.htm + +// #define GLM_CXX11_LAMBDAS +// New wording for C++0x lambdas - GCC 4.5 +// http://www.open-std.org/JTC1/SC22/WG21/docs/papers/2009/n2927.pdf + +// #define GLM_CXX11_DECLTYPE +// Declared type of an expression - GCC 4.3 +// http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2007/n2343.pdf + +// +// Right angle brackets - GCC 4.3 +// http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2005/n1757.html + +// +// Default template arguments for function templates DR226 GCC 4.3 +// http://www.open-std.org/jtc1/sc22/wg21/docs/cwg_defects.html#226 + +// +// Solving the SFINAE problem for expressions DR339 GCC 4.4 +// http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2008/n2634.html + +// #define GLM_CXX11_ALIAS_TEMPLATE +// Template aliases N2258 GCC 4.7 +// http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2007/n2258.pdf + +// +// Extern templates N1987 Yes +// http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2006/n1987.htm + +// #define GLM_CXX11_NULLPTR +// Null pointer constant N2431 GCC 4.6 +// http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2007/n2431.pdf + +// #define GLM_CXX11_STRONG_ENUMS +// Strongly-typed enums N2347 GCC 4.4 +// http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2007/n2347.pdf + +// +// Forward declarations for enums N2764 GCC 4.6 +// http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2008/n2764.pdf + +// +// Generalized attributes N2761 GCC 4.8 +// 
http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2008/n2761.pdf + +// +// Generalized constant expressions N2235 GCC 4.6 +// http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2007/n2235.pdf + +// +// Alignment support N2341 GCC 4.8 +// http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2007/n2341.pdf + +// #define GLM_CXX11_DELEGATING_CONSTRUCTORS +// Delegating constructors N1986 GCC 4.7 +// http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2006/n1986.pdf + +// +// Inheriting constructors N2540 GCC 4.8 +// http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2008/n2540.htm + +// #define GLM_CXX11_EXPLICIT_CONVERSIONS +// Explicit conversion operators N2437 GCC 4.5 +// http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2007/n2437.pdf + +// +// New character types N2249 GCC 4.4 +// http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2007/n2249.html + +// +// Unicode string literals N2442 GCC 4.5 +// http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2007/n2442.htm + +// +// Raw string literals N2442 GCC 4.5 +// http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2007/n2442.htm + +// +// Universal character name literals N2170 GCC 4.5 +// http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2007/n2170.html + +// #define GLM_CXX11_USER_LITERALS +// User-defined literals N2765 GCC 4.7 +// http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2008/n2765.pdf + +// +// Standard Layout Types N2342 GCC 4.5 +// http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2007/n2342.htm + +// #define GLM_CXX11_DEFAULTED_FUNCTIONS +// #define GLM_CXX11_DELETED_FUNCTIONS +// Defaulted and deleted functions N2346 GCC 4.4 +// http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2007/n2346.htm + +// +// Extended friend declarations N1791 GCC 4.7 +// http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2005/n1791.pdf + +// +// Extending sizeof N2253 GCC 4.4 +// http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2007/n2253.html + +// #define GLM_CXX11_INLINE_NAMESPACES +// Inline namespaces N2535 GCC 4.4 +// http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2008/n2535.htm + +// #define GLM_CXX11_UNRESTRICTED_UNIONS +// Unrestricted unions N2544 GCC 4.6 +// http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2008/n2544.pdf + +// #define GLM_CXX11_LOCAL_TYPE_TEMPLATE_ARGS +// Local and unnamed types as template arguments N2657 GCC 4.5 +// http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2008/n2657.htm + +// #define GLM_CXX11_RANGE_FOR +// Range-based for N2930 GCC 4.6 +// http://www.open-std.org/JTC1/SC22/WG21/docs/papers/2009/n2930.html + +// #define GLM_CXX11_OVERRIDE_CONTROL +// Explicit virtual overrides N2928 N3206 N3272 GCC 4.7 +// http://www.open-std.org/JTC1/SC22/WG21/docs/papers/2009/n2928.htm +// http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2010/n3206.htm +// http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2011/n3272.htm + +// +// Minimal support for garbage collection and reachability-based leak detection N2670 No +// http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2008/n2670.htm + +// #define GLM_CXX11_NOEXCEPT +// Allowing move constructors to throw [noexcept] N3050 GCC 4.6 (core language only) +// http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2010/n3050.html + +// +// Defining move special member functions N3053 GCC 4.6 +// http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2010/n3053.html + +// +// Sequence points N2239 Yes +// http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2007/n2239.html + +// +// Atomic operations N2427 GCC 4.4 +// 
http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2007/n2239.html + +// +// Strong Compare and Exchange N2748 GCC 4.5 +// http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2007/n2427.html + +// +// Bidirectional Fences N2752 GCC 4.8 +// http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2008/n2752.htm + +// +// Memory model N2429 GCC 4.8 +// http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2007/n2429.htm + +// +// Data-dependency ordering: atomics and memory model N2664 GCC 4.4 +// http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2008/n2664.htm + +// +// Propagating exceptions N2179 GCC 4.4 +// http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2007/n2179.html + +// +// Abandoning a process and at_quick_exit N2440 GCC 4.8 +// http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2007/n2440.htm + +// +// Allow atomics use in signal handlers N2547 Yes +// http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2008/n2547.htm + +// +// Thread-local storage N2659 GCC 4.8 +// http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2008/n2659.htm + +// +// Dynamic initialization and destruction with concurrency N2660 GCC 4.3 +// http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2008/n2660.htm + +// +// __func__ predefined identifier N2340 GCC 4.3 +// http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2007/n2340.htm + +// +// C99 preprocessor N1653 GCC 4.3 +// http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2004/n1653.htm + +// +// long long N1811 GCC 4.3 +// http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2005/n1811.pdf + +// +// Extended integral types N1988 Yes +// http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2006/n1988.pdf + +#if(GLM_COMPILER & GLM_COMPILER_GCC) + +# if(GLM_COMPILER >= GLM_COMPILER_GCC43) +# define GLM_CXX11_STATIC_ASSERT +# endif + +#elif(GLM_COMPILER & GLM_COMPILER_CLANG) +# if(__has_feature(cxx_exceptions)) +# define GLM_CXX98_EXCEPTIONS +# endif + +# if(__has_feature(cxx_rtti)) +# define GLM_CXX98_RTTI +# endif + +# if(__has_feature(cxx_access_control_sfinae)) +# define GLM_CXX11_ACCESS_CONTROL_SFINAE +# endif + +# if(__has_feature(cxx_alias_templates)) +# define GLM_CXX11_ALIAS_TEMPLATE +# endif + +# if(__has_feature(cxx_alignas)) +# define GLM_CXX11_ALIGNAS +# endif + +# if(__has_feature(cxx_attributes)) +# define GLM_CXX11_ATTRIBUTES +# endif + +# if(__has_feature(cxx_constexpr)) +# define GLM_CXX11_CONSTEXPR +# endif + +# if(__has_feature(cxx_decltype)) +# define GLM_CXX11_DECLTYPE +# endif + +# if(__has_feature(cxx_default_function_template_args)) +# define GLM_CXX11_DEFAULT_FUNCTION_TEMPLATE_ARGS +# endif + +# if(__has_feature(cxx_defaulted_functions)) +# define GLM_CXX11_DEFAULTED_FUNCTIONS +# endif + +# if(__has_feature(cxx_delegating_constructors)) +# define GLM_CXX11_DELEGATING_CONSTRUCTORS +# endif + +# if(__has_feature(cxx_deleted_functions)) +# define GLM_CXX11_DELETED_FUNCTIONS +# endif + +# if(__has_feature(cxx_explicit_conversions)) +# define GLM_CXX11_EXPLICIT_CONVERSIONS +# endif + +# if(__has_feature(cxx_generalized_initializers)) +# define GLM_CXX11_GENERALIZED_INITIALIZERS +# endif + +# if(__has_feature(cxx_implicit_moves)) +# define GLM_CXX11_IMPLICIT_MOVES +# endif + +# if(__has_feature(cxx_inheriting_constructors)) +# define GLM_CXX11_INHERITING_CONSTRUCTORS +# endif + +# if(__has_feature(cxx_inline_namespaces)) +# define GLM_CXX11_INLINE_NAMESPACES +# endif + +# if(__has_feature(cxx_lambdas)) +# define GLM_CXX11_LAMBDAS +# endif + +# if(__has_feature(cxx_local_type_template_args)) +# define GLM_CXX11_LOCAL_TYPE_TEMPLATE_ARGS +# endif + +# 
if(__has_feature(cxx_noexcept)) +# define GLM_CXX11_NOEXCEPT +# endif + +# if(__has_feature(cxx_nonstatic_member_init)) +# define GLM_CXX11_NONSTATIC_MEMBER_INIT +# endif + +# if(__has_feature(cxx_nullptr)) +# define GLM_CXX11_NULLPTR +# endif + +# if(__has_feature(cxx_override_control)) +# define GLM_CXX11_OVERRIDE_CONTROL +# endif + +# if(__has_feature(cxx_reference_qualified_functions)) +# define GLM_CXX11_REFERENCE_QUALIFIED_FUNCTIONS +# endif + +# if(__has_feature(cxx_range_for)) +# define GLM_CXX11_RANGE_FOR +# endif + +# if(__has_feature(cxx_raw_string_literals)) +# define GLM_CXX11_RAW_STRING_LITERALS +# endif + +# if(__has_feature(cxx_rvalue_references)) +# define GLM_CXX11_RVALUE_REFERENCES +# endif + +# if(__has_feature(cxx_static_assert)) +# define GLM_CXX11_STATIC_ASSERT +# endif + +# if(__has_feature(cxx_auto_type)) +# define GLM_CXX11_AUTO_TYPE +# endif + +# if(__has_feature(cxx_strong_enums)) +# define GLM_CXX11_STRONG_ENUMS +# endif + +# if(__has_feature(cxx_trailing_return)) +# define GLM_CXX11_TRAILING_RETURN +# endif + +# if(__has_feature(cxx_unicode_literals)) +# define GLM_CXX11_UNICODE_LITERALS +# endif + +# if(__has_feature(cxx_unrestricted_unions)) +# define GLM_CXX11_UNRESTRICTED_UNIONS +# endif + +# if(__has_feature(cxx_user_literals)) +# define GLM_CXX11_USER_LITERALS +# endif + +# if(__has_feature(cxx_variadic_templates)) +# define GLM_CXX11_VARIADIC_TEMPLATES +# endif + +#endif//(GLM_COMPILER & GLM_COMPILER_CLANG) + +#endif//glm_core_features diff --git a/deps/glm/detail/_fixes.hpp b/deps/glm/detail/_fixes.hpp new file mode 100644 index 0000000000..95be0e720c --- /dev/null +++ b/deps/glm/detail/_fixes.hpp @@ -0,0 +1,55 @@ +/////////////////////////////////////////////////////////////////////////////////// +/// OpenGL Mathematics (glm.g-truc.net) +/// +/// Copyright (c) 2005 - 2014 G-Truc Creation (www.g-truc.net) +/// Permission is hereby granted, free of charge, to any person obtaining a copy +/// of this software and associated documentation files (the "Software"), to deal +/// in the Software without restriction, including without limitation the rights +/// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +/// copies of the Software, and to permit persons to whom the Software is +/// furnished to do so, subject to the following conditions: +/// +/// The above copyright notice and this permission notice shall be included in +/// all copies or substantial portions of the Software. +/// +/// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +/// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +/// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +/// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +/// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +/// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +/// THE SOFTWARE. +/// +/// @ref core +/// @file glm/core/_fixes.hpp +/// @date 2011-02-21 / 2011-11-22 +/// @author Christophe Riccio +/////////////////////////////////////////////////////////////////////////////////// + +#include + +//! Workaround for compatibility with other libraries +#ifdef max +#undef max +#endif + +//! Workaround for compatibility with other libraries +#ifdef min +#undef min +#endif + +//! Workaround for Android +#ifdef isnan +#undef isnan +#endif + +//! Workaround for Android +#ifdef isinf +#undef isinf +#endif + +//! 
Workaround for Chrome Native Client +#ifdef log2 +#undef log2 +#endif + diff --git a/deps/glm/detail/_literals.hpp b/deps/glm/detail/_literals.hpp new file mode 100644 index 0000000000..f46098140e --- /dev/null +++ b/deps/glm/detail/_literals.hpp @@ -0,0 +1,51 @@ +/////////////////////////////////////////////////////////////////////////////////// +/// OpenGL Mathematics (glm.g-truc.net) +/// +/// Copyright (c) 2005 - 2014 G-Truc Creation (www.g-truc.net) +/// Permission is hereby granted, free of charge, to any person obtaining a copy +/// of this software and associated documentation files (the "Software"), to deal +/// in the Software without restriction, including without limitation the rights +/// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +/// copies of the Software, and to permit persons to whom the Software is +/// furnished to do so, subject to the following conditions: +/// +/// The above copyright notice and this permission notice shall be included in +/// all copies or substantial portions of the Software. +/// +/// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +/// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +/// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +/// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +/// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +/// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +/// THE SOFTWARE. +/// +/// @ref core +/// @file glm/core/_literals.hpp +/// @date 2013-05-06 / 2013-05-06 +/// @author Christophe Riccio +/////////////////////////////////////////////////////////////////////////////////// + +#ifndef glm_core_literals +#define glm_core_literals + +namespace glm +{ +#define GLM_CXX11_USER_LITERALS +#ifdef GLM_CXX11_USER_LITERALS +/* + GLM_FUNC_QUALIFIER detail::half operator "" _h(long double const s) + { + return detail::half(s); + } + + GLM_FUNC_QUALIFIER float operator "" _f(long double const s) + { + return static_cast<float>(s); + } +*/ +#endif//GLM_CXX11_USER_LITERALS + +}//namespace glm + +#endif//glm_core_literals diff --git a/deps/glm/detail/_noise.hpp b/deps/glm/detail/_noise.hpp new file mode 100644 index 0000000000..bf779bf691 --- /dev/null +++ b/deps/glm/detail/_noise.hpp @@ -0,0 +1,130 @@ +/////////////////////////////////////////////////////////////////////////////////// +/// OpenGL Mathematics (glm.g-truc.net) +/// +/// Copyright (c) 2005 - 2014 G-Truc Creation (www.g-truc.net) +/// Permission is hereby granted, free of charge, to any person obtaining a copy +/// of this software and associated documentation files (the "Software"), to deal +/// in the Software without restriction, including without limitation the rights +/// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +/// copies of the Software, and to permit persons to whom the Software is +/// furnished to do so, subject to the following conditions: +/// +/// The above copyright notice and this permission notice shall be included in +/// all copies or substantial portions of the Software. +/// +/// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +/// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +/// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE +/// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +/// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +/// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +/// THE SOFTWARE. +/// +/// @ref core +/// @file glm/detail/_noise.hpp +/// @date 2013-12-24 / 2013-12-24 +/// @author Christophe Riccio +/////////////////////////////////////////////////////////////////////////////////// + +#ifndef GLM_DETAIL_NOISE_INCLUDED +#define GLM_DETAIL_NOISE_INCLUDED + +namespace glm{ +namespace detail +{ + template + GLM_FUNC_QUALIFIER T mod289(T const & x) + { + return x - floor(x * static_cast(1.0) / static_cast(289.0)) * static_cast(289.0); + } + + template + GLM_FUNC_QUALIFIER T permute(T const & x) + { + return mod289(((x * static_cast(34)) + static_cast(1)) * x); + } + + template + GLM_FUNC_QUALIFIER tvec2 permute(tvec2 const & x) + { + return mod289(((x * static_cast(34)) + static_cast(1)) * x); + } + + template + GLM_FUNC_QUALIFIER tvec3 permute(tvec3 const & x) + { + return mod289(((x * static_cast(34)) + static_cast(1)) * x); + } + + template + GLM_FUNC_QUALIFIER tvec4 permute(tvec4 const & x) + { + return mod289(((x * static_cast(34)) + static_cast(1)) * x); + } +/* + template class vecType> + GLM_FUNC_QUALIFIER vecType permute(vecType const & x) + { + return mod289(((x * T(34)) + T(1)) * x); + } +*/ + template + GLM_FUNC_QUALIFIER T taylorInvSqrt(T const & r) + { + return T(1.79284291400159) - T(0.85373472095314) * r; + } + + template + GLM_FUNC_QUALIFIER detail::tvec2 taylorInvSqrt(detail::tvec2 const & r) + { + return T(1.79284291400159) - T(0.85373472095314) * r; + } + + template + GLM_FUNC_QUALIFIER detail::tvec3 taylorInvSqrt(detail::tvec3 const & r) + { + return T(1.79284291400159) - T(0.85373472095314) * r; + } + + template + GLM_FUNC_QUALIFIER detail::tvec4 taylorInvSqrt(detail::tvec4 const & r) + { + return T(1.79284291400159) - T(0.85373472095314) * r; + } +/* + template class vecType> + GLM_FUNC_QUALIFIER vecType taylorInvSqrt(vecType const & r) + { + return T(1.79284291400159) - T(0.85373472095314) * r; + } +*/ + + template + GLM_FUNC_QUALIFIER detail::tvec2 fade(detail::tvec2 const & t) + { + return (t * t * t) * (t * (t * T(6) - T(15)) + T(10)); + } + + template + GLM_FUNC_QUALIFIER detail::tvec3 fade(detail::tvec3 const & t) + { + return (t * t * t) * (t * (t * T(6) - T(15)) + T(10)); + } + + template + GLM_FUNC_QUALIFIER detail::tvec4 fade(detail::tvec4 const & t) + { + return (t * t * t) * (t * (t * T(6) - T(15)) + T(10)); + } +/* + template class vecType> + GLM_FUNC_QUALIFIER vecType fade(vecType const & t) + { + return (t * t * t) * (t * (t * T(6) - T(15)) + T(10)); + } +*/ +}//namespace detail +}//namespace glm + +#endif//GLM_DETAIL_NOISE_INCLUDED + diff --git a/deps/glm/detail/_swizzle.hpp b/deps/glm/detail/_swizzle.hpp new file mode 100644 index 0000000000..1be3786b98 --- /dev/null +++ b/deps/glm/detail/_swizzle.hpp @@ -0,0 +1,840 @@ +/////////////////////////////////////////////////////////////////////////////////// +/// OpenGL Mathematics (glm.g-truc.net) +/// +/// Copyright (c) 2005 - 2014 G-Truc Creation (www.g-truc.net) +/// Permission is hereby granted, free of charge, to any person obtaining a copy +/// of this software and associated documentation files (the "Software"), to deal +/// in the Software without restriction, including without limitation the rights +/// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +/// copies of the Software, 
and to permit persons to whom the Software is +/// furnished to do so, subject to the following conditions: +/// +/// The above copyright notice and this permission notice shall be included in +/// all copies or substantial portions of the Software. +/// +/// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +/// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +/// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +/// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +/// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +/// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +/// THE SOFTWARE. +/// +/// @ref core +/// @file glm/core/_swizzle.hpp +/// @date 2006-04-20 / 2011-02-16 +/// @author Christophe Riccio +/////////////////////////////////////////////////////////////////////////////////// + +#ifndef glm_core_swizzle +#define glm_core_swizzle + +namespace glm{ +namespace detail +{ + // Internal class for implementing swizzle operators + template + struct _swizzle_base0 + { + typedef T value_type; + + protected: + GLM_FUNC_QUALIFIER value_type& elem (size_t i) { return (reinterpret_cast(_buffer))[i]; } + GLM_FUNC_QUALIFIER const value_type& elem (size_t i) const { return (reinterpret_cast(_buffer))[i]; } + + // Use an opaque buffer to *ensure* the compiler doesn't call a constructor. + // The size 1 buffer is assumed to aligned to the actual members so that the + // elem() + char _buffer[1]; + }; + + template + struct _swizzle_base1 : public _swizzle_base0 + { + }; + + template + struct _swizzle_base1 : public _swizzle_base0 + { + GLM_FUNC_QUALIFIER V operator ()() const { return V(this->elem(E0), this->elem(E1)); } + }; + + template + struct _swizzle_base1 : public _swizzle_base0 + { + GLM_FUNC_QUALIFIER V operator ()() const { return V(this->elem(E0), this->elem(E1), this->elem(E2)); } + }; + + template + struct _swizzle_base1 : public _swizzle_base0 + { + GLM_FUNC_QUALIFIER V operator ()() const { return V(this->elem(E0), this->elem(E1), this->elem(E2), this->elem(E3)); } + }; + + // Internal class for implementing swizzle operators + /* + Template parameters: + + ValueType = type of scalar values (e.g. float, double) + VecType = class the swizzle is applies to (e.g. tvec3) + N = number of components in the vector (e.g. 3) + E0...3 = what index the n-th element of this swizzle refers to in the unswizzled vec + + DUPLICATE_ELEMENTS = 1 if there is a repeated element, 0 otherwise (used to specialize swizzles + containing duplicate elements so that they cannot be used as r-values). 
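
        As a rough, hypothetical illustration (assuming a three-component vector type that
        instantiates these members with x/y/z mapped to E0/E1/E2, the way the
        _GLM_SWIZZLE3_*_MEMBERS macros further down are written): v.zy corresponds to an
        instantiation along the lines of _swizzle<2, T, P, V, 2, 1, -1, -2>, meaning two
        active components, E0 = 2 (z) and E1 = 1 (y), with the unused slots filled by the
        -1/-2 sentinels. Because the two indices differ, DUPLICATE_ELEMENTS is 0, the
        writable specialization below is selected, and v.zy may appear on the left of an
        assignment or compound assignment; v.zz repeats index 2, so it picks up the
        read-only specialization that only exposes the const operator[].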
+ */ + template + struct _swizzle_base2 : public _swizzle_base1 + { + typedef VecType vec_type; + typedef ValueType value_type; + + GLM_FUNC_QUALIFIER _swizzle_base2& operator= (const ValueType& t) + { + for (int i = 0; i < N; ++i) + (*this)[i] = t; + return *this; + } + + GLM_FUNC_QUALIFIER _swizzle_base2& operator= (const VecType& that) + { + struct op { + GLM_FUNC_QUALIFIER void operator() (value_type& e, value_type& t) { e = t; } + }; + _apply_op(that, op()); + return *this; + } + + GLM_FUNC_QUALIFIER void operator -= (const VecType& that) + { + struct op { + GLM_FUNC_QUALIFIER void operator() (value_type& e, value_type& t) { e -= t; } + }; + _apply_op(that, op()); + } + + GLM_FUNC_QUALIFIER void operator += (const VecType& that) + { + struct op { + GLM_FUNC_QUALIFIER void operator() (value_type& e, value_type& t) { e += t; } + }; + _apply_op(that, op()); + } + + GLM_FUNC_QUALIFIER void operator *= (const VecType& that) + { + struct op { + GLM_FUNC_QUALIFIER void operator() (value_type& e, value_type& t) { e *= t; } + }; + _apply_op(that, op()); + } + + GLM_FUNC_QUALIFIER void operator /= (const VecType& that) + { + struct op { + GLM_FUNC_QUALIFIER void operator() (value_type& e, value_type& t) { e /= t; } + }; + _apply_op(that, op()); + } + + GLM_FUNC_QUALIFIER value_type& operator[] (size_t i) + { +#ifndef __CUDA_ARCH__ + static +#endif + const int offset_dst[4] = { E0, E1, E2, E3 }; + return this->elem(offset_dst[i]); + } + GLM_FUNC_QUALIFIER value_type operator[] (size_t i) const + { +#ifndef __CUDA_ARCH__ + static +#endif + const int offset_dst[4] = { E0, E1, E2, E3 }; + return this->elem(offset_dst[i]); + } + protected: + template + GLM_FUNC_QUALIFIER void _apply_op(const VecType& that, T op) + { + // Make a copy of the data in this == &that. + // The copier should optimize out the copy in cases where the function is + // properly inlined and the copy is not necessary. + ValueType t[N]; + for (int i = 0; i < N; ++i) + t[i] = that[i]; + for (int i = 0; i < N; ++i) + op( (*this)[i], t[i] ); + } + }; + + // Specialization for swizzles containing duplicate elements. These cannot be modified. + template + struct _swizzle_base2 : public _swizzle_base1 + { + typedef VecType vec_type; + typedef ValueType value_type; + + struct Stub {}; + GLM_FUNC_QUALIFIER _swizzle_base2& operator= (Stub const &) { return *this; } + + GLM_FUNC_QUALIFIER value_type operator[] (size_t i) const + { +#ifndef __CUDA_ARCH__ + static +#endif + const int offset_dst[4] = { E0, E1, E2, E3 }; + return this->elem(offset_dst[i]); + } + }; + + template + struct _swizzle : public _swizzle_base2 + { + typedef _swizzle_base2 base_type; + + using base_type::operator=; + + GLM_FUNC_QUALIFIER operator VecType () const { return (*this)(); } + }; + +// +// To prevent the C++ syntax from getting entirely overwhelming, define some alias macros +// +#define _GLM_SWIZZLE_TEMPLATE1 template +#define _GLM_SWIZZLE_TEMPLATE2 template +#define _GLM_SWIZZLE_TYPE1 _swizzle +#define _GLM_SWIZZLE_TYPE2 _swizzle + +// +// Wrapper for a binary operator (e.g. 
u.yy + v.zy) +// +#define _GLM_SWIZZLE_VECTOR_BINARY_OPERATOR_IMPLEMENTATION(OPERAND) \ + _GLM_SWIZZLE_TEMPLATE2 \ + GLM_FUNC_QUALIFIER V operator OPERAND ( const _GLM_SWIZZLE_TYPE1& a, const _GLM_SWIZZLE_TYPE2& b) \ + { \ + return a() OPERAND b(); \ + } \ + _GLM_SWIZZLE_TEMPLATE1 \ + GLM_FUNC_QUALIFIER V operator OPERAND ( const _GLM_SWIZZLE_TYPE1& a, const V& b) \ + { \ + return a() OPERAND b; \ + } \ + _GLM_SWIZZLE_TEMPLATE1 \ + GLM_FUNC_QUALIFIER V operator OPERAND ( const V& a, const _GLM_SWIZZLE_TYPE1& b) \ + { \ + return a OPERAND b(); \ + } + +// +// Wrapper for a operand between a swizzle and a binary (e.g. 1.0f - u.xyz) +// +#define _GLM_SWIZZLE_SCALAR_BINARY_OPERATOR_IMPLEMENTATION(OPERAND) \ + _GLM_SWIZZLE_TEMPLATE1 \ + GLM_FUNC_QUALIFIER V operator OPERAND ( const _GLM_SWIZZLE_TYPE1& a, const T& b) \ + { \ + return a() OPERAND b; \ + } \ + _GLM_SWIZZLE_TEMPLATE1 \ + GLM_FUNC_QUALIFIER V operator OPERAND ( const T& a, const _GLM_SWIZZLE_TYPE1& b) \ + { \ + return a OPERAND b(); \ + } + +// +// Macro for wrapping a function taking one argument (e.g. abs()) +// +#define _GLM_SWIZZLE_FUNCTION_1_ARGS(RETURN_TYPE,FUNCTION) \ + _GLM_SWIZZLE_TEMPLATE1 \ + GLM_FUNC_QUALIFIER typename _GLM_SWIZZLE_TYPE1::RETURN_TYPE FUNCTION(const _GLM_SWIZZLE_TYPE1& a) \ + { \ + return FUNCTION(a()); \ + } + +// +// Macro for wrapping a function taking two vector arguments (e.g. dot()). +// +#define _GLM_SWIZZLE_FUNCTION_2_ARGS(RETURN_TYPE,FUNCTION) \ + _GLM_SWIZZLE_TEMPLATE2 \ + GLM_FUNC_QUALIFIER typename _GLM_SWIZZLE_TYPE1::RETURN_TYPE FUNCTION(const _GLM_SWIZZLE_TYPE1& a, const _GLM_SWIZZLE_TYPE2& b) \ + { \ + return FUNCTION(a(), b()); \ + } \ + _GLM_SWIZZLE_TEMPLATE1 \ + GLM_FUNC_QUALIFIER typename _GLM_SWIZZLE_TYPE1::RETURN_TYPE FUNCTION(const _GLM_SWIZZLE_TYPE1& a, const _GLM_SWIZZLE_TYPE1& b) \ + { \ + return FUNCTION(a(), b()); \ + } \ + _GLM_SWIZZLE_TEMPLATE1 \ + GLM_FUNC_QUALIFIER typename _GLM_SWIZZLE_TYPE1::RETURN_TYPE FUNCTION(const _GLM_SWIZZLE_TYPE1& a, const typename V& b) \ + { \ + return FUNCTION(a(), b); \ + } \ + _GLM_SWIZZLE_TEMPLATE1 \ + GLM_FUNC_QUALIFIER typename _GLM_SWIZZLE_TYPE1::RETURN_TYPE FUNCTION(const V& a, const _GLM_SWIZZLE_TYPE1& b) \ + { \ + return FUNCTION(a, b()); \ + } + +// +// Macro for wrapping a function take 2 vec arguments followed by a scalar (e.g. mix()). 
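// As a hedged illustration (this expansion does not appear verbatim in the header; it assumes
// the macro were invoked as _GLM_SWIZZLE_FUNCTION_2_ARGS_SCALAR(vec_type, mix), matching the
// commented-out example list further down): a call such as mix(a.xyz, b.xyz, 0.5f) would reach a
// generated wrapper that simply returns mix(a.xyz(), b.xyz(), 0.5f), i.e. each swizzle proxy is
// converted back to its plain vec_type through operator() before the ordinary glm::mix overload
// runs. The 1- and 2-argument wrapper macros above follow the same pattern.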
+// +#define _GLM_SWIZZLE_FUNCTION_2_ARGS_SCALAR(RETURN_TYPE,FUNCTION) \ + _GLM_SWIZZLE_TEMPLATE2 \ + GLM_FUNC_QUALIFIER typename _GLM_SWIZZLE_TYPE1::RETURN_TYPE FUNCTION(const _GLM_SWIZZLE_TYPE1& a, const _GLM_SWIZZLE_TYPE2& b, const T& c) \ + { \ + return FUNCTION(a(), b(), c); \ + } \ + _GLM_SWIZZLE_TEMPLATE1 \ + GLM_FUNC_QUALIFIER typename _GLM_SWIZZLE_TYPE1::RETURN_TYPE FUNCTION(const _GLM_SWIZZLE_TYPE1& a, const _GLM_SWIZZLE_TYPE1& b, const T& c) \ + { \ + return FUNCTION(a(), b(), c); \ + } \ + _GLM_SWIZZLE_TEMPLATE1 \ + GLM_FUNC_QUALIFIER typename _GLM_SWIZZLE_TYPE1::RETURN_TYPE FUNCTION(const _GLM_SWIZZLE_TYPE1& a, const typename S0::vec_type& b, const T& c)\ + { \ + return FUNCTION(a(), b, c); \ + } \ + _GLM_SWIZZLE_TEMPLATE1 \ + GLM_FUNC_QUALIFIER typename _GLM_SWIZZLE_TYPE1::RETURN_TYPE FUNCTION(const typename V& a, const _GLM_SWIZZLE_TYPE1& b, const T& c) \ + { \ + return FUNCTION(a, b(), c); \ + } + +}//namespace detail +}//namespace glm + +namespace glm +{ + namespace detail + { + _GLM_SWIZZLE_SCALAR_BINARY_OPERATOR_IMPLEMENTATION(-) + _GLM_SWIZZLE_SCALAR_BINARY_OPERATOR_IMPLEMENTATION(*) + _GLM_SWIZZLE_VECTOR_BINARY_OPERATOR_IMPLEMENTATION(+) + _GLM_SWIZZLE_VECTOR_BINARY_OPERATOR_IMPLEMENTATION(-) + _GLM_SWIZZLE_VECTOR_BINARY_OPERATOR_IMPLEMENTATION(*) + _GLM_SWIZZLE_VECTOR_BINARY_OPERATOR_IMPLEMENTATION(/) + } + + // + // Swizzles are distinct types from the unswizzled type. The below macros will + // provide template specializations for the swizzle types for the given functions + // so that the compiler does not have any ambiguity to choosing how to handle + // the function. + // + // The alternative is to use the operator()() when calling the function in order + // to explicitly convert the swizzled type to the unswizzled type. + // + + //_GLM_SWIZZLE_FUNCTION_1_ARGS(vec_type, abs); + //_GLM_SWIZZLE_FUNCTION_1_ARGS(vec_type, acos); + //_GLM_SWIZZLE_FUNCTION_1_ARGS(vec_type, acosh); + //_GLM_SWIZZLE_FUNCTION_1_ARGS(vec_type, all); + //_GLM_SWIZZLE_FUNCTION_1_ARGS(vec_type, any); + + //_GLM_SWIZZLE_FUNCTION_2_ARGS(value_type, dot); + //_GLM_SWIZZLE_FUNCTION_2_ARGS(vec_type, cross); + //_GLM_SWIZZLE_FUNCTION_2_ARGS(vec_type, step); + //_GLM_SWIZZLE_FUNCTION_2_ARGS_SCALAR(vec_type, mix); +} + +#define _GLM_SWIZZLE2_2_MEMBERS(T, P, V, E0,E1) \ + struct { _swizzle<2, T, P, V, 0,0,-1,-2> E0 ## E0; }; \ + struct { _swizzle<2, T, P, V, 0,1,-1,-2> E0 ## E1; }; \ + struct { _swizzle<2, T, P, V, 1,0,-1,-2> E1 ## E0; }; \ + struct { _swizzle<2, T, P, V, 1,1,-1,-2> E1 ## E1; }; + +#define _GLM_SWIZZLE2_3_MEMBERS(T, P, V, E0,E1) \ + struct { _swizzle<3,T, P, V, 0,0,0,-1> E0 ## E0 ## E0; }; \ + struct { _swizzle<3,T, P, V, 0,0,1,-1> E0 ## E0 ## E1; }; \ + struct { _swizzle<3,T, P, V, 0,1,0,-1> E0 ## E1 ## E0; }; \ + struct { _swizzle<3,T, P, V, 0,1,1,-1> E0 ## E1 ## E1; }; \ + struct { _swizzle<3,T, P, V, 1,0,0,-1> E1 ## E0 ## E0; }; \ + struct { _swizzle<3,T, P, V, 1,0,1,-1> E1 ## E0 ## E1; }; \ + struct { _swizzle<3,T, P, V, 1,1,0,-1> E1 ## E1 ## E0; }; \ + struct { _swizzle<3,T, P, V, 1,1,1,-1> E1 ## E1 ## E1; }; + +#define _GLM_SWIZZLE2_4_MEMBERS(T, P, V, E0,E1) \ + struct { _swizzle<4,T, P, V, 0,0,0,0> E0 ## E0 ## E0 ## E0; }; \ + struct { _swizzle<4,T, P, V, 0,0,0,1> E0 ## E0 ## E0 ## E1; }; \ + struct { _swizzle<4,T, P, V, 0,0,1,0> E0 ## E0 ## E1 ## E0; }; \ + struct { _swizzle<4,T, P, V, 0,0,1,1> E0 ## E0 ## E1 ## E1; }; \ + struct { _swizzle<4,T, P, V, 0,1,0,0> E0 ## E1 ## E0 ## E0; }; \ + struct { _swizzle<4,T, P, V, 0,1,0,1> E0 ## E1 ## E0 ## E1; }; \ + struct { _swizzle<4,T, 
P, V, 0,1,1,0> E0 ## E1 ## E1 ## E0; }; \ + struct { _swizzle<4,T, P, V, 0,1,1,1> E0 ## E1 ## E1 ## E1; }; \ + struct { _swizzle<4,T, P, V, 1,0,0,0> E1 ## E0 ## E0 ## E0; }; \ + struct { _swizzle<4,T, P, V, 1,0,0,1> E1 ## E0 ## E0 ## E1; }; \ + struct { _swizzle<4,T, P, V, 1,0,1,0> E1 ## E0 ## E1 ## E0; }; \ + struct { _swizzle<4,T, P, V, 1,0,1,1> E1 ## E0 ## E1 ## E1; }; \ + struct { _swizzle<4,T, P, V, 1,1,0,0> E1 ## E1 ## E0 ## E0; }; \ + struct { _swizzle<4,T, P, V, 1,1,0,1> E1 ## E1 ## E0 ## E1; }; \ + struct { _swizzle<4,T, P, V, 1,1,1,0> E1 ## E1 ## E1 ## E0; }; \ + struct { _swizzle<4,T, P, V, 1,1,1,1> E1 ## E1 ## E1 ## E1; }; + +#define _GLM_SWIZZLE3_2_MEMBERS(T, P, V, E0,E1,E2) \ + struct { _swizzle<2,T, P, V, 0,0,-1,-2> E0 ## E0; }; \ + struct { _swizzle<2,T, P, V, 0,1,-1,-2> E0 ## E1; }; \ + struct { _swizzle<2,T, P, V, 0,2,-1,-2> E0 ## E2; }; \ + struct { _swizzle<2,T, P, V, 1,0,-1,-2> E1 ## E0; }; \ + struct { _swizzle<2,T, P, V, 1,1,-1,-2> E1 ## E1; }; \ + struct { _swizzle<2,T, P, V, 1,2,-1,-2> E1 ## E2; }; \ + struct { _swizzle<2,T, P, V, 2,0,-1,-2> E2 ## E0; }; \ + struct { _swizzle<2,T, P, V, 2,1,-1,-2> E2 ## E1; }; \ + struct { _swizzle<2,T, P, V, 2,2,-1,-2> E2 ## E2; }; + +#define _GLM_SWIZZLE3_3_MEMBERS(T, P, V ,E0,E1,E2) \ + struct { _swizzle<3,T,P, V, 0,0,0,-1> E0 ## E0 ## E0; }; \ + struct { _swizzle<3,T,P, V, 0,0,1,-1> E0 ## E0 ## E1; }; \ + struct { _swizzle<3,T,P, V, 0,0,2,-1> E0 ## E0 ## E2; }; \ + struct { _swizzle<3,T,P, V, 0,1,0,-1> E0 ## E1 ## E0; }; \ + struct { _swizzle<3,T,P, V, 0,1,1,-1> E0 ## E1 ## E1; }; \ + struct { _swizzle<3,T,P, V, 0,1,2,-1> E0 ## E1 ## E2; }; \ + struct { _swizzle<3,T,P, V, 0,2,0,-1> E0 ## E2 ## E0; }; \ + struct { _swizzle<3,T,P, V, 0,2,1,-1> E0 ## E2 ## E1; }; \ + struct { _swizzle<3,T,P, V, 0,2,2,-1> E0 ## E2 ## E2; }; \ + struct { _swizzle<3,T,P, V, 1,0,0,-1> E1 ## E0 ## E0; }; \ + struct { _swizzle<3,T,P, V, 1,0,1,-1> E1 ## E0 ## E1; }; \ + struct { _swizzle<3,T,P, V, 1,0,2,-1> E1 ## E0 ## E2; }; \ + struct { _swizzle<3,T,P, V, 1,1,0,-1> E1 ## E1 ## E0; }; \ + struct { _swizzle<3,T,P, V, 1,1,1,-1> E1 ## E1 ## E1; }; \ + struct { _swizzle<3,T,P, V, 1,1,2,-1> E1 ## E1 ## E2; }; \ + struct { _swizzle<3,T,P, V, 1,2,0,-1> E1 ## E2 ## E0; }; \ + struct { _swizzle<3,T,P, V, 1,2,1,-1> E1 ## E2 ## E1; }; \ + struct { _swizzle<3,T,P, V, 1,2,2,-1> E1 ## E2 ## E2; }; \ + struct { _swizzle<3,T,P, V, 2,0,0,-1> E2 ## E0 ## E0; }; \ + struct { _swizzle<3,T,P, V, 2,0,1,-1> E2 ## E0 ## E1; }; \ + struct { _swizzle<3,T,P, V, 2,0,2,-1> E2 ## E0 ## E2; }; \ + struct { _swizzle<3,T,P, V, 2,1,0,-1> E2 ## E1 ## E0; }; \ + struct { _swizzle<3,T,P, V, 2,1,1,-1> E2 ## E1 ## E1; }; \ + struct { _swizzle<3,T,P, V, 2,1,2,-1> E2 ## E1 ## E2; }; \ + struct { _swizzle<3,T,P, V, 2,2,0,-1> E2 ## E2 ## E0; }; \ + struct { _swizzle<3,T,P, V, 2,2,1,-1> E2 ## E2 ## E1; }; \ + struct { _swizzle<3,T,P, V, 2,2,2,-1> E2 ## E2 ## E2; }; + +#define _GLM_SWIZZLE3_4_MEMBERS(T, P, V, E0,E1,E2) \ + struct { _swizzle<4,T, P, V, 0,0,0,0> E0 ## E0 ## E0 ## E0; }; \ + struct { _swizzle<4,T, P, V, 0,0,0,1> E0 ## E0 ## E0 ## E1; }; \ + struct { _swizzle<4,T, P, V, 0,0,0,2> E0 ## E0 ## E0 ## E2; }; \ + struct { _swizzle<4,T, P, V, 0,0,1,0> E0 ## E0 ## E1 ## E0; }; \ + struct { _swizzle<4,T, P, V, 0,0,1,1> E0 ## E0 ## E1 ## E1; }; \ + struct { _swizzle<4,T, P, V, 0,0,1,2> E0 ## E0 ## E1 ## E2; }; \ + struct { _swizzle<4,T, P, V, 0,0,2,0> E0 ## E0 ## E2 ## E0; }; \ + struct { _swizzle<4,T, P, V, 0,0,2,1> E0 ## E0 ## E2 ## E1; }; \ + struct { _swizzle<4,T, P, V, 0,0,2,2> E0 ## E0 ## 
E2 ## E2; }; \ + struct { _swizzle<4,T, P, V, 0,1,0,0> E0 ## E1 ## E0 ## E0; }; \ + struct { _swizzle<4,T, P, V, 0,1,0,1> E0 ## E1 ## E0 ## E1; }; \ + struct { _swizzle<4,T, P, V, 0,1,0,2> E0 ## E1 ## E0 ## E2; }; \ + struct { _swizzle<4,T, P, V, 0,1,1,0> E0 ## E1 ## E1 ## E0; }; \ + struct { _swizzle<4,T, P, V, 0,1,1,1> E0 ## E1 ## E1 ## E1; }; \ + struct { _swizzle<4,T, P, V, 0,1,1,2> E0 ## E1 ## E1 ## E2; }; \ + struct { _swizzle<4,T, P, V, 0,1,2,0> E0 ## E1 ## E2 ## E0; }; \ + struct { _swizzle<4,T, P, V, 0,1,2,1> E0 ## E1 ## E2 ## E1; }; \ + struct { _swizzle<4,T, P, V, 0,1,2,2> E0 ## E1 ## E2 ## E2; }; \ + struct { _swizzle<4,T, P, V, 0,2,0,0> E0 ## E2 ## E0 ## E0; }; \ + struct { _swizzle<4,T, P, V, 0,2,0,1> E0 ## E2 ## E0 ## E1; }; \ + struct { _swizzle<4,T, P, V, 0,2,0,2> E0 ## E2 ## E0 ## E2; }; \ + struct { _swizzle<4,T, P, V, 0,2,1,0> E0 ## E2 ## E1 ## E0; }; \ + struct { _swizzle<4,T, P, V, 0,2,1,1> E0 ## E2 ## E1 ## E1; }; \ + struct { _swizzle<4,T, P, V, 0,2,1,2> E0 ## E2 ## E1 ## E2; }; \ + struct { _swizzle<4,T, P, V, 0,2,2,0> E0 ## E2 ## E2 ## E0; }; \ + struct { _swizzle<4,T, P, V, 0,2,2,1> E0 ## E2 ## E2 ## E1; }; \ + struct { _swizzle<4,T, P, V, 0,2,2,2> E0 ## E2 ## E2 ## E2; }; \ + struct { _swizzle<4,T, P, V, 1,0,0,0> E1 ## E0 ## E0 ## E0; }; \ + struct { _swizzle<4,T, P, V, 1,0,0,1> E1 ## E0 ## E0 ## E1; }; \ + struct { _swizzle<4,T, P, V, 1,0,0,2> E1 ## E0 ## E0 ## E2; }; \ + struct { _swizzle<4,T, P, V, 1,0,1,0> E1 ## E0 ## E1 ## E0; }; \ + struct { _swizzle<4,T, P, V, 1,0,1,1> E1 ## E0 ## E1 ## E1; }; \ + struct { _swizzle<4,T, P, V, 1,0,1,2> E1 ## E0 ## E1 ## E2; }; \ + struct { _swizzle<4,T, P, V, 1,0,2,0> E1 ## E0 ## E2 ## E0; }; \ + struct { _swizzle<4,T, P, V, 1,0,2,1> E1 ## E0 ## E2 ## E1; }; \ + struct { _swizzle<4,T, P, V, 1,0,2,2> E1 ## E0 ## E2 ## E2; }; \ + struct { _swizzle<4,T, P, V, 1,1,0,0> E1 ## E1 ## E0 ## E0; }; \ + struct { _swizzle<4,T, P, V, 1,1,0,1> E1 ## E1 ## E0 ## E1; }; \ + struct { _swizzle<4,T, P, V, 1,1,0,2> E1 ## E1 ## E0 ## E2; }; \ + struct { _swizzle<4,T, P, V, 1,1,1,0> E1 ## E1 ## E1 ## E0; }; \ + struct { _swizzle<4,T, P, V, 1,1,1,1> E1 ## E1 ## E1 ## E1; }; \ + struct { _swizzle<4,T, P, V, 1,1,1,2> E1 ## E1 ## E1 ## E2; }; \ + struct { _swizzle<4,T, P, V, 1,1,2,0> E1 ## E1 ## E2 ## E0; }; \ + struct { _swizzle<4,T, P, V, 1,1,2,1> E1 ## E1 ## E2 ## E1; }; \ + struct { _swizzle<4,T, P, V, 1,1,2,2> E1 ## E1 ## E2 ## E2; }; \ + struct { _swizzle<4,T, P, V, 1,2,0,0> E1 ## E2 ## E0 ## E0; }; \ + struct { _swizzle<4,T, P, V, 1,2,0,1> E1 ## E2 ## E0 ## E1; }; \ + struct { _swizzle<4,T, P, V, 1,2,0,2> E1 ## E2 ## E0 ## E2; }; \ + struct { _swizzle<4,T, P, V, 1,2,1,0> E1 ## E2 ## E1 ## E0; }; \ + struct { _swizzle<4,T, P, V, 1,2,1,1> E1 ## E2 ## E1 ## E1; }; \ + struct { _swizzle<4,T, P, V, 1,2,1,2> E1 ## E2 ## E1 ## E2; }; \ + struct { _swizzle<4,T, P, V, 1,2,2,0> E1 ## E2 ## E2 ## E0; }; \ + struct { _swizzle<4,T, P, V, 1,2,2,1> E1 ## E2 ## E2 ## E1; }; \ + struct { _swizzle<4,T, P, V, 1,2,2,2> E1 ## E2 ## E2 ## E2; }; \ + struct { _swizzle<4,T, P, V, 2,0,0,0> E2 ## E0 ## E0 ## E0; }; \ + struct { _swizzle<4,T, P, V, 2,0,0,1> E2 ## E0 ## E0 ## E1; }; \ + struct { _swizzle<4,T, P, V, 2,0,0,2> E2 ## E0 ## E0 ## E2; }; \ + struct { _swizzle<4,T, P, V, 2,0,1,0> E2 ## E0 ## E1 ## E0; }; \ + struct { _swizzle<4,T, P, V, 2,0,1,1> E2 ## E0 ## E1 ## E1; }; \ + struct { _swizzle<4,T, P, V, 2,0,1,2> E2 ## E0 ## E1 ## E2; }; \ + struct { _swizzle<4,T, P, V, 2,0,2,0> E2 ## E0 ## E2 ## E0; }; \ + struct { _swizzle<4,T, P, V, 2,0,2,1> E2 ## E0 ## E2 
## E1; }; \ + struct { _swizzle<4,T, P, V, 2,0,2,2> E2 ## E0 ## E2 ## E2; }; \ + struct { _swizzle<4,T, P, V, 2,1,0,0> E2 ## E1 ## E0 ## E0; }; \ + struct { _swizzle<4,T, P, V, 2,1,0,1> E2 ## E1 ## E0 ## E1; }; \ + struct { _swizzle<4,T, P, V, 2,1,0,2> E2 ## E1 ## E0 ## E2; }; \ + struct { _swizzle<4,T, P, V, 2,1,1,0> E2 ## E1 ## E1 ## E0; }; \ + struct { _swizzle<4,T, P, V, 2,1,1,1> E2 ## E1 ## E1 ## E1; }; \ + struct { _swizzle<4,T, P, V, 2,1,1,2> E2 ## E1 ## E1 ## E2; }; \ + struct { _swizzle<4,T, P, V, 2,1,2,0> E2 ## E1 ## E2 ## E0; }; \ + struct { _swizzle<4,T, P, V, 2,1,2,1> E2 ## E1 ## E2 ## E1; }; \ + struct { _swizzle<4,T, P, V, 2,1,2,2> E2 ## E1 ## E2 ## E2; }; \ + struct { _swizzle<4,T, P, V, 2,2,0,0> E2 ## E2 ## E0 ## E0; }; \ + struct { _swizzle<4,T, P, V, 2,2,0,1> E2 ## E2 ## E0 ## E1; }; \ + struct { _swizzle<4,T, P, V, 2,2,0,2> E2 ## E2 ## E0 ## E2; }; \ + struct { _swizzle<4,T, P, V, 2,2,1,0> E2 ## E2 ## E1 ## E0; }; \ + struct { _swizzle<4,T, P, V, 2,2,1,1> E2 ## E2 ## E1 ## E1; }; \ + struct { _swizzle<4,T, P, V, 2,2,1,2> E2 ## E2 ## E1 ## E2; }; \ + struct { _swizzle<4,T, P, V, 2,2,2,0> E2 ## E2 ## E2 ## E0; }; \ + struct { _swizzle<4,T, P, V, 2,2,2,1> E2 ## E2 ## E2 ## E1; }; \ + struct { _swizzle<4,T, P, V, 2,2,2,2> E2 ## E2 ## E2 ## E2; }; + +#define _GLM_SWIZZLE4_2_MEMBERS(T, P, V, E0,E1,E2,E3) \ + struct { _swizzle<2,T, P, V, 0,0,-1,-2> E0 ## E0; }; \ + struct { _swizzle<2,T, P, V, 0,1,-1,-2> E0 ## E1; }; \ + struct { _swizzle<2,T, P, V, 0,2,-1,-2> E0 ## E2; }; \ + struct { _swizzle<2,T, P, V, 0,3,-1,-2> E0 ## E3; }; \ + struct { _swizzle<2,T, P, V, 1,0,-1,-2> E1 ## E0; }; \ + struct { _swizzle<2,T, P, V, 1,1,-1,-2> E1 ## E1; }; \ + struct { _swizzle<2,T, P, V, 1,2,-1,-2> E1 ## E2; }; \ + struct { _swizzle<2,T, P, V, 1,3,-1,-2> E1 ## E3; }; \ + struct { _swizzle<2,T, P, V, 2,0,-1,-2> E2 ## E0; }; \ + struct { _swizzle<2,T, P, V, 2,1,-1,-2> E2 ## E1; }; \ + struct { _swizzle<2,T, P, V, 2,2,-1,-2> E2 ## E2; }; \ + struct { _swizzle<2,T, P, V, 2,3,-1,-2> E2 ## E3; }; \ + struct { _swizzle<2,T, P, V, 3,0,-1,-2> E3 ## E0; }; \ + struct { _swizzle<2,T, P, V, 3,1,-1,-2> E3 ## E1; }; \ + struct { _swizzle<2,T, P, V, 3,2,-1,-2> E3 ## E2; }; \ + struct { _swizzle<2,T, P, V, 3,3,-1,-2> E3 ## E3; }; + +#define _GLM_SWIZZLE4_3_MEMBERS(T,P, V, E0,E1,E2,E3) \ + struct { _swizzle<3,T,P, V, 0,0,0,-1> E0 ## E0 ## E0; }; \ + struct { _swizzle<3,T,P, V, 0,0,1,-1> E0 ## E0 ## E1; }; \ + struct { _swizzle<3,T,P, V, 0,0,2,-1> E0 ## E0 ## E2; }; \ + struct { _swizzle<3,T,P, V, 0,0,3,-1> E0 ## E0 ## E3; }; \ + struct { _swizzle<3,T,P, V, 0,1,0,-1> E0 ## E1 ## E0; }; \ + struct { _swizzle<3,T,P, V, 0,1,1,-1> E0 ## E1 ## E1; }; \ + struct { _swizzle<3,T,P, V, 0,1,2,-1> E0 ## E1 ## E2; }; \ + struct { _swizzle<3,T,P, V, 0,1,3,-1> E0 ## E1 ## E3; }; \ + struct { _swizzle<3,T,P, V, 0,2,0,-1> E0 ## E2 ## E0; }; \ + struct { _swizzle<3,T,P, V, 0,2,1,-1> E0 ## E2 ## E1; }; \ + struct { _swizzle<3,T,P, V, 0,2,2,-1> E0 ## E2 ## E2; }; \ + struct { _swizzle<3,T,P, V, 0,2,3,-1> E0 ## E2 ## E3; }; \ + struct { _swizzle<3,T,P, V, 0,3,0,-1> E0 ## E3 ## E0; }; \ + struct { _swizzle<3,T,P, V, 0,3,1,-1> E0 ## E3 ## E1; }; \ + struct { _swizzle<3,T,P, V, 0,3,2,-1> E0 ## E3 ## E2; }; \ + struct { _swizzle<3,T,P, V, 0,3,3,-1> E0 ## E3 ## E3; }; \ + struct { _swizzle<3,T,P, V, 1,0,0,-1> E1 ## E0 ## E0; }; \ + struct { _swizzle<3,T,P, V, 1,0,1,-1> E1 ## E0 ## E1; }; \ + struct { _swizzle<3,T,P, V, 1,0,2,-1> E1 ## E0 ## E2; }; \ + struct { _swizzle<3,T,P, V, 1,0,3,-1> E1 ## E0 ## E3; }; \ + struct { 
_swizzle<3,T,P, V, 1,1,0,-1> E1 ## E1 ## E0; }; \ + struct { _swizzle<3,T,P, V, 1,1,1,-1> E1 ## E1 ## E1; }; \ + struct { _swizzle<3,T,P, V, 1,1,2,-1> E1 ## E1 ## E2; }; \ + struct { _swizzle<3,T,P, V, 1,1,3,-1> E1 ## E1 ## E3; }; \ + struct { _swizzle<3,T,P, V, 1,2,0,-1> E1 ## E2 ## E0; }; \ + struct { _swizzle<3,T,P, V, 1,2,1,-1> E1 ## E2 ## E1; }; \ + struct { _swizzle<3,T,P, V, 1,2,2,-1> E1 ## E2 ## E2; }; \ + struct { _swizzle<3,T,P, V, 1,2,3,-1> E1 ## E2 ## E3; }; \ + struct { _swizzle<3,T,P, V, 1,3,0,-1> E1 ## E3 ## E0; }; \ + struct { _swizzle<3,T,P, V, 1,3,1,-1> E1 ## E3 ## E1; }; \ + struct { _swizzle<3,T,P, V, 1,3,2,-1> E1 ## E3 ## E2; }; \ + struct { _swizzle<3,T,P, V, 1,3,3,-1> E1 ## E3 ## E3; }; \ + struct { _swizzle<3,T,P, V, 2,0,0,-1> E2 ## E0 ## E0; }; \ + struct { _swizzle<3,T,P, V, 2,0,1,-1> E2 ## E0 ## E1; }; \ + struct { _swizzle<3,T,P, V, 2,0,2,-1> E2 ## E0 ## E2; }; \ + struct { _swizzle<3,T,P, V, 2,0,3,-1> E2 ## E0 ## E3; }; \ + struct { _swizzle<3,T,P, V, 2,1,0,-1> E2 ## E1 ## E0; }; \ + struct { _swizzle<3,T,P, V, 2,1,1,-1> E2 ## E1 ## E1; }; \ + struct { _swizzle<3,T,P, V, 2,1,2,-1> E2 ## E1 ## E2; }; \ + struct { _swizzle<3,T,P, V, 2,1,3,-1> E2 ## E1 ## E3; }; \ + struct { _swizzle<3,T,P, V, 2,2,0,-1> E2 ## E2 ## E0; }; \ + struct { _swizzle<3,T,P, V, 2,2,1,-1> E2 ## E2 ## E1; }; \ + struct { _swizzle<3,T,P, V, 2,2,2,-1> E2 ## E2 ## E2; }; \ + struct { _swizzle<3,T,P, V, 2,2,3,-1> E2 ## E2 ## E3; }; \ + struct { _swizzle<3,T,P, V, 2,3,0,-1> E2 ## E3 ## E0; }; \ + struct { _swizzle<3,T,P, V, 2,3,1,-1> E2 ## E3 ## E1; }; \ + struct { _swizzle<3,T,P, V, 2,3,2,-1> E2 ## E3 ## E2; }; \ + struct { _swizzle<3,T,P, V, 2,3,3,-1> E2 ## E3 ## E3; }; \ + struct { _swizzle<3,T,P, V, 3,0,0,-1> E3 ## E0 ## E0; }; \ + struct { _swizzle<3,T,P, V, 3,0,1,-1> E3 ## E0 ## E1; }; \ + struct { _swizzle<3,T,P, V, 3,0,2,-1> E3 ## E0 ## E2; }; \ + struct { _swizzle<3,T,P, V, 3,0,3,-1> E3 ## E0 ## E3; }; \ + struct { _swizzle<3,T,P, V, 3,1,0,-1> E3 ## E1 ## E0; }; \ + struct { _swizzle<3,T,P, V, 3,1,1,-1> E3 ## E1 ## E1; }; \ + struct { _swizzle<3,T,P, V, 3,1,2,-1> E3 ## E1 ## E2; }; \ + struct { _swizzle<3,T,P, V, 3,1,3,-1> E3 ## E1 ## E3; }; \ + struct { _swizzle<3,T,P, V, 3,2,0,-1> E3 ## E2 ## E0; }; \ + struct { _swizzle<3,T,P, V, 3,2,1,-1> E3 ## E2 ## E1; }; \ + struct { _swizzle<3,T,P, V, 3,2,2,-1> E3 ## E2 ## E2; }; \ + struct { _swizzle<3,T,P, V, 3,2,3,-1> E3 ## E2 ## E3; }; \ + struct { _swizzle<3,T,P, V, 3,3,0,-1> E3 ## E3 ## E0; }; \ + struct { _swizzle<3,T,P, V, 3,3,1,-1> E3 ## E3 ## E1; }; \ + struct { _swizzle<3,T,P, V, 3,3,2,-1> E3 ## E3 ## E2; }; \ + struct { _swizzle<3,T,P, V, 3,3,3,-1> E3 ## E3 ## E3; }; + +#define _GLM_SWIZZLE4_4_MEMBERS(T, P, V, E0,E1,E2,E3) \ + struct { _swizzle<4, T, P, V, 0,0,0,0> E0 ## E0 ## E0 ## E0; }; \ + struct { _swizzle<4, T, P, V, 0,0,0,1> E0 ## E0 ## E0 ## E1; }; \ + struct { _swizzle<4, T, P, V, 0,0,0,2> E0 ## E0 ## E0 ## E2; }; \ + struct { _swizzle<4, T, P, V, 0,0,0,3> E0 ## E0 ## E0 ## E3; }; \ + struct { _swizzle<4, T, P, V, 0,0,1,0> E0 ## E0 ## E1 ## E0; }; \ + struct { _swizzle<4, T, P, V, 0,0,1,1> E0 ## E0 ## E1 ## E1; }; \ + struct { _swizzle<4, T, P, V, 0,0,1,2> E0 ## E0 ## E1 ## E2; }; \ + struct { _swizzle<4, T, P, V, 0,0,1,3> E0 ## E0 ## E1 ## E3; }; \ + struct { _swizzle<4, T, P, V, 0,0,2,0> E0 ## E0 ## E2 ## E0; }; \ + struct { _swizzle<4, T, P, V, 0,0,2,1> E0 ## E0 ## E2 ## E1; }; \ + struct { _swizzle<4, T, P, V, 0,0,2,2> E0 ## E0 ## E2 ## E2; }; \ + struct { _swizzle<4, T, P, V, 0,0,2,3> E0 ## E0 ## E2 ## E3; }; \ + struct 
{ _swizzle<4, T, P, V, 0,0,3,0> E0 ## E0 ## E3 ## E0; }; \ + struct { _swizzle<4, T, P, V, 0,0,3,1> E0 ## E0 ## E3 ## E1; }; \ + struct { _swizzle<4, T, P, V, 0,0,3,2> E0 ## E0 ## E3 ## E2; }; \ + struct { _swizzle<4, T, P, V, 0,0,3,3> E0 ## E0 ## E3 ## E3; }; \ + struct { _swizzle<4, T, P, V, 0,1,0,0> E0 ## E1 ## E0 ## E0; }; \ + struct { _swizzle<4, T, P, V, 0,1,0,1> E0 ## E1 ## E0 ## E1; }; \ + struct { _swizzle<4, T, P, V, 0,1,0,2> E0 ## E1 ## E0 ## E2; }; \ + struct { _swizzle<4, T, P, V, 0,1,0,3> E0 ## E1 ## E0 ## E3; }; \ + struct { _swizzle<4, T, P, V, 0,1,1,0> E0 ## E1 ## E1 ## E0; }; \ + struct { _swizzle<4, T, P, V, 0,1,1,1> E0 ## E1 ## E1 ## E1; }; \ + struct { _swizzle<4, T, P, V, 0,1,1,2> E0 ## E1 ## E1 ## E2; }; \ + struct { _swizzle<4, T, P, V, 0,1,1,3> E0 ## E1 ## E1 ## E3; }; \ + struct { _swizzle<4, T, P, V, 0,1,2,0> E0 ## E1 ## E2 ## E0; }; \ + struct { _swizzle<4, T, P, V, 0,1,2,1> E0 ## E1 ## E2 ## E1; }; \ + struct { _swizzle<4, T, P, V, 0,1,2,2> E0 ## E1 ## E2 ## E2; }; \ + struct { _swizzle<4, T, P, V, 0,1,2,3> E0 ## E1 ## E2 ## E3; }; \ + struct { _swizzle<4, T, P, V, 0,1,3,0> E0 ## E1 ## E3 ## E0; }; \ + struct { _swizzle<4, T, P, V, 0,1,3,1> E0 ## E1 ## E3 ## E1; }; \ + struct { _swizzle<4, T, P, V, 0,1,3,2> E0 ## E1 ## E3 ## E2; }; \ + struct { _swizzle<4, T, P, V, 0,1,3,3> E0 ## E1 ## E3 ## E3; }; \ + struct { _swizzle<4, T, P, V, 0,2,0,0> E0 ## E2 ## E0 ## E0; }; \ + struct { _swizzle<4, T, P, V, 0,2,0,1> E0 ## E2 ## E0 ## E1; }; \ + struct { _swizzle<4, T, P, V, 0,2,0,2> E0 ## E2 ## E0 ## E2; }; \ + struct { _swizzle<4, T, P, V, 0,2,0,3> E0 ## E2 ## E0 ## E3; }; \ + struct { _swizzle<4, T, P, V, 0,2,1,0> E0 ## E2 ## E1 ## E0; }; \ + struct { _swizzle<4, T, P, V, 0,2,1,1> E0 ## E2 ## E1 ## E1; }; \ + struct { _swizzle<4, T, P, V, 0,2,1,2> E0 ## E2 ## E1 ## E2; }; \ + struct { _swizzle<4, T, P, V, 0,2,1,3> E0 ## E2 ## E1 ## E3; }; \ + struct { _swizzle<4, T, P, V, 0,2,2,0> E0 ## E2 ## E2 ## E0; }; \ + struct { _swizzle<4, T, P, V, 0,2,2,1> E0 ## E2 ## E2 ## E1; }; \ + struct { _swizzle<4, T, P, V, 0,2,2,2> E0 ## E2 ## E2 ## E2; }; \ + struct { _swizzle<4, T, P, V, 0,2,2,3> E0 ## E2 ## E2 ## E3; }; \ + struct { _swizzle<4, T, P, V, 0,2,3,0> E0 ## E2 ## E3 ## E0; }; \ + struct { _swizzle<4, T, P, V, 0,2,3,1> E0 ## E2 ## E3 ## E1; }; \ + struct { _swizzle<4, T, P, V, 0,2,3,2> E0 ## E2 ## E3 ## E2; }; \ + struct { _swizzle<4, T, P, V, 0,2,3,3> E0 ## E2 ## E3 ## E3; }; \ + struct { _swizzle<4, T, P, V, 0,3,0,0> E0 ## E3 ## E0 ## E0; }; \ + struct { _swizzle<4, T, P, V, 0,3,0,1> E0 ## E3 ## E0 ## E1; }; \ + struct { _swizzle<4, T, P, V, 0,3,0,2> E0 ## E3 ## E0 ## E2; }; \ + struct { _swizzle<4, T, P, V, 0,3,0,3> E0 ## E3 ## E0 ## E3; }; \ + struct { _swizzle<4, T, P, V, 0,3,1,0> E0 ## E3 ## E1 ## E0; }; \ + struct { _swizzle<4, T, P, V, 0,3,1,1> E0 ## E3 ## E1 ## E1; }; \ + struct { _swizzle<4, T, P, V, 0,3,1,2> E0 ## E3 ## E1 ## E2; }; \ + struct { _swizzle<4, T, P, V, 0,3,1,3> E0 ## E3 ## E1 ## E3; }; \ + struct { _swizzle<4, T, P, V, 0,3,2,0> E0 ## E3 ## E2 ## E0; }; \ + struct { _swizzle<4, T, P, V, 0,3,2,1> E0 ## E3 ## E2 ## E1; }; \ + struct { _swizzle<4, T, P, V, 0,3,2,2> E0 ## E3 ## E2 ## E2; }; \ + struct { _swizzle<4, T, P, V, 0,3,2,3> E0 ## E3 ## E2 ## E3; }; \ + struct { _swizzle<4, T, P, V, 0,3,3,0> E0 ## E3 ## E3 ## E0; }; \ + struct { _swizzle<4, T, P, V, 0,3,3,1> E0 ## E3 ## E3 ## E1; }; \ + struct { _swizzle<4, T, P, V, 0,3,3,2> E0 ## E3 ## E3 ## E2; }; \ + struct { _swizzle<4, T, P, V, 0,3,3,3> E0 ## E3 ## E3 ## E3; }; \ + struct { _swizzle<4, T, 
P, V, 1,0,0,0> E1 ## E0 ## E0 ## E0; }; \ + struct { _swizzle<4, T, P, V, 1,0,0,1> E1 ## E0 ## E0 ## E1; }; \ + struct { _swizzle<4, T, P, V, 1,0,0,2> E1 ## E0 ## E0 ## E2; }; \ + struct { _swizzle<4, T, P, V, 1,0,0,3> E1 ## E0 ## E0 ## E3; }; \ + struct { _swizzle<4, T, P, V, 1,0,1,0> E1 ## E0 ## E1 ## E0; }; \ + struct { _swizzle<4, T, P, V, 1,0,1,1> E1 ## E0 ## E1 ## E1; }; \ + struct { _swizzle<4, T, P, V, 1,0,1,2> E1 ## E0 ## E1 ## E2; }; \ + struct { _swizzle<4, T, P, V, 1,0,1,3> E1 ## E0 ## E1 ## E3; }; \ + struct { _swizzle<4, T, P, V, 1,0,2,0> E1 ## E0 ## E2 ## E0; }; \ + struct { _swizzle<4, T, P, V, 1,0,2,1> E1 ## E0 ## E2 ## E1; }; \ + struct { _swizzle<4, T, P, V, 1,0,2,2> E1 ## E0 ## E2 ## E2; }; \ + struct { _swizzle<4, T, P, V, 1,0,2,3> E1 ## E0 ## E2 ## E3; }; \ + struct { _swizzle<4, T, P, V, 1,0,3,0> E1 ## E0 ## E3 ## E0; }; \ + struct { _swizzle<4, T, P, V, 1,0,3,1> E1 ## E0 ## E3 ## E1; }; \ + struct { _swizzle<4, T, P, V, 1,0,3,2> E1 ## E0 ## E3 ## E2; }; \ + struct { _swizzle<4, T, P, V, 1,0,3,3> E1 ## E0 ## E3 ## E3; }; \ + struct { _swizzle<4, T, P, V, 1,1,0,0> E1 ## E1 ## E0 ## E0; }; \ + struct { _swizzle<4, T, P, V, 1,1,0,1> E1 ## E1 ## E0 ## E1; }; \ + struct { _swizzle<4, T, P, V, 1,1,0,2> E1 ## E1 ## E0 ## E2; }; \ + struct { _swizzle<4, T, P, V, 1,1,0,3> E1 ## E1 ## E0 ## E3; }; \ + struct { _swizzle<4, T, P, V, 1,1,1,0> E1 ## E1 ## E1 ## E0; }; \ + struct { _swizzle<4, T, P, V, 1,1,1,1> E1 ## E1 ## E1 ## E1; }; \ + struct { _swizzle<4, T, P, V, 1,1,1,2> E1 ## E1 ## E1 ## E2; }; \ + struct { _swizzle<4, T, P, V, 1,1,1,3> E1 ## E1 ## E1 ## E3; }; \ + struct { _swizzle<4, T, P, V, 1,1,2,0> E1 ## E1 ## E2 ## E0; }; \ + struct { _swizzle<4, T, P, V, 1,1,2,1> E1 ## E1 ## E2 ## E1; }; \ + struct { _swizzle<4, T, P, V, 1,1,2,2> E1 ## E1 ## E2 ## E2; }; \ + struct { _swizzle<4, T, P, V, 1,1,2,3> E1 ## E1 ## E2 ## E3; }; \ + struct { _swizzle<4, T, P, V, 1,1,3,0> E1 ## E1 ## E3 ## E0; }; \ + struct { _swizzle<4, T, P, V, 1,1,3,1> E1 ## E1 ## E3 ## E1; }; \ + struct { _swizzle<4, T, P, V, 1,1,3,2> E1 ## E1 ## E3 ## E2; }; \ + struct { _swizzle<4, T, P, V, 1,1,3,3> E1 ## E1 ## E3 ## E3; }; \ + struct { _swizzle<4, T, P, V, 1,2,0,0> E1 ## E2 ## E0 ## E0; }; \ + struct { _swizzle<4, T, P, V, 1,2,0,1> E1 ## E2 ## E0 ## E1; }; \ + struct { _swizzle<4, T, P, V, 1,2,0,2> E1 ## E2 ## E0 ## E2; }; \ + struct { _swizzle<4, T, P, V, 1,2,0,3> E1 ## E2 ## E0 ## E3; }; \ + struct { _swizzle<4, T, P, V, 1,2,1,0> E1 ## E2 ## E1 ## E0; }; \ + struct { _swizzle<4, T, P, V, 1,2,1,1> E1 ## E2 ## E1 ## E1; }; \ + struct { _swizzle<4, T, P, V, 1,2,1,2> E1 ## E2 ## E1 ## E2; }; \ + struct { _swizzle<4, T, P, V, 1,2,1,3> E1 ## E2 ## E1 ## E3; }; \ + struct { _swizzle<4, T, P, V, 1,2,2,0> E1 ## E2 ## E2 ## E0; }; \ + struct { _swizzle<4, T, P, V, 1,2,2,1> E1 ## E2 ## E2 ## E1; }; \ + struct { _swizzle<4, T, P, V, 1,2,2,2> E1 ## E2 ## E2 ## E2; }; \ + struct { _swizzle<4, T, P, V, 1,2,2,3> E1 ## E2 ## E2 ## E3; }; \ + struct { _swizzle<4, T, P, V, 1,2,3,0> E1 ## E2 ## E3 ## E0; }; \ + struct { _swizzle<4, T, P, V, 1,2,3,1> E1 ## E2 ## E3 ## E1; }; \ + struct { _swizzle<4, T, P, V, 1,2,3,2> E1 ## E2 ## E3 ## E2; }; \ + struct { _swizzle<4, T, P, V, 1,2,3,3> E1 ## E2 ## E3 ## E3; }; \ + struct { _swizzle<4, T, P, V, 1,3,0,0> E1 ## E3 ## E0 ## E0; }; \ + struct { _swizzle<4, T, P, V, 1,3,0,1> E1 ## E3 ## E0 ## E1; }; \ + struct { _swizzle<4, T, P, V, 1,3,0,2> E1 ## E3 ## E0 ## E2; }; \ + struct { _swizzle<4, T, P, V, 1,3,0,3> E1 ## E3 ## E0 ## E3; }; \ + struct { _swizzle<4, T, P, V, 1,3,1,0> E1 
## E3 ## E1 ## E0; }; \ + struct { _swizzle<4, T, P, V, 1,3,1,1> E1 ## E3 ## E1 ## E1; }; \ + struct { _swizzle<4, T, P, V, 1,3,1,2> E1 ## E3 ## E1 ## E2; }; \ + struct { _swizzle<4, T, P, V, 1,3,1,3> E1 ## E3 ## E1 ## E3; }; \ + struct { _swizzle<4, T, P, V, 1,3,2,0> E1 ## E3 ## E2 ## E0; }; \ + struct { _swizzle<4, T, P, V, 1,3,2,1> E1 ## E3 ## E2 ## E1; }; \ + struct { _swizzle<4, T, P, V, 1,3,2,2> E1 ## E3 ## E2 ## E2; }; \ + struct { _swizzle<4, T, P, V, 1,3,2,3> E1 ## E3 ## E2 ## E3; }; \ + struct { _swizzle<4, T, P, V, 1,3,3,0> E1 ## E3 ## E3 ## E0; }; \ + struct { _swizzle<4, T, P, V, 1,3,3,1> E1 ## E3 ## E3 ## E1; }; \ + struct { _swizzle<4, T, P, V, 1,3,3,2> E1 ## E3 ## E3 ## E2; }; \ + struct { _swizzle<4, T, P, V, 1,3,3,3> E1 ## E3 ## E3 ## E3; }; \ + struct { _swizzle<4, T, P, V, 2,0,0,0> E2 ## E0 ## E0 ## E0; }; \ + struct { _swizzle<4, T, P, V, 2,0,0,1> E2 ## E0 ## E0 ## E1; }; \ + struct { _swizzle<4, T, P, V, 2,0,0,2> E2 ## E0 ## E0 ## E2; }; \ + struct { _swizzle<4, T, P, V, 2,0,0,3> E2 ## E0 ## E0 ## E3; }; \ + struct { _swizzle<4, T, P, V, 2,0,1,0> E2 ## E0 ## E1 ## E0; }; \ + struct { _swizzle<4, T, P, V, 2,0,1,1> E2 ## E0 ## E1 ## E1; }; \ + struct { _swizzle<4, T, P, V, 2,0,1,2> E2 ## E0 ## E1 ## E2; }; \ + struct { _swizzle<4, T, P, V, 2,0,1,3> E2 ## E0 ## E1 ## E3; }; \ + struct { _swizzle<4, T, P, V, 2,0,2,0> E2 ## E0 ## E2 ## E0; }; \ + struct { _swizzle<4, T, P, V, 2,0,2,1> E2 ## E0 ## E2 ## E1; }; \ + struct { _swizzle<4, T, P, V, 2,0,2,2> E2 ## E0 ## E2 ## E2; }; \ + struct { _swizzle<4, T, P, V, 2,0,2,3> E2 ## E0 ## E2 ## E3; }; \ + struct { _swizzle<4, T, P, V, 2,0,3,0> E2 ## E0 ## E3 ## E0; }; \ + struct { _swizzle<4, T, P, V, 2,0,3,1> E2 ## E0 ## E3 ## E1; }; \ + struct { _swizzle<4, T, P, V, 2,0,3,2> E2 ## E0 ## E3 ## E2; }; \ + struct { _swizzle<4, T, P, V, 2,0,3,3> E2 ## E0 ## E3 ## E3; }; \ + struct { _swizzle<4, T, P, V, 2,1,0,0> E2 ## E1 ## E0 ## E0; }; \ + struct { _swizzle<4, T, P, V, 2,1,0,1> E2 ## E1 ## E0 ## E1; }; \ + struct { _swizzle<4, T, P, V, 2,1,0,2> E2 ## E1 ## E0 ## E2; }; \ + struct { _swizzle<4, T, P, V, 2,1,0,3> E2 ## E1 ## E0 ## E3; }; \ + struct { _swizzle<4, T, P, V, 2,1,1,0> E2 ## E1 ## E1 ## E0; }; \ + struct { _swizzle<4, T, P, V, 2,1,1,1> E2 ## E1 ## E1 ## E1; }; \ + struct { _swizzle<4, T, P, V, 2,1,1,2> E2 ## E1 ## E1 ## E2; }; \ + struct { _swizzle<4, T, P, V, 2,1,1,3> E2 ## E1 ## E1 ## E3; }; \ + struct { _swizzle<4, T, P, V, 2,1,2,0> E2 ## E1 ## E2 ## E0; }; \ + struct { _swizzle<4, T, P, V, 2,1,2,1> E2 ## E1 ## E2 ## E1; }; \ + struct { _swizzle<4, T, P, V, 2,1,2,2> E2 ## E1 ## E2 ## E2; }; \ + struct { _swizzle<4, T, P, V, 2,1,2,3> E2 ## E1 ## E2 ## E3; }; \ + struct { _swizzle<4, T, P, V, 2,1,3,0> E2 ## E1 ## E3 ## E0; }; \ + struct { _swizzle<4, T, P, V, 2,1,3,1> E2 ## E1 ## E3 ## E1; }; \ + struct { _swizzle<4, T, P, V, 2,1,3,2> E2 ## E1 ## E3 ## E2; }; \ + struct { _swizzle<4, T, P, V, 2,1,3,3> E2 ## E1 ## E3 ## E3; }; \ + struct { _swizzle<4, T, P, V, 2,2,0,0> E2 ## E2 ## E0 ## E0; }; \ + struct { _swizzle<4, T, P, V, 2,2,0,1> E2 ## E2 ## E0 ## E1; }; \ + struct { _swizzle<4, T, P, V, 2,2,0,2> E2 ## E2 ## E0 ## E2; }; \ + struct { _swizzle<4, T, P, V, 2,2,0,3> E2 ## E2 ## E0 ## E3; }; \ + struct { _swizzle<4, T, P, V, 2,2,1,0> E2 ## E2 ## E1 ## E0; }; \ + struct { _swizzle<4, T, P, V, 2,2,1,1> E2 ## E2 ## E1 ## E1; }; \ + struct { _swizzle<4, T, P, V, 2,2,1,2> E2 ## E2 ## E1 ## E2; }; \ + struct { _swizzle<4, T, P, V, 2,2,1,3> E2 ## E2 ## E1 ## E3; }; \ + struct { _swizzle<4, T, P, V, 2,2,2,0> E2 ## E2 ## E2 ## E0; 
}; \ + struct { _swizzle<4, T, P, V, 2,2,2,1> E2 ## E2 ## E2 ## E1; }; \ + struct { _swizzle<4, T, P, V, 2,2,2,2> E2 ## E2 ## E2 ## E2; }; \ + struct { _swizzle<4, T, P, V, 2,2,2,3> E2 ## E2 ## E2 ## E3; }; \ + struct { _swizzle<4, T, P, V, 2,2,3,0> E2 ## E2 ## E3 ## E0; }; \ + struct { _swizzle<4, T, P, V, 2,2,3,1> E2 ## E2 ## E3 ## E1; }; \ + struct { _swizzle<4, T, P, V, 2,2,3,2> E2 ## E2 ## E3 ## E2; }; \ + struct { _swizzle<4, T, P, V, 2,2,3,3> E2 ## E2 ## E3 ## E3; }; \ + struct { _swizzle<4, T, P, V, 2,3,0,0> E2 ## E3 ## E0 ## E0; }; \ + struct { _swizzle<4, T, P, V, 2,3,0,1> E2 ## E3 ## E0 ## E1; }; \ + struct { _swizzle<4, T, P, V, 2,3,0,2> E2 ## E3 ## E0 ## E2; }; \ + struct { _swizzle<4, T, P, V, 2,3,0,3> E2 ## E3 ## E0 ## E3; }; \ + struct { _swizzle<4, T, P, V, 2,3,1,0> E2 ## E3 ## E1 ## E0; }; \ + struct { _swizzle<4, T, P, V, 2,3,1,1> E2 ## E3 ## E1 ## E1; }; \ + struct { _swizzle<4, T, P, V, 2,3,1,2> E2 ## E3 ## E1 ## E2; }; \ + struct { _swizzle<4, T, P, V, 2,3,1,3> E2 ## E3 ## E1 ## E3; }; \ + struct { _swizzle<4, T, P, V, 2,3,2,0> E2 ## E3 ## E2 ## E0; }; \ + struct { _swizzle<4, T, P, V, 2,3,2,1> E2 ## E3 ## E2 ## E1; }; \ + struct { _swizzle<4, T, P, V, 2,3,2,2> E2 ## E3 ## E2 ## E2; }; \ + struct { _swizzle<4, T, P, V, 2,3,2,3> E2 ## E3 ## E2 ## E3; }; \ + struct { _swizzle<4, T, P, V, 2,3,3,0> E2 ## E3 ## E3 ## E0; }; \ + struct { _swizzle<4, T, P, V, 2,3,3,1> E2 ## E3 ## E3 ## E1; }; \ + struct { _swizzle<4, T, P, V, 2,3,3,2> E2 ## E3 ## E3 ## E2; }; \ + struct { _swizzle<4, T, P, V, 2,3,3,3> E2 ## E3 ## E3 ## E3; }; \ + struct { _swizzle<4, T, P, V, 3,0,0,0> E3 ## E0 ## E0 ## E0; }; \ + struct { _swizzle<4, T, P, V, 3,0,0,1> E3 ## E0 ## E0 ## E1; }; \ + struct { _swizzle<4, T, P, V, 3,0,0,2> E3 ## E0 ## E0 ## E2; }; \ + struct { _swizzle<4, T, P, V, 3,0,0,3> E3 ## E0 ## E0 ## E3; }; \ + struct { _swizzle<4, T, P, V, 3,0,1,0> E3 ## E0 ## E1 ## E0; }; \ + struct { _swizzle<4, T, P, V, 3,0,1,1> E3 ## E0 ## E1 ## E1; }; \ + struct { _swizzle<4, T, P, V, 3,0,1,2> E3 ## E0 ## E1 ## E2; }; \ + struct { _swizzle<4, T, P, V, 3,0,1,3> E3 ## E0 ## E1 ## E3; }; \ + struct { _swizzle<4, T, P, V, 3,0,2,0> E3 ## E0 ## E2 ## E0; }; \ + struct { _swizzle<4, T, P, V, 3,0,2,1> E3 ## E0 ## E2 ## E1; }; \ + struct { _swizzle<4, T, P, V, 3,0,2,2> E3 ## E0 ## E2 ## E2; }; \ + struct { _swizzle<4, T, P, V, 3,0,2,3> E3 ## E0 ## E2 ## E3; }; \ + struct { _swizzle<4, T, P, V, 3,0,3,0> E3 ## E0 ## E3 ## E0; }; \ + struct { _swizzle<4, T, P, V, 3,0,3,1> E3 ## E0 ## E3 ## E1; }; \ + struct { _swizzle<4, T, P, V, 3,0,3,2> E3 ## E0 ## E3 ## E2; }; \ + struct { _swizzle<4, T, P, V, 3,0,3,3> E3 ## E0 ## E3 ## E3; }; \ + struct { _swizzle<4, T, P, V, 3,1,0,0> E3 ## E1 ## E0 ## E0; }; \ + struct { _swizzle<4, T, P, V, 3,1,0,1> E3 ## E1 ## E0 ## E1; }; \ + struct { _swizzle<4, T, P, V, 3,1,0,2> E3 ## E1 ## E0 ## E2; }; \ + struct { _swizzle<4, T, P, V, 3,1,0,3> E3 ## E1 ## E0 ## E3; }; \ + struct { _swizzle<4, T, P, V, 3,1,1,0> E3 ## E1 ## E1 ## E0; }; \ + struct { _swizzle<4, T, P, V, 3,1,1,1> E3 ## E1 ## E1 ## E1; }; \ + struct { _swizzle<4, T, P, V, 3,1,1,2> E3 ## E1 ## E1 ## E2; }; \ + struct { _swizzle<4, T, P, V, 3,1,1,3> E3 ## E1 ## E1 ## E3; }; \ + struct { _swizzle<4, T, P, V, 3,1,2,0> E3 ## E1 ## E2 ## E0; }; \ + struct { _swizzle<4, T, P, V, 3,1,2,1> E3 ## E1 ## E2 ## E1; }; \ + struct { _swizzle<4, T, P, V, 3,1,2,2> E3 ## E1 ## E2 ## E2; }; \ + struct { _swizzle<4, T, P, V, 3,1,2,3> E3 ## E1 ## E2 ## E3; }; \ + struct { _swizzle<4, T, P, V, 3,1,3,0> E3 ## E1 ## E3 ## E0; }; \ + struct { 
_swizzle<4, T, P, V, 3,1,3,1> E3 ## E1 ## E3 ## E1; }; \ + struct { _swizzle<4, T, P, V, 3,1,3,2> E3 ## E1 ## E3 ## E2; }; \ + struct { _swizzle<4, T, P, V, 3,1,3,3> E3 ## E1 ## E3 ## E3; }; \ + struct { _swizzle<4, T, P, V, 3,2,0,0> E3 ## E2 ## E0 ## E0; }; \ + struct { _swizzle<4, T, P, V, 3,2,0,1> E3 ## E2 ## E0 ## E1; }; \ + struct { _swizzle<4, T, P, V, 3,2,0,2> E3 ## E2 ## E0 ## E2; }; \ + struct { _swizzle<4, T, P, V, 3,2,0,3> E3 ## E2 ## E0 ## E3; }; \ + struct { _swizzle<4, T, P, V, 3,2,1,0> E3 ## E2 ## E1 ## E0; }; \ + struct { _swizzle<4, T, P, V, 3,2,1,1> E3 ## E2 ## E1 ## E1; }; \ + struct { _swizzle<4, T, P, V, 3,2,1,2> E3 ## E2 ## E1 ## E2; }; \ + struct { _swizzle<4, T, P, V, 3,2,1,3> E3 ## E2 ## E1 ## E3; }; \ + struct { _swizzle<4, T, P, V, 3,2,2,0> E3 ## E2 ## E2 ## E0; }; \ + struct { _swizzle<4, T, P, V, 3,2,2,1> E3 ## E2 ## E2 ## E1; }; \ + struct { _swizzle<4, T, P, V, 3,2,2,2> E3 ## E2 ## E2 ## E2; }; \ + struct { _swizzle<4, T, P, V, 3,2,2,3> E3 ## E2 ## E2 ## E3; }; \ + struct { _swizzle<4, T, P, V, 3,2,3,0> E3 ## E2 ## E3 ## E0; }; \ + struct { _swizzle<4, T, P, V, 3,2,3,1> E3 ## E2 ## E3 ## E1; }; \ + struct { _swizzle<4, T, P, V, 3,2,3,2> E3 ## E2 ## E3 ## E2; }; \ + struct { _swizzle<4, T, P, V, 3,2,3,3> E3 ## E2 ## E3 ## E3; }; \ + struct { _swizzle<4, T, P, V, 3,3,0,0> E3 ## E3 ## E0 ## E0; }; \ + struct { _swizzle<4, T, P, V, 3,3,0,1> E3 ## E3 ## E0 ## E1; }; \ + struct { _swizzle<4, T, P, V, 3,3,0,2> E3 ## E3 ## E0 ## E2; }; \ + struct { _swizzle<4, T, P, V, 3,3,0,3> E3 ## E3 ## E0 ## E3; }; \ + struct { _swizzle<4, T, P, V, 3,3,1,0> E3 ## E3 ## E1 ## E0; }; \ + struct { _swizzle<4, T, P, V, 3,3,1,1> E3 ## E3 ## E1 ## E1; }; \ + struct { _swizzle<4, T, P, V, 3,3,1,2> E3 ## E3 ## E1 ## E2; }; \ + struct { _swizzle<4, T, P, V, 3,3,1,3> E3 ## E3 ## E1 ## E3; }; \ + struct { _swizzle<4, T, P, V, 3,3,2,0> E3 ## E3 ## E2 ## E0; }; \ + struct { _swizzle<4, T, P, V, 3,3,2,1> E3 ## E3 ## E2 ## E1; }; \ + struct { _swizzle<4, T, P, V, 3,3,2,2> E3 ## E3 ## E2 ## E2; }; \ + struct { _swizzle<4, T, P, V, 3,3,2,3> E3 ## E3 ## E2 ## E3; }; \ + struct { _swizzle<4, T, P, V, 3,3,3,0> E3 ## E3 ## E3 ## E0; }; \ + struct { _swizzle<4, T, P, V, 3,3,3,1> E3 ## E3 ## E3 ## E1; }; \ + struct { _swizzle<4, T, P, V, 3,3,3,2> E3 ## E3 ## E3 ## E2; }; \ + struct { _swizzle<4, T, P, V, 3,3,3,3> E3 ## E3 ## E3 ## E3; }; + +#endif//glm_core_swizzle diff --git a/deps/glm/detail/_swizzle_func.hpp b/deps/glm/detail/_swizzle_func.hpp new file mode 100644 index 0000000000..471bcd8a71 --- /dev/null +++ b/deps/glm/detail/_swizzle_func.hpp @@ -0,0 +1,724 @@ +/////////////////////////////////////////////////////////////////////////////////// +/// OpenGL Mathematics (glm.g-truc.net) +/// +/// Copyright (c) 2005 - 2014 G-Truc Creation (www.g-truc.net) +/// Permission is hereby granted, free of charge, to any person obtaining a copy +/// of this software and associated documentation files (the "Software"), to deal +/// in the Software without restriction, including without limitation the rights +/// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +/// copies of the Software, and to permit persons to whom the Software is +/// furnished to do so, subject to the following conditions: +/// +/// The above copyright notice and this permission notice shall be included in +/// all copies or substantial portions of the Software. 
+/// +/// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +/// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +/// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +/// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +/// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +/// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +/// THE SOFTWARE. +/// +/// @ref core +/// @file glm/core/_swizzle_func.hpp +/// @date 2011-10-16 / 2011-10-16 +/// @author Christophe Riccio +/////////////////////////////////////////////////////////////////////////////////// + +#ifndef glm_core_swizzle_func +#define glm_core_swizzle_func + +#define GLM_SWIZZLE_GEN_VEC2_ENTRY(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, CONST, A, B) \ + SWIZZLED_TYPE A ## B() CONST \ + { \ + return SWIZZLED_TYPE(this->A, this->B); \ + } + +#define GLM_SWIZZLE_GEN_VEC3_ENTRY(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, CONST, A, B, C) \ + SWIZZLED_TYPE A ## B ## C() CONST \ + { \ + return SWIZZLED_TYPE(this->A, this->B, this->C); \ + } + +#define GLM_SWIZZLE_GEN_VEC4_ENTRY(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, CONST, A, B, C, D) \ + SWIZZLED_TYPE A ## B ## C ## D() CONST \ + { \ + return SWIZZLED_TYPE(this->A, this->B, this->C, this->D); \ + } + +#define GLM_SWIZZLE_GEN_VEC2_ENTRY_DEF(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, CONST, A, B) \ + template \ + SWIZZLED_TYPE CLASS_TYPE::A ## B() CONST \ + { \ + return SWIZZLED_TYPE(this->A, this->B); \ + } + +#define GLM_SWIZZLE_GEN_VEC3_ENTRY_DEF(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, CONST, A, B, C) \ + template \ + SWIZZLED_TYPE CLASS_TYPE::A ## B ## C() CONST \ + { \ + return SWIZZLED_TYPE(this->A, this->B, this->C); \ + } + +#define GLM_SWIZZLE_GEN_VEC4_ENTRY_DEF(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, CONST, A, B, C, D) \ + template \ + SWIZZLED_TYPE CLASS_TYPE::A ## B ## C ## D() CONST \ + { \ + return SWIZZLED_TYPE(this->A, this->B, this->C, this->D); \ + } + +#define GLM_MUTABLE + +#define GLM_SWIZZLE_GEN_REF2_FROM_VEC2_SWIZZLE(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, A, B) \ + GLM_SWIZZLE_GEN_VEC2_ENTRY(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, GLM_MUTABLE, A, B) \ + GLM_SWIZZLE_GEN_VEC2_ENTRY(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, GLM_MUTABLE, B, A) + +#define GLM_SWIZZLE_GEN_REF_FROM_VEC2(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_VEC2_TYPE) \ + GLM_SWIZZLE_GEN_REF2_FROM_VEC2_SWIZZLE(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_VEC2_TYPE, x, y) \ + GLM_SWIZZLE_GEN_REF2_FROM_VEC2_SWIZZLE(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_VEC2_TYPE, r, g) \ + GLM_SWIZZLE_GEN_REF2_FROM_VEC2_SWIZZLE(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_VEC2_TYPE, s, t) + +//GLM_SWIZZLE_GEN_REF_FROM_VEC2(valType, detail::vec2, detail::ref2) + +#define GLM_SWIZZLE_GEN_REF2_FROM_VEC3_SWIZZLE(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, A, B, C) \ + GLM_SWIZZLE_GEN_VEC2_ENTRY(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, GLM_MUTABLE, A, B) \ + GLM_SWIZZLE_GEN_VEC2_ENTRY(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, GLM_MUTABLE, A, C) \ + GLM_SWIZZLE_GEN_VEC2_ENTRY(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, GLM_MUTABLE, B, A) \ + GLM_SWIZZLE_GEN_VEC2_ENTRY(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, GLM_MUTABLE, B, C) \ + GLM_SWIZZLE_GEN_VEC2_ENTRY(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, GLM_MUTABLE, C, A) \ + GLM_SWIZZLE_GEN_VEC2_ENTRY(TMPL_TYPE, PRECISION, 
CLASS_TYPE, SWIZZLED_TYPE, GLM_MUTABLE, C, B) + +#define GLM_SWIZZLE_GEN_REF3_FROM_VEC3_SWIZZLE(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, A, B, C) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, GLM_MUTABLE, A, B, C) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, GLM_MUTABLE, A, C, B) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, GLM_MUTABLE, B, A, C) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, GLM_MUTABLE, B, C, A) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, GLM_MUTABLE, C, A, B) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, GLM_MUTABLE, C, B, A) + +#define GLM_SWIZZLE_GEN_REF_FROM_VEC3_COMP(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_VEC2_TYPE, SWIZZLED_VEC3_TYPE, A, B, C) \ + GLM_SWIZZLE_GEN_REF3_FROM_VEC3_SWIZZLE(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_VEC3_TYPE, A, B, C) \ + GLM_SWIZZLE_GEN_REF2_FROM_VEC3_SWIZZLE(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_VEC2_TYPE, A, B, C) + +#define GLM_SWIZZLE_GEN_REF_FROM_VEC3(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_VEC2_TYPE, SWIZZLED_VEC3_TYPE) \ + GLM_SWIZZLE_GEN_REF_FROM_VEC3_COMP(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_VEC2_TYPE, SWIZZLED_VEC3_TYPE, x, y, z) \ + GLM_SWIZZLE_GEN_REF_FROM_VEC3_COMP(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_VEC2_TYPE, SWIZZLED_VEC3_TYPE, r, g, b) \ + GLM_SWIZZLE_GEN_REF_FROM_VEC3_COMP(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_VEC2_TYPE, SWIZZLED_VEC3_TYPE, s, t, p) + +//GLM_SWIZZLE_GEN_REF_FROM_VEC3(valType, detail::vec3, detail::ref2, detail::ref3) + +#define GLM_SWIZZLE_GEN_REF2_FROM_VEC4_SWIZZLE(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, A, B, C, D) \ + GLM_SWIZZLE_GEN_VEC2_ENTRY(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, GLM_MUTABLE, A, B) \ + GLM_SWIZZLE_GEN_VEC2_ENTRY(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, GLM_MUTABLE, A, C) \ + GLM_SWIZZLE_GEN_VEC2_ENTRY(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, GLM_MUTABLE, A, D) \ + GLM_SWIZZLE_GEN_VEC2_ENTRY(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, GLM_MUTABLE, B, A) \ + GLM_SWIZZLE_GEN_VEC2_ENTRY(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, GLM_MUTABLE, B, C) \ + GLM_SWIZZLE_GEN_VEC2_ENTRY(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, GLM_MUTABLE, B, D) \ + GLM_SWIZZLE_GEN_VEC2_ENTRY(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, GLM_MUTABLE, C, A) \ + GLM_SWIZZLE_GEN_VEC2_ENTRY(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, GLM_MUTABLE, C, B) \ + GLM_SWIZZLE_GEN_VEC2_ENTRY(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, GLM_MUTABLE, C, D) \ + GLM_SWIZZLE_GEN_VEC2_ENTRY(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, GLM_MUTABLE, D, A) \ + GLM_SWIZZLE_GEN_VEC2_ENTRY(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, GLM_MUTABLE, D, B) \ + GLM_SWIZZLE_GEN_VEC2_ENTRY(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, GLM_MUTABLE, D, C) + +#define GLM_SWIZZLE_GEN_REF3_FROM_VEC4_SWIZZLE(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, A, B, C, D) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, , A, B, C) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, , A, B, D) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, , A, C, B) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, , A, C, D) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, , A, D, B) \ + 
GLM_SWIZZLE_GEN_VEC3_ENTRY(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, , A, D, C) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, , B, A, C) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, , B, A, D) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, , B, C, A) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, , B, C, D) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, , B, D, A) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, , B, D, C) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, , C, A, B) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, , C, A, D) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, , C, B, A) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, , C, B, D) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, , C, D, A) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, , C, D, B) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, , D, A, B) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, , D, A, C) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, , D, B, A) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, , D, B, C) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, , D, C, A) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, , D, C, B) + +#define GLM_SWIZZLE_GEN_REF4_FROM_VEC4_SWIZZLE(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, A, B, C, D) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, , A, C, B, D) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, , A, C, D, B) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, , A, D, B, C) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, , A, D, C, B) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, , A, B, D, C) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, , A, B, C, D) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, , B, C, A, D) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, , B, C, D, A) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, , B, D, A, C) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, , B, D, C, A) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, , B, A, D, C) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, , B, A, C, D) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, , C, B, A, D) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, , C, B, D, A) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, , C, D, A, B) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, , C, D, B, A) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, , C, A, D, B) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, , C, A, B, D) \ + 
GLM_SWIZZLE_GEN_VEC4_ENTRY(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, , D, C, B, A) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, , D, C, A, B) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, , D, A, B, C) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, , D, A, C, B) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, , D, B, A, C) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, , D, B, C, A) + +#define GLM_SWIZZLE_GEN_REF_FROM_VEC4_COMP(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_VEC2_TYPE, SWIZZLED_VEC3_TYPE, SWIZZLED_VEC4_TYPE, A, B, C, D) \ + GLM_SWIZZLE_GEN_REF2_FROM_VEC4_SWIZZLE(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_VEC2_TYPE, A, B, C, D) \ + GLM_SWIZZLE_GEN_REF3_FROM_VEC4_SWIZZLE(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_VEC3_TYPE, A, B, C, D) \ + GLM_SWIZZLE_GEN_REF4_FROM_VEC4_SWIZZLE(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_VEC4_TYPE, A, B, C, D) + +#define GLM_SWIZZLE_GEN_REF_FROM_VEC4(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_VEC2_TYPE, SWIZZLED_VEC3_TYPE, SWIZZLED_VEC4_TYPE) \ + GLM_SWIZZLE_GEN_REF_FROM_VEC4_COMP(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_VEC2_TYPE, SWIZZLED_VEC3_TYPE, SWIZZLED_VEC4_TYPE, x, y, z, w) \ + GLM_SWIZZLE_GEN_REF_FROM_VEC4_COMP(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_VEC2_TYPE, SWIZZLED_VEC3_TYPE, SWIZZLED_VEC4_TYPE, r, g, b, a) \ + GLM_SWIZZLE_GEN_REF_FROM_VEC4_COMP(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_VEC2_TYPE, SWIZZLED_VEC3_TYPE, SWIZZLED_VEC4_TYPE, s, t, p, q) + +//GLM_SWIZZLE_GEN_REF_FROM_VEC4(valType, detail::vec4, detail::ref2, detail::ref3, detail::ref4) + +#define GLM_SWIZZLE_GEN_VEC2_FROM_VEC2_SWIZZLE(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, A, B) \ + GLM_SWIZZLE_GEN_VEC2_ENTRY(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, const, A, A) \ + GLM_SWIZZLE_GEN_VEC2_ENTRY(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, const, A, B) \ + GLM_SWIZZLE_GEN_VEC2_ENTRY(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, const, B, A) \ + GLM_SWIZZLE_GEN_VEC2_ENTRY(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, const, B, B) + +#define GLM_SWIZZLE_GEN_VEC3_FROM_VEC2_SWIZZLE(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, A, B) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, const, A, A, A) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, const, A, A, B) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, const, A, B, A) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, const, A, B, B) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, const, B, A, A) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, const, B, A, B) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, const, B, B, A) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, const, B, B, B) + +#define GLM_SWIZZLE_GEN_VEC4_FROM_VEC2_SWIZZLE(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, A, B) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, const, A, A, A, A) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, const, A, A, A, B) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, const, A, A, B, A) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, const, A, A, 
B, B) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, const, A, B, A, A) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, const, A, B, A, B) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, const, A, B, B, A) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, const, A, B, B, B) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, const, B, A, A, A) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, const, B, A, A, B) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, const, B, A, B, A) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, const, B, A, B, B) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, const, B, B, A, A) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, const, B, B, A, B) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, const, B, B, B, A) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, const, B, B, B, B) + +#define GLM_SWIZZLE_GEN_VEC_FROM_VEC2_COMP(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_VEC2_TYPE, SWIZZLED_VEC3_TYPE, SWIZZLED_VEC4_TYPE, A, B) \ + GLM_SWIZZLE_GEN_VEC2_FROM_VEC2_SWIZZLE(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_VEC2_TYPE, A, B) \ + GLM_SWIZZLE_GEN_VEC3_FROM_VEC2_SWIZZLE(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_VEC3_TYPE, A, B) \ + GLM_SWIZZLE_GEN_VEC4_FROM_VEC2_SWIZZLE(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_VEC4_TYPE, A, B) + +#define GLM_SWIZZLE_GEN_VEC_FROM_VEC2(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_VEC2_TYPE, SWIZZLED_VEC3_TYPE, SWIZZLED_VEC4_TYPE) \ + GLM_SWIZZLE_GEN_VEC_FROM_VEC2_COMP(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_VEC2_TYPE, SWIZZLED_VEC3_TYPE, SWIZZLED_VEC4_TYPE, x, y) \ + GLM_SWIZZLE_GEN_VEC_FROM_VEC2_COMP(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_VEC2_TYPE, SWIZZLED_VEC3_TYPE, SWIZZLED_VEC4_TYPE, r, g) \ + GLM_SWIZZLE_GEN_VEC_FROM_VEC2_COMP(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_VEC2_TYPE, SWIZZLED_VEC3_TYPE, SWIZZLED_VEC4_TYPE, s, t) + +//GLM_SWIZZLE_GEN_VEC_FROM_VEC2(valType, detail::vec2, detail::vec2, detail::vec3, detail::vec4) + +#define GLM_SWIZZLE_GEN_VEC2_FROM_VEC3_SWIZZLE(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, A, B, C) \ + GLM_SWIZZLE_GEN_VEC2_ENTRY(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, const, A, A) \ + GLM_SWIZZLE_GEN_VEC2_ENTRY(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, const, A, B) \ + GLM_SWIZZLE_GEN_VEC2_ENTRY(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, const, A, C) \ + GLM_SWIZZLE_GEN_VEC2_ENTRY(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, const, B, A) \ + GLM_SWIZZLE_GEN_VEC2_ENTRY(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, const, B, B) \ + GLM_SWIZZLE_GEN_VEC2_ENTRY(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, const, B, C) \ + GLM_SWIZZLE_GEN_VEC2_ENTRY(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, const, C, A) \ + GLM_SWIZZLE_GEN_VEC2_ENTRY(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, const, C, B) \ + GLM_SWIZZLE_GEN_VEC2_ENTRY(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, const, C, C) + +#define GLM_SWIZZLE_GEN_VEC3_FROM_VEC3_SWIZZLE(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, A, B, C) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, const, A, A, A) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, const, A, A, B) \ + 
GLM_SWIZZLE_GEN_VEC3_ENTRY(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, const, A, A, C) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, const, A, B, A) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, const, A, B, B) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, const, A, B, C) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, const, A, C, A) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, const, A, C, B) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, const, A, C, C) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, const, B, A, A) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, const, B, A, B) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, const, B, A, C) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, const, B, B, A) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, const, B, B, B) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, const, B, B, C) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, const, B, C, A) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, const, B, C, B) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, const, B, C, C) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, const, C, A, A) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, const, C, A, B) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, const, C, A, C) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, const, C, B, A) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, const, C, B, B) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, const, C, B, C) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, const, C, C, A) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, const, C, C, B) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, const, C, C, C) + +#define GLM_SWIZZLE_GEN_VEC4_FROM_VEC3_SWIZZLE(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, A, B, C) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, const, A, A, A, A) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, const, A, A, A, B) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, const, A, A, A, C) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, const, A, A, B, A) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, const, A, A, B, B) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, const, A, A, B, C) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, const, A, A, C, A) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, const, A, A, C, B) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, const, A, A, C, C) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, const, A, B, A, A) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(TMPL_TYPE, PRECISION, 
CLASS_TYPE, SWIZZLED_TYPE, const, A, B, A, B) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, const, A, B, A, C) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, const, A, B, B, A) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, const, A, B, B, B) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, const, A, B, B, C) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, const, A, B, C, A) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, const, A, B, C, B) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, const, A, B, C, C) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, const, A, C, A, A) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, const, A, C, A, B) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, const, A, C, A, C) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, const, A, C, B, A) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, const, A, C, B, B) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, const, A, C, B, C) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, const, A, C, C, A) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, const, A, C, C, B) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, const, A, C, C, C) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, const, B, A, A, A) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, const, B, A, A, B) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, const, B, A, A, C) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, const, B, A, B, A) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, const, B, A, B, B) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, const, B, A, B, C) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, const, B, A, C, A) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, const, B, A, C, B) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, const, B, A, C, C) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, const, B, B, A, A) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, const, B, B, A, B) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, const, B, B, A, C) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, const, B, B, B, A) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, const, B, B, B, B) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, const, B, B, B, C) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, const, B, B, C, A) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, const, B, B, C, B) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, const, B, B, C, C) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, const, B, C, A, A) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(TMPL_TYPE, 
PRECISION, CLASS_TYPE, SWIZZLED_TYPE, const, B, C, A, B) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, const, B, C, A, C) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, const, B, C, B, A) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, const, B, C, B, B) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, const, B, C, B, C) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, const, B, C, C, A) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, const, B, C, C, B) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, const, B, C, C, C) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, const, C, A, A, A) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, const, C, A, A, B) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, const, C, A, A, C) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, const, C, A, B, A) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, const, C, A, B, B) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, const, C, A, B, C) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, const, C, A, C, A) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, const, C, A, C, B) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, const, C, A, C, C) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, const, C, B, A, A) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, const, C, B, A, B) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, const, C, B, A, C) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, const, C, B, B, A) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, const, C, B, B, B) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, const, C, B, B, C) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, const, C, B, C, A) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, const, C, B, C, B) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, const, C, B, C, C) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, const, C, C, A, A) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, const, C, C, A, B) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, const, C, C, A, C) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, const, C, C, B, A) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, const, C, C, B, B) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, const, C, C, B, C) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, const, C, C, C, A) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, const, C, C, C, B) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, const, C, C, C, C) + +#define GLM_SWIZZLE_GEN_VEC_FROM_VEC3_COMP(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_VEC2_TYPE, SWIZZLED_VEC3_TYPE, 
SWIZZLED_VEC4_TYPE, A, B, C) \ + GLM_SWIZZLE_GEN_VEC2_FROM_VEC3_SWIZZLE(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_VEC2_TYPE, A, B, C) \ + GLM_SWIZZLE_GEN_VEC3_FROM_VEC3_SWIZZLE(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_VEC3_TYPE, A, B, C) \ + GLM_SWIZZLE_GEN_VEC4_FROM_VEC3_SWIZZLE(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_VEC4_TYPE, A, B, C) + +#define GLM_SWIZZLE_GEN_VEC_FROM_VEC3(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_VEC2_TYPE, SWIZZLED_VEC3_TYPE, SWIZZLED_VEC4_TYPE) \ + GLM_SWIZZLE_GEN_VEC_FROM_VEC3_COMP(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_VEC2_TYPE, SWIZZLED_VEC3_TYPE, SWIZZLED_VEC4_TYPE, x, y, z) \ + GLM_SWIZZLE_GEN_VEC_FROM_VEC3_COMP(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_VEC2_TYPE, SWIZZLED_VEC3_TYPE, SWIZZLED_VEC4_TYPE, r, g, b) \ + GLM_SWIZZLE_GEN_VEC_FROM_VEC3_COMP(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_VEC2_TYPE, SWIZZLED_VEC3_TYPE, SWIZZLED_VEC4_TYPE, s, t, p) + +//GLM_SWIZZLE_GEN_VEC_FROM_VEC3(valType, detail::vec3, detail::vec2, detail::vec3, detail::vec4) + +#define GLM_SWIZZLE_GEN_VEC2_FROM_VEC4_SWIZZLE(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, A, B, C, D) \ + GLM_SWIZZLE_GEN_VEC2_ENTRY(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, const, A, A) \ + GLM_SWIZZLE_GEN_VEC2_ENTRY(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, const, A, B) \ + GLM_SWIZZLE_GEN_VEC2_ENTRY(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, const, A, C) \ + GLM_SWIZZLE_GEN_VEC2_ENTRY(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, const, A, D) \ + GLM_SWIZZLE_GEN_VEC2_ENTRY(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, const, B, A) \ + GLM_SWIZZLE_GEN_VEC2_ENTRY(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, const, B, B) \ + GLM_SWIZZLE_GEN_VEC2_ENTRY(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, const, B, C) \ + GLM_SWIZZLE_GEN_VEC2_ENTRY(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, const, B, D) \ + GLM_SWIZZLE_GEN_VEC2_ENTRY(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, const, C, A) \ + GLM_SWIZZLE_GEN_VEC2_ENTRY(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, const, C, B) \ + GLM_SWIZZLE_GEN_VEC2_ENTRY(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, const, C, C) \ + GLM_SWIZZLE_GEN_VEC2_ENTRY(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, const, C, D) \ + GLM_SWIZZLE_GEN_VEC2_ENTRY(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, const, D, A) \ + GLM_SWIZZLE_GEN_VEC2_ENTRY(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, const, D, B) \ + GLM_SWIZZLE_GEN_VEC2_ENTRY(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, const, D, C) \ + GLM_SWIZZLE_GEN_VEC2_ENTRY(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, const, D, D) + +#define GLM_SWIZZLE_GEN_VEC3_FROM_VEC4_SWIZZLE(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, A, B, C, D) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, const, A, A, A) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, const, A, A, B) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, const, A, A, C) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, const, A, A, D) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, const, A, B, A) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, const, A, B, B) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, const, A, B, C) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, const, A, B, D) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(TMPL_TYPE, PRECISION, CLASS_TYPE, 
SWIZZLED_TYPE, const, A, C, A) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, const, A, C, B) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, const, A, C, C) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, const, A, C, D) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, const, A, D, A) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, const, A, D, B) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, const, A, D, C) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, const, A, D, D) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, const, B, A, A) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, const, B, A, B) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, const, B, A, C) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, const, B, A, D) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, const, B, B, A) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, const, B, B, B) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, const, B, B, C) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, const, B, B, D) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, const, B, C, A) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, const, B, C, B) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, const, B, C, C) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, const, B, C, D) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, const, B, D, A) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, const, B, D, B) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, const, B, D, C) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, const, B, D, D) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, const, C, A, A) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, const, C, A, B) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, const, C, A, C) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, const, C, A, D) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, const, C, B, A) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, const, C, B, B) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, const, C, B, C) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, const, C, B, D) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, const, C, C, A) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, const, C, C, B) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, const, C, C, C) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, const, C, C, D) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, const, C, D, A) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(TMPL_TYPE, PRECISION, CLASS_TYPE, 
SWIZZLED_TYPE, const, C, D, B) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, const, C, D, C) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, const, C, D, D) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, const, D, A, A) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, const, D, A, B) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, const, D, A, C) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, const, D, A, D) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, const, D, B, A) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, const, D, B, B) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, const, D, B, C) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, const, D, B, D) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, const, D, C, A) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, const, D, C, B) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, const, D, C, C) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, const, D, C, D) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, const, D, D, A) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, const, D, D, B) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, const, D, D, C) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, const, D, D, D) + +#define GLM_SWIZZLE_GEN_VEC4_FROM_VEC4_SWIZZLE(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, A, B, C, D) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, const, A, A, A, A) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, const, A, A, A, B) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, const, A, A, A, C) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, const, A, A, A, D) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, const, A, A, B, A) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, const, A, A, B, B) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, const, A, A, B, C) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, const, A, A, B, D) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, const, A, A, C, A) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, const, A, A, C, B) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, const, A, A, C, C) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, const, A, A, C, D) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, const, A, A, D, A) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, const, A, A, D, B) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, const, A, A, D, C) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, const, A, A, D, D) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, const, A, B, A, A) \ 
+ GLM_SWIZZLE_GEN_VEC4_ENTRY(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, const, A, B, A, B) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, const, A, B, A, C) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, const, A, B, A, D) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, const, A, B, B, A) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, const, A, B, B, B) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, const, A, B, B, C) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, const, A, B, B, D) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, const, A, B, C, A) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, const, A, B, C, B) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, const, A, B, C, C) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, const, A, B, C, D) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, const, A, B, D, A) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, const, A, B, D, B) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, const, A, B, D, C) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, const, A, B, D, D) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, const, A, C, A, A) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, const, A, C, A, B) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, const, A, C, A, C) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, const, A, C, A, D) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, const, A, C, B, A) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, const, A, C, B, B) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, const, A, C, B, C) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, const, A, C, B, D) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, const, A, C, C, A) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, const, A, C, C, B) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, const, A, C, C, C) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, const, A, C, C, D) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, const, A, C, D, A) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, const, A, C, D, B) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, const, A, C, D, C) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, const, A, C, D, D) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, const, A, D, A, A) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, const, A, D, A, B) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, const, A, D, A, C) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, const, A, D, A, D) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, const, A, 
D, B, A) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, const, A, D, B, B) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, const, A, D, B, C) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, const, A, D, B, D) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, const, A, D, C, A) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, const, A, D, C, B) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, const, A, D, C, C) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, const, A, D, C, D) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, const, A, D, D, A) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, const, A, D, D, B) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, const, A, D, D, C) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, const, A, D, D, D) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, const, B, A, A, A) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, const, B, A, A, B) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, const, B, A, A, C) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, const, B, A, A, D) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, const, B, A, B, A) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, const, B, A, B, B) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, const, B, A, B, C) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, const, B, A, B, D) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, const, B, A, C, A) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, const, B, A, C, B) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, const, B, A, C, C) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, const, B, A, C, D) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, const, B, A, D, A) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, const, B, A, D, B) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, const, B, A, D, C) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, const, B, A, D, D) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, const, B, B, A, A) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, const, B, B, A, B) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, const, B, B, A, C) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, const, B, B, A, D) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, const, B, B, B, A) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, const, B, B, B, B) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, const, B, B, B, C) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, const, B, B, B, D) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, 
const, B, B, C, A) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, const, B, B, C, B) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, const, B, B, C, C) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, const, B, B, C, D) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, const, B, B, D, A) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, const, B, B, D, B) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, const, B, B, D, C) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, const, B, B, D, D) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, const, B, C, A, A) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, const, B, C, A, B) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, const, B, C, A, C) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, const, B, C, A, D) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, const, B, C, B, A) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, const, B, C, B, B) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, const, B, C, B, C) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, const, B, C, B, D) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, const, B, C, C, A) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, const, B, C, C, B) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, const, B, C, C, C) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, const, B, C, C, D) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, const, B, C, D, A) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, const, B, C, D, B) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, const, B, C, D, C) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, const, B, C, D, D) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, const, B, D, A, A) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, const, B, D, A, B) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, const, B, D, A, C) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, const, B, D, A, D) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, const, B, D, B, A) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, const, B, D, B, B) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, const, B, D, B, C) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, const, B, D, B, D) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, const, B, D, C, A) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, const, B, D, C, B) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, const, B, D, C, C) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, const, B, D, C, D) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(TMPL_TYPE, PRECISION, CLASS_TYPE, 
SWIZZLED_TYPE, const, B, D, D, A) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, const, B, D, D, B) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, const, B, D, D, C) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, const, B, D, D, D) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, const, C, A, A, A) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, const, C, A, A, B) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, const, C, A, A, C) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, const, C, A, A, D) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, const, C, A, B, A) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, const, C, A, B, B) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, const, C, A, B, C) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, const, C, A, B, D) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, const, C, A, C, A) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, const, C, A, C, B) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, const, C, A, C, C) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, const, C, A, C, D) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, const, C, A, D, A) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, const, C, A, D, B) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, const, C, A, D, C) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, const, C, A, D, D) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, const, C, B, A, A) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, const, C, B, A, B) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, const, C, B, A, C) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, const, C, B, A, D) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, const, C, B, B, A) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, const, C, B, B, B) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, const, C, B, B, C) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, const, C, B, B, D) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, const, C, B, C, A) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, const, C, B, C, B) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, const, C, B, C, C) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, const, C, B, C, D) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, const, C, B, D, A) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, const, C, B, D, B) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, const, C, B, D, C) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, const, C, B, D, D) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(TMPL_TYPE, PRECISION, 
CLASS_TYPE, SWIZZLED_TYPE, const, C, C, A, A) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, const, C, C, A, B) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, const, C, C, A, C) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, const, C, C, A, D) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, const, C, C, B, A) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, const, C, C, B, B) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, const, C, C, B, C) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, const, C, C, B, D) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, const, C, C, C, A) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, const, C, C, C, B) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, const, C, C, C, C) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, const, C, C, C, D) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, const, C, C, D, A) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, const, C, C, D, B) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, const, C, C, D, C) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, const, C, C, D, D) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, const, C, D, A, A) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, const, C, D, A, B) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, const, C, D, A, C) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, const, C, D, A, D) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, const, C, D, B, A) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, const, C, D, B, B) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, const, C, D, B, C) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, const, C, D, B, D) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, const, C, D, C, A) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, const, C, D, C, B) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, const, C, D, C, C) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, const, C, D, C, D) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, const, C, D, D, A) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, const, C, D, D, B) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, const, C, D, D, C) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, const, C, D, D, D) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, const, D, A, A, A) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, const, D, A, A, B) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, const, D, A, A, C) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, const, D, A, A, D) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(TMPL_TYPE, 
PRECISION, CLASS_TYPE, SWIZZLED_TYPE, const, D, A, B, A) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, const, D, A, B, B) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, const, D, A, B, C) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, const, D, A, B, D) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, const, D, A, C, A) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, const, D, A, C, B) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, const, D, A, C, C) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, const, D, A, C, D) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, const, D, A, D, A) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, const, D, A, D, B) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, const, D, A, D, C) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, const, D, A, D, D) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, const, D, B, A, A) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, const, D, B, A, B) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, const, D, B, A, C) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, const, D, B, A, D) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, const, D, B, B, A) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, const, D, B, B, B) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, const, D, B, B, C) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, const, D, B, B, D) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, const, D, B, C, A) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, const, D, B, C, B) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, const, D, B, C, C) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, const, D, B, C, D) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, const, D, B, D, A) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, const, D, B, D, B) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, const, D, B, D, C) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, const, D, B, D, D) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, const, D, C, A, A) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, const, D, C, A, B) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, const, D, C, A, C) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, const, D, C, A, D) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, const, D, C, B, A) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, const, D, C, B, B) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, const, D, C, B, C) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, const, D, C, B, D) \ + 
GLM_SWIZZLE_GEN_VEC4_ENTRY(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, const, D, C, C, A) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, const, D, C, C, B) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, const, D, C, C, C) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, const, D, C, C, D) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, const, D, C, D, A) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, const, D, C, D, B) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, const, D, C, D, C) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, const, D, C, D, D) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, const, D, D, A, A) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, const, D, D, A, B) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, const, D, D, A, C) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, const, D, D, A, D) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, const, D, D, B, A) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, const, D, D, B, B) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, const, D, D, B, C) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, const, D, D, B, D) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, const, D, D, C, A) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, const, D, D, C, B) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, const, D, D, C, C) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, const, D, D, C, D) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, const, D, D, D, A) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, const, D, D, D, B) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, const, D, D, D, C) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_TYPE, const, D, D, D, D) + +#define GLM_SWIZZLE_GEN_VEC_FROM_VEC4_COMP(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_VEC2_TYPE, SWIZZLED_VEC3_TYPE, SWIZZLED_VEC4_TYPE, A, B, C, D) \ + GLM_SWIZZLE_GEN_VEC2_FROM_VEC4_SWIZZLE(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_VEC2_TYPE, A, B, C, D) \ + GLM_SWIZZLE_GEN_VEC3_FROM_VEC4_SWIZZLE(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_VEC3_TYPE, A, B, C, D) \ + GLM_SWIZZLE_GEN_VEC4_FROM_VEC4_SWIZZLE(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_VEC4_TYPE, A, B, C, D) + +#define GLM_SWIZZLE_GEN_VEC_FROM_VEC4(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_VEC2_TYPE, SWIZZLED_VEC3_TYPE, SWIZZLED_VEC4_TYPE) \ + GLM_SWIZZLE_GEN_VEC_FROM_VEC4_COMP(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_VEC2_TYPE, SWIZZLED_VEC3_TYPE, SWIZZLED_VEC4_TYPE, x, y, z, w) \ + GLM_SWIZZLE_GEN_VEC_FROM_VEC4_COMP(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_VEC2_TYPE, SWIZZLED_VEC3_TYPE, SWIZZLED_VEC4_TYPE, r, g, b, a) \ + GLM_SWIZZLE_GEN_VEC_FROM_VEC4_COMP(TMPL_TYPE, PRECISION, CLASS_TYPE, SWIZZLED_VEC2_TYPE, SWIZZLED_VEC3_TYPE, SWIZZLED_VEC4_TYPE, s, t, p, q) + +//GLM_SWIZZLE_GEN_VEC_FROM_VEC4(valType, detail::vec4, detail::vec2, detail::vec3, detail::vec4) + +#endif//glm_core_swizzle_func 
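A note on the vendored swizzle header above: the wall of GLM_SWIZZLE_GEN_VEC4_ENTRY invocations is pure macro fan-out, stamping out one const accessor per component permutation (wzyx, xxyy, and so on) over the xyzw, rgba and stpq name sets. A minimal, self-contained sketch of what one such generated entry amounts to, using a hypothetical demo type rather than GLM's actual expansion:

#include <iostream>

// Hypothetical stand-in for a 4-component vector; it only illustrates the
// shape of one generated swizzle entry, not GLM's real macro output.
struct vec4_demo
{
   float x, y, z, w;

   // What a single GLM_SWIZZLE_GEN_VEC4_ENTRY(..., const, w, z, y, x)
   // conceptually produces: a const accessor that reorders the components.
   vec4_demo wzyx() const { return vec4_demo{w, z, y, x}; }
};

int main()
{
   vec4_demo v{1.0f, 2.0f, 3.0f, 4.0f};
   vec4_demo r = v.wzyx();   /* (4, 3, 2, 1) */
   std::cout << r.x << ' ' << r.y << ' ' << r.z << ' ' << r.w << '\n';
   return 0;
}

GLM repeats this pattern for every 2-, 3- and 4-component permutation, which is why the generated header is so large.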
diff --git a/deps/glm/detail/_vectorize.hpp b/deps/glm/detail/_vectorize.hpp new file mode 100644 index 0000000000..6874a321bd --- /dev/null +++ b/deps/glm/detail/_vectorize.hpp @@ -0,0 +1,217 @@ +/////////////////////////////////////////////////////////////////////////////////// +/// OpenGL Mathematics (glm.g-truc.net) +/// +/// Copyright (c) 2005 - 2014 G-Truc Creation (www.g-truc.net) +/// Permission is hereby granted, free of charge, to any person obtaining a copy +/// of this software and associated documentation files (the "Software"), to deal +/// in the Software without restriction, including without limitation the rights +/// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +/// copies of the Software, and to permit persons to whom the Software is +/// furnished to do so, subject to the following conditions: +/// +/// The above copyright notice and this permission notice shall be included in +/// all copies or substantial portions of the Software. +/// +/// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +/// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +/// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +/// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +/// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +/// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +/// THE SOFTWARE. +/// +/// @ref core +/// @file glm/core/_vectorize.hpp +/// @date 2011-10-14 / 2011-10-14 +/// @author Christophe Riccio +/////////////////////////////////////////////////////////////////////////////////// + +#ifndef GLM_CORE_DETAIL_INCLUDED +#define GLM_CORE_DETAIL_INCLUDED + +#include "type_vec1.hpp" +#include "type_vec2.hpp" +#include "type_vec3.hpp" +#include "type_vec4.hpp" + +#define VECTORIZE1_VEC(func) \ + template \ + GLM_FUNC_QUALIFIER detail::tvec1 func( \ + detail::tvec1 const & v) \ + { \ + return detail::tvec1( \ + func(v.x)); \ + } + +#define VECTORIZE2_VEC(func) \ + template \ + GLM_FUNC_QUALIFIER detail::tvec2 func( \ + detail::tvec2 const & v) \ + { \ + return detail::tvec2( \ + func(v.x), \ + func(v.y)); \ + } + +#define VECTORIZE3_VEC(func) \ + template \ + GLM_FUNC_QUALIFIER detail::tvec3 func( \ + detail::tvec3 const & v) \ + { \ + return detail::tvec3( \ + func(v.x), \ + func(v.y), \ + func(v.z)); \ + } + +#define VECTORIZE4_VEC(func) \ + template \ + GLM_FUNC_QUALIFIER detail::tvec4 func( \ + detail::tvec4 const & v) \ + { \ + return detail::tvec4( \ + func(v.x), \ + func(v.y), \ + func(v.z), \ + func(v.w)); \ + } + +#define VECTORIZE_VEC(func) \ + VECTORIZE1_VEC(func) \ + VECTORIZE2_VEC(func) \ + VECTORIZE3_VEC(func) \ + VECTORIZE4_VEC(func) + +#define VECTORIZE1_VEC_SCA(func) \ + template \ + GLM_FUNC_QUALIFIER detail::tvec1 func \ + ( \ + detail::tvec1 const & x, \ + T const & y \ + ) \ + { \ + return detail::tvec1( \ + func(x.x, y)); \ + } + +#define VECTORIZE2_VEC_SCA(func) \ + template \ + GLM_FUNC_QUALIFIER detail::tvec2 func \ + ( \ + detail::tvec2 const & x, \ + T const & y \ + ) \ + { \ + return detail::tvec2( \ + func(x.x, y), \ + func(x.y, y)); \ + } + +#define VECTORIZE3_VEC_SCA(func) \ + template \ + GLM_FUNC_QUALIFIER detail::tvec3 func \ + ( \ + detail::tvec3 const & x, \ + T const & y \ + ) \ + { \ + return detail::tvec3( \ + func(x.x, y), \ + func(x.y, y), \ + func(x.z, y)); \ + } + +#define VECTORIZE4_VEC_SCA(func) \ + template \ + GLM_FUNC_QUALIFIER detail::tvec4 func \ + ( \ + 
detail::tvec4 const & x, \ + T const & y \ + ) \ + { \ + return detail::tvec4( \ + func(x.x, y), \ + func(x.y, y), \ + func(x.z, y), \ + func(x.w, y)); \ + } + +#define VECTORIZE_VEC_SCA(func) \ + VECTORIZE1_VEC_SCA(func) \ + VECTORIZE2_VEC_SCA(func) \ + VECTORIZE3_VEC_SCA(func) \ + VECTORIZE4_VEC_SCA(func) + +#define VECTORIZE2_VEC_VEC(func) \ + template \ + GLM_FUNC_QUALIFIER detail::tvec2 func \ + ( \ + detail::tvec2 const & x, \ + detail::tvec2 const & y \ + ) \ + { \ + return detail::tvec2( \ + func(x.x, y.x), \ + func(x.y, y.y)); \ + } + +#define VECTORIZE3_VEC_VEC(func) \ + template \ + GLM_FUNC_QUALIFIER detail::tvec3 func \ + ( \ + detail::tvec3 const & x, \ + detail::tvec3 const & y \ + ) \ + { \ + return detail::tvec3( \ + func(x.x, y.x), \ + func(x.y, y.y), \ + func(x.z, y.z)); \ + } + +#define VECTORIZE4_VEC_VEC(func) \ + template \ + GLM_FUNC_QUALIFIER detail::tvec4 func \ + ( \ + detail::tvec4 const & x, \ + detail::tvec4 const & y \ + ) \ + { \ + return detail::tvec4( \ + func(x.x, y.x), \ + func(x.y, y.y), \ + func(x.z, y.z), \ + func(x.w, y.w)); \ + } + +#define VECTORIZE_VEC_VEC(func) \ + VECTORIZE2_VEC_VEC(func) \ + VECTORIZE3_VEC_VEC(func) \ + VECTORIZE4_VEC_VEC(func) + +namespace glm{ +namespace detail +{ + template + struct If + { + template + static GLM_FUNC_QUALIFIER T apply(F functor, const T& val) + { + return functor(val); + } + }; + + template<> + struct If + { + template + static GLM_FUNC_QUALIFIER T apply(F, const T& val) + { + return val; + } + }; +}//namespace detail +}//namespace glm + +#endif//GLM_CORE_DETAIL_INCLUDED diff --git a/deps/glm/detail/dummy.cpp b/deps/glm/detail/dummy.cpp new file mode 100644 index 0000000000..98ca022ceb --- /dev/null +++ b/deps/glm/detail/dummy.cpp @@ -0,0 +1,190 @@ +/////////////////////////////////////////////////////////////////////////////////// +/// OpenGL Mathematics (glm.g-truc.net) +/// +/// Copyright (c) 2005 - 2014 G-Truc Creation (www.g-truc.net) +/// Permission is hereby granted, free of charge, to any person obtaining a copy +/// of this software and associated documentation files (the "Software"), to deal +/// in the Software without restriction, including without limitation the rights +/// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +/// copies of the Software, and to permit persons to whom the Software is +/// furnished to do so, subject to the following conditions: +/// +/// The above copyright notice and this permission notice shall be included in +/// all copies or substantial portions of the Software. +/// +/// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +/// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +/// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +/// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +/// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +/// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +/// THE SOFTWARE. +/// +/// @ref core +/// @file glm/core/dummy.cpp +/// @date 2011-01-19 / 2011-06-15 +/// @author Christophe Riccio +/// +/// GLM is a header only library. There is nothing to compile. +/// dummy.cpp exist only a wordaround for CMake file. 
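For context on _vectorize.hpp above: the VECTORIZE*_VEC macro family lifts a scalar function to each component of a vector and repacks the results, so GLM only has to implement the scalar case of functions like abs or floor. A rough sketch of what an expansion such as VECTORIZE2_VEC would produce for a hypothetical scalar function (demo names, not GLM's internal types):

#include <iostream>

// Demo stand-in for detail::tvec2<T, P>; only the component layout matters here.
template <typename T>
struct tvec2_demo { T x, y; };

// Scalar overload that the macro would be applied to.
template <typename T>
T half_of(T v) { return v / T(2); }

// Roughly what VECTORIZE2_VEC(half_of) would generate: call the scalar
// overload once per component and rebuild the vector.
template <typename T>
tvec2_demo<T> half_of(tvec2_demo<T> const & v)
{
   return tvec2_demo<T>{ half_of(v.x), half_of(v.y) };
}

int main()
{
   tvec2_demo<float> v{4.0f, 9.0f};
   tvec2_demo<float> r = half_of(v);   /* component-wise: (2, 4.5) */
   std::cout << r.x << ' ' << r.y << '\n';
   return 0;
}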
+/////////////////////////////////////////////////////////////////////////////////// + +#define GLM_FORCE_RADIANS +#define GLM_MESSAGES +#include "../glm.hpp" +#include + +struct material +{ + glm::vec4 emission; // Ecm + glm::vec4 ambient; // Acm + glm::vec4 diffuse; // Dcm + glm::vec4 specular; // Scm + float shininess; // Srm +}; +struct light +{ + glm::vec4 ambient; // Acli + glm::vec4 diffuse; // Dcli + glm::vec4 specular; // Scli + glm::vec4 position; // Ppli + glm::vec4 halfVector; // Derived: Hi + glm::vec3 spotDirection; // Sdli + float spotExponent; // Srli + float spotCutoff; // Crli + // (range: [0.0,90.0], 180.0) + float spotCosCutoff; // Derived: cos(Crli) + // (range: [1.0,0.0],-1.0) + float constantAttenuation; // K0 + float linearAttenuation; // K1 + float quadraticAttenuation;// K2 +}; + +// Sample 1 +#include // glm::vec3 +#include // glm::cross, glm::normalize + +glm::vec3 computeNormal +( + glm::vec3 const & a, + glm::vec3 const & b, + glm::vec3 const & c +) +{ + return glm::normalize(glm::cross(c - a, b - a)); +} + +typedef unsigned int GLuint; +#define GL_FALSE 0 +void glUniformMatrix4fv(GLuint, int, int, float*){} + +// Sample 2 +#include // glm::vec3 +#include // glm::vec4, glm::ivec4 +#include // glm::mat4 +#include // glm::translate, glm::rotate, glm::scale, glm::perspective +#include // glm::value_ptr +void func(GLuint LocationMVP, float Translate, glm::vec2 const & Rotate) +{ + glm::mat4 Projection = glm::perspective(45.0f, 4.0f / 3.0f, 0.1f, 100.f); + glm::mat4 ViewTranslate = glm::translate(glm::mat4(1.0f), glm::vec3(0.0f, 0.0f, -Translate)); + glm::mat4 ViewRotateX = glm::rotate(ViewTranslate, Rotate.y, glm::vec3(-1.0f, 0.0f, 0.0f)); + glm::mat4 View = glm::rotate(ViewRotateX, Rotate.x, glm::vec3(0.0f, 1.0f, 0.0f)); + glm::mat4 Model = glm::scale(glm::mat4(1.0f), glm::vec3(0.5f)); + glm::mat4 MVP = Projection * View * Model; + glUniformMatrix4fv(LocationMVP, 1, GL_FALSE, glm::value_ptr(MVP)); +} + +// Sample 3 +#include // glm::vec2 +#include // glm::packUnorm2x16 +#include // glm::uint +#include // glm::i8vec2, glm::i32vec2 +std::size_t const VertexCount = 4; +// Float quad geometry +std::size_t const PositionSizeF32 = VertexCount * sizeof(glm::vec2); +glm::vec2 const PositionDataF32[VertexCount] = +{ + glm::vec2(-1.0f,-1.0f), + glm::vec2( 1.0f,-1.0f), + glm::vec2( 1.0f, 1.0f), + glm::vec2(-1.0f, 1.0f) + }; +// Half-float quad geometry +std::size_t const PositionSizeF16 = VertexCount * sizeof(glm::uint); +glm::uint const PositionDataF16[VertexCount] = +{ + glm::uint(glm::packUnorm2x16(glm::vec2(-1.0f, -1.0f))), + glm::uint(glm::packUnorm2x16(glm::vec2( 1.0f, -1.0f))), + glm::uint(glm::packUnorm2x16(glm::vec2( 1.0f, 1.0f))), + glm::uint(glm::packUnorm2x16(glm::vec2(-1.0f, 1.0f))) +}; +// 8 bits signed integer quad geometry +std::size_t const PositionSizeI8 = VertexCount * sizeof(glm::i8vec2); +glm::i8vec2 const PositionDataI8[VertexCount] = +{ + glm::i8vec2(-1,-1), + glm::i8vec2( 1,-1), + glm::i8vec2( 1, 1), + glm::i8vec2(-1, 1) +}; +// 32 bits signed integer quad geometry +std::size_t const PositionSizeI32 = VertexCount * sizeof(glm::i32vec2); +glm::i32vec2 const PositionDataI32[VertexCount] = +{ + glm::i32vec2 (-1,-1), + glm::i32vec2 ( 1,-1), + glm::i32vec2 ( 1, 1), + glm::i32vec2 (-1, 1) +}; + +struct intersection +{ + glm::vec4 position; + glm::vec3 normal; +}; + +/* +// Sample 4 +#include // glm::vec3 +#include // glm::normalize, glm::dot, glm::reflect +#include // glm::pow +#include // glm::vecRand3 +glm::vec3 lighting +( + intersection const & 
Intersection, + material const & Material, + light const & Light, + glm::vec3 const & View +) +{ + glm::vec3 Color(0.0f); + glm::vec3 LightVertor(glm::normalize( + Light.position - Intersection.position + + glm::vecRand3(0.0f, Light.inaccuracy)); + + if(!shadow(Intersection.position, Light.position, LightVertor)) + { + float Diffuse = glm::dot(Intersection.normal, LightVector); + if(Diffuse <= 0.0f) + return Color; + if(Material.isDiffuse()) + Color += Light.color() * Material.diffuse * Diffuse; + if(Material.isSpecular()) + { + glm::vec3 Reflect(glm::reflect( + glm::normalize(-LightVector), + glm::normalize(Intersection.normal))); + float Dot = glm::dot(Reflect, View); + float Base = Dot > 0.0f ? Dot : 0.0f; + float Specular = glm::pow(Base, Material.exponent); + Color += Material.specular * Specular; + } + } + return Color; +} +*/ +int main() +{ + return 0; +} diff --git a/deps/glm/detail/func_common.hpp b/deps/glm/detail/func_common.hpp new file mode 100644 index 0000000000..41fe639d43 --- /dev/null +++ b/deps/glm/detail/func_common.hpp @@ -0,0 +1,472 @@ +/////////////////////////////////////////////////////////////////////////////////// +/// OpenGL Mathematics (glm.g-truc.net) +/// +/// Copyright (c) 2005 - 2014 G-Truc Creation (www.g-truc.net) +/// Permission is hereby granted, free of charge, to any person obtaining a copy +/// of this software and associated documentation files (the "Software"), to deal +/// in the Software without restriction, including without limitation the rights +/// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +/// copies of the Software, and to permit persons to whom the Software is +/// furnished to do so, subject to the following conditions: +/// +/// The above copyright notice and this permission notice shall be included in +/// all copies or substantial portions of the Software. +/// +/// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +/// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +/// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +/// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +/// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +/// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +/// THE SOFTWARE. +/// +/// @ref core +/// @file glm/core/func_common.hpp +/// @date 2008-03-08 / 2010-01-26 +/// @author Christophe Riccio +/// +/// @see GLSL 4.20.8 specification, section 8.3 Common Functions +/// +/// @defgroup core_func_common Common functions +/// @ingroup core +/// +/// These all operate component-wise. The description is per component. +/////////////////////////////////////////////////////////////////////////////////// + +#ifndef GLM_FUNC_COMMON_INCLUDED +#define GLM_FUNC_COMMON_INCLUDED + +#include "setup.hpp" +#include "precision.hpp" +#include "type_int.hpp" +#include "_fixes.hpp" + +namespace glm +{ + /// @addtogroup core_func_common + /// @{ + + /// Returns x if x >= 0; otherwise, it returns -x. + /// + /// @tparam genType floating-point or signed integer; scalar or vector types. + /// + /// @see GLSL abs man page + /// @see GLSL 4.20.8 specification, section 8.3 Common Functions + template + GLM_FUNC_DECL genType abs(genType const & x); + + /// Returns 1.0 if x > 0, 0.0 if x == 0, or -1.0 if x < 0. + /// + /// @tparam genType Floating-point or signed integer; scalar or vector types. 
+ /// + /// @see GLSL sign man page + /// @see GLSL 4.20.8 specification, section 8.3 Common Functions + template + GLM_FUNC_DECL genType sign(genType const & x); + + /// Returns a value equal to the nearest integer that is less then or equal to x. + /// + /// @tparam genType Floating-point scalar or vector types. + /// + /// @see GLSL floor man page + /// @see GLSL 4.20.8 specification, section 8.3 Common Functions + template + GLM_FUNC_DECL genType floor(genType const & x); + + /// Returns a value equal to the nearest integer to x + /// whose absolute value is not larger than the absolute value of x. + /// + /// @tparam genType Floating-point scalar or vector types. + /// + /// @see GLSL trunc man page + /// @see GLSL 4.20.8 specification, section 8.3 Common Functions + template + GLM_FUNC_DECL genType trunc(genType const & x); + + /// Returns a value equal to the nearest integer to x. + /// The fraction 0.5 will round in a direction chosen by the + /// implementation, presumably the direction that is fastest. + /// This includes the possibility that round(x) returns the + /// same value as roundEven(x) for all values of x. + /// + /// @tparam genType Floating-point scalar or vector types. + /// + /// @see GLSL round man page + /// @see GLSL 4.20.8 specification, section 8.3 Common Functions + template + GLM_FUNC_DECL genType round(genType const & x); + + /// Returns a value equal to the nearest integer to x. + /// A fractional part of 0.5 will round toward the nearest even + /// integer. (Both 3.5 and 4.5 for x will return 4.0.) + /// + /// @tparam genType Floating-point scalar or vector types. + /// + /// @see GLSL roundEven man page + /// @see GLSL 4.20.8 specification, section 8.3 Common Functions + /// @see New round to even technique + template + GLM_FUNC_DECL genType roundEven(genType const & x); + + /// Returns a value equal to the nearest integer + /// that is greater than or equal to x. + /// + /// @tparam genType Floating-point scalar or vector types. + /// + /// @see GLSL ceil man page + /// @see GLSL 4.20.8 specification, section 8.3 Common Functions + template + GLM_FUNC_DECL genType ceil(genType const & x); + + /// Return x - floor(x). + /// + /// @tparam genType Floating-point scalar or vector types. + /// + /// @see GLSL fract man page + /// @see GLSL 4.20.8 specification, section 8.3 Common Functions + template + GLM_FUNC_DECL genType fract(genType const & x); + + /// Modulus. Returns x - y * floor(x / y) + /// for each component in x using the floating point value y. + /// + /// @tparam genType Floating-point scalar or vector types. + /// + /// @see GLSL mod man page + /// @see GLSL 4.20.8 specification, section 8.3 Common Functions + template + GLM_FUNC_DECL genType mod( + genType const & x, + genType const & y); + + /// Modulus. Returns x - y * floor(x / y) + /// for each component in x using the floating point value y. + /// + /// @tparam genType Floating-point scalar or vector types. + /// + /// @see GLSL mod man page + /// @see GLSL 4.20.8 specification, section 8.3 Common Functions + template + GLM_FUNC_DECL genType mod( + genType const & x, + typename genType::value_type const & y); + + /// Returns the fractional part of x and sets i to the integer + /// part (as a whole number floating point value). Both the + /// return value and the output parameter will have the same + /// sign as x. + /// + /// @tparam genType Floating-point scalar or vector types. 
+ /// + /// @see GLSL modf man page + /// @see GLSL 4.20.8 specification, section 8.3 Common Functions + template + GLM_FUNC_DECL genType modf( + genType const & x, + genType & i); + + /// Returns y if y < x; otherwise, it returns x. + /// + /// @tparam genType Floating-point or integer; scalar or vector types. + /// + /// @see GLSL min man page + /// @see GLSL 4.20.8 specification, section 8.3 Common Functions<<<<<<< HEAD + template + GLM_FUNC_DECL genType min( + genType const & x, + genType const & y); + + template + GLM_FUNC_DECL genType min( + genType const & x, + typename genType::value_type const & y); + + /// Returns y if x < y; otherwise, it returns x. + /// + /// @tparam genType Floating-point or integer; scalar or vector types. + /// + /// @see GLSL max man page + /// @see GLSL 4.20.8 specification, section 8.3 Common Functions + template + GLM_FUNC_DECL genType max( + genType const & x, + genType const & y); + + template + GLM_FUNC_DECL genType max( + genType const & x, + typename genType::value_type const & y); + + /// Returns min(max(x, minVal), maxVal) for each component in x + /// using the floating-point values minVal and maxVal. + /// + /// @tparam genType Floating-point or integer; scalar or vector types. + /// + /// @see GLSL clamp man page + /// @see GLSL 4.20.8 specification, section 8.3 Common Functions + template + GLM_FUNC_DECL genType clamp( + genType const & x, + genType const & minVal, + genType const & maxVal); + + template + GLM_FUNC_DECL genType clamp( + genType const & x, + typename genType::value_type const & minVal, + typename genType::value_type const & maxVal); + + /// If genTypeU is a floating scalar or vector: + /// Returns x * (1.0 - a) + y * a, i.e., the linear blend of + /// x and y using the floating-point value a. + /// The value for a is not restricted to the range [0, 1]. + /// + /// If genTypeU is a boolean scalar or vector: + /// Selects which vector each returned component comes + /// from. For a component of that is false, the + /// corresponding component of x is returned. For a + /// component of a that is true, the corresponding + /// component of y is returned. Components of x and y that + /// are not selected are allowed to be invalid floating point + /// values and will have no effect on the results. Thus, this + /// provides different functionality than + /// genType mix(genType x, genType y, genType(a)) + /// where a is a Boolean vector. + /// + /// @see GLSL mix man page + /// @see GLSL 4.20.8 specification, section 8.3 Common Functions + /// + /// @param[in] x Value to interpolate. + /// @param[in] y Value to interpolate. + /// @param[in] a Interpolant. + /// + /// @tparam genTypeT Floating point scalar or vector. + /// @tparam genTypeU Floating point or boolean scalar or vector. It can't be a vector if it is the length of genTypeT. + /// + /// @code + /// #include + /// ... + /// float a; + /// bool b; + /// glm::dvec3 e; + /// glm::dvec3 f; + /// glm::vec4 g; + /// glm::vec4 h; + /// ... + /// glm::vec4 r = glm::mix(g, h, a); // Interpolate with a floating-point scalar two vectors. + /// glm::vec4 s = glm::mix(g, h, b); // Teturns g or h; + /// glm::dvec3 t = glm::mix(e, f, a); // Types of the third parameter is not required to match with the first and the second. + /// glm::vec4 u = glm::mix(g, h, r); // Interpolations can be perform per component with a vector for the last parameter. 
+ /// @endcode + template class vecType> + GLM_FUNC_DECL vecType mix( + vecType const & x, + vecType const & y, + vecType const & a); + + template class vecType> + GLM_FUNC_DECL vecType mix( + vecType const & x, + vecType const & y, + U const & a); + + template + GLM_FUNC_DECL genTypeT mix( + genTypeT const & x, + genTypeT const & y, + genTypeU const & a); + + /// Returns 0.0 if x < edge, otherwise it returns 1.0 for each component of a genType. + /// + /// @see GLSL step man page + /// @see GLSL 4.20.8 specification, section 8.3 Common Functions + template + GLM_FUNC_DECL genType step( + genType const & edge, + genType const & x); + + /// Returns 0.0 if x < edge, otherwise it returns 1.0. + /// + /// @see GLSL step man page + /// @see GLSL 4.20.8 specification, section 8.3 Common Functions + template ::is_iec559, "'mix' only accept floating-point inputs for the interpolator a"); + + return vecType(vecType(x) + a * vecType(y - x)); + } + }; + + template class vecType> + struct compute_mix_vector + { + GLM_FUNC_QUALIFIER static vecType call(vecType const & x, vecType const & y, vecType const & a) + { + vecType Result; + for(length_t i = 0; i < x.length(); ++i) + Result[i] = a[i] ? y[i] : x[i]; + return Result; + } + }; + + template class vecType> + struct compute_mix_scalar + { + GLM_FUNC_QUALIFIER static vecType call(vecType const & x, vecType const & y, U const & a) + { + GLM_STATIC_ASSERT(std::numeric_limits::is_iec559, "'mix' only accept floating-point inputs for the interpolator a"); + + return vecType(vecType(x) + a * vecType(y - x)); + } + }; + + template class vecType> + struct compute_mix_scalar + { + GLM_FUNC_QUALIFIER static vecType call(vecType const & x, vecType const & y, bool const & a) + { + return a ? y : x; + } + }; + + template + struct compute_mix + { + GLM_FUNC_QUALIFIER static T call(T const & x, T const & y, U const & a) + { + GLM_STATIC_ASSERT(std::numeric_limits::is_iec559, "'mix' only accept floating-point inputs for the interpolator a"); + + return static_cast(static_cast(x) + a * static_cast(y - x)); + } + }; + + template + struct compute_mix + { + GLM_FUNC_QUALIFIER static T call(T const & x, T const & y, bool const & a) + { + return a ? y : x; + } + }; +}//namespace detail + + // abs + template + GLM_FUNC_QUALIFIER genFIType abs + ( + genFIType const & x + ) + { + return detail::compute_abs::is_signed>::call(x); + } + + VECTORIZE_VEC(abs) + + // sign + //Try something like based on x >> 31 to get the sign bit + template + GLM_FUNC_QUALIFIER genFIType sign + ( + genFIType const & x + ) + { + GLM_STATIC_ASSERT( + std::numeric_limits::is_iec559 || + (std::numeric_limits::is_signed && std::numeric_limits::is_integer), "'sign' only accept signed inputs"); + + genFIType result; + if(x > genFIType(0)) + result = genFIType(1); + else if(x < genFIType(0)) + result = genFIType(-1); + else + result = genFIType(0); + return result; + } + + VECTORIZE_VEC(sign) + + // floor + template + GLM_FUNC_QUALIFIER genType floor(genType const & x) + { + GLM_STATIC_ASSERT( + std::numeric_limits::is_iec559, + "'floor' only accept floating-point inputs"); + + return ::std::floor(x); + } + + VECTORIZE_VEC(floor) + + // trunc + template + GLM_FUNC_QUALIFIER genType trunc(genType const & x) + { + GLM_STATIC_ASSERT( + std::numeric_limits::is_iec559, + "'trunc' only accept floating-point inputs"); + + // TODO, add C++11 std::trunk + return x < 0 ? 
-floor(-x) : floor(x); + } + + VECTORIZE_VEC(trunc) + + // round + template + GLM_FUNC_QUALIFIER genType round(genType const& x) + { + GLM_STATIC_ASSERT( + std::numeric_limits::is_iec559, + "'round' only accept floating-point inputs"); + + // TODO, add C++11 std::round + return x < 0 ? genType(int(x - genType(0.5))) : genType(int(x + genType(0.5))); + } + + VECTORIZE_VEC(round) + +/* + // roundEven + template + GLM_FUNC_QUALIFIER genType roundEven(genType const& x) + { + GLM_STATIC_ASSERT(std::numeric_limits::is_iec559, "'roundEven' only accept floating-point inputs"); + + return genType(int(x + genType(int(x) % 2))); + } +*/ + + // roundEven + template + GLM_FUNC_QUALIFIER genType roundEven(genType const & x) + { + GLM_STATIC_ASSERT( + std::numeric_limits::is_iec559, + "'roundEven' only accept floating-point inputs"); + + int Integer = static_cast(x); + genType IntegerPart = static_cast(Integer); + genType FractionalPart = fract(x); + + if(FractionalPart > static_cast(0.5) || FractionalPart < static_cast(0.5)) + { + return round(x); + } + else if((Integer % 2) == 0) + { + return IntegerPart; + } + else if(x <= static_cast(0)) // Work around... + { + return IntegerPart - static_cast(1); + } + else + { + return IntegerPart + static_cast(1); + } + //else // Bug on MinGW 4.5.2 + //{ + // return mix(IntegerPart + genType(-1), IntegerPart + genType(1), x <= genType(0)); + //} + } + + VECTORIZE_VEC(roundEven) + + // ceil + template + GLM_FUNC_QUALIFIER genType ceil(genType const & x) + { + GLM_STATIC_ASSERT( + std::numeric_limits::is_iec559, + "'ceil' only accept floating-point inputs"); + + return ::std::ceil(x); + } + + VECTORIZE_VEC(ceil) + + // fract + template + GLM_FUNC_QUALIFIER genType fract + ( + genType const & x + ) + { + GLM_STATIC_ASSERT( + std::numeric_limits::is_iec559, + "'fract' only accept floating-point inputs"); + + return x - floor(x); + } + + VECTORIZE_VEC(fract) + + // mod + template + GLM_FUNC_QUALIFIER genType mod + ( + genType const & x, + genType const & y + ) + { + GLM_STATIC_ASSERT( + std::numeric_limits::is_iec559, + "'mod' only accept floating-point inputs"); + + return x - y * floor(x / y); + } + + VECTORIZE_VEC_SCA(mod) + VECTORIZE_VEC_VEC(mod) + + // modf + template + GLM_FUNC_QUALIFIER genType modf + ( + genType const & x, + genType & i + ) + { + GLM_STATIC_ASSERT( + std::numeric_limits::is_iec559, + "'modf' only accept floating-point inputs"); + + return std::modf(x, &i); + } + + template + GLM_FUNC_QUALIFIER detail::tvec2 modf + ( + detail::tvec2 const & x, + detail::tvec2 & i + ) + { + return detail::tvec2( + modf(x.x, i.x), + modf(x.y, i.y)); + } + + template + GLM_FUNC_QUALIFIER detail::tvec3 modf + ( + detail::tvec3 const & x, + detail::tvec3 & i + ) + { + return detail::tvec3( + modf(x.x, i.x), + modf(x.y, i.y), + modf(x.z, i.z)); + } + + template + GLM_FUNC_QUALIFIER detail::tvec4 modf + ( + detail::tvec4 const & x, + detail::tvec4 & i + ) + { + return detail::tvec4( + modf(x.x, i.x), + modf(x.y, i.y), + modf(x.z, i.z), + modf(x.w, i.w)); + } + + //// Only valid if (INT_MIN <= x-y <= INT_MAX) + //// min(x,y) + //r = y + ((x - y) & ((x - y) >> (sizeof(int) * + //CHAR_BIT - 1))); + //// max(x,y) + //r = x - ((x - y) & ((x - y) >> (sizeof(int) * + //CHAR_BIT - 1))); + + // min + template + GLM_FUNC_QUALIFIER genType min + ( + genType const & x, + genType const & y + ) + { + GLM_STATIC_ASSERT( + std::numeric_limits::is_iec559 || std::numeric_limits::is_integer, + "'min' only accept floating-point or integer inputs"); + + return x < y ? 
x : y; + } + + VECTORIZE_VEC_SCA(min) + VECTORIZE_VEC_VEC(min) + + // max + template + GLM_FUNC_QUALIFIER genType max + ( + genType const & x, + genType const & y + ) + { + GLM_STATIC_ASSERT( + std::numeric_limits::is_iec559 || std::numeric_limits::is_integer, + "'max' only accept floating-point or integer inputs"); + + return x > y ? x : y; + } + + VECTORIZE_VEC_SCA(max) + VECTORIZE_VEC_VEC(max) + + // clamp + template + GLM_FUNC_QUALIFIER genType clamp + ( + genType const & x, + genType const & minVal, + genType const & maxVal + ) + { + GLM_STATIC_ASSERT( + std::numeric_limits::is_iec559 || std::numeric_limits::is_integer, + "'clamp' only accept floating-point or integer inputs"); + + return min(maxVal, max(minVal, x)); + } + + template + GLM_FUNC_QUALIFIER detail::tvec2 clamp + ( + detail::tvec2 const & x, + T const & minVal, + T const & maxVal + ) + { + GLM_STATIC_ASSERT( + std::numeric_limits::is_iec559 || std::numeric_limits::is_integer, + "'clamp' only accept floating-point or integer inputs"); + + return detail::tvec2( + clamp(x.x, minVal, maxVal), + clamp(x.y, minVal, maxVal)); + } + + template + GLM_FUNC_QUALIFIER detail::tvec3 clamp + ( + detail::tvec3 const & x, + T const & minVal, + T const & maxVal + ) + { + GLM_STATIC_ASSERT( + std::numeric_limits::is_iec559 || std::numeric_limits::is_integer, + "'clamp' only accept floating-point or integer inputs"); + + return detail::tvec3( + clamp(x.x, minVal, maxVal), + clamp(x.y, minVal, maxVal), + clamp(x.z, minVal, maxVal)); + } + + template + GLM_FUNC_QUALIFIER detail::tvec4 clamp + ( + detail::tvec4 const & x, + T const & minVal, + T const & maxVal + ) + { + GLM_STATIC_ASSERT( + std::numeric_limits::is_iec559 || std::numeric_limits::is_integer, + "'clamp' only accept floating-point or integer inputs"); + + return detail::tvec4( + clamp(x.x, minVal, maxVal), + clamp(x.y, minVal, maxVal), + clamp(x.z, minVal, maxVal), + clamp(x.w, minVal, maxVal)); + } + + template + GLM_FUNC_QUALIFIER detail::tvec2 clamp + ( + detail::tvec2 const & x, + detail::tvec2 const & minVal, + detail::tvec2 const & maxVal + ) + { + GLM_STATIC_ASSERT( + std::numeric_limits::is_iec559 || std::numeric_limits::is_integer, + "'clamp' only accept floating-point or integer inputs"); + + return detail::tvec2( + clamp(x.x, minVal.x, maxVal.x), + clamp(x.y, minVal.y, maxVal.y)); + } + + template + GLM_FUNC_QUALIFIER detail::tvec3 clamp + ( + detail::tvec3 const & x, + detail::tvec3 const & minVal, + detail::tvec3 const & maxVal + ) + { + GLM_STATIC_ASSERT( + std::numeric_limits::is_iec559 || std::numeric_limits::is_integer, + "'clamp' only accept floating-point or integer inputs"); + + return detail::tvec3( + clamp(x.x, minVal.x, maxVal.x), + clamp(x.y, minVal.y, maxVal.y), + clamp(x.z, minVal.z, maxVal.z)); + } + + template + GLM_FUNC_QUALIFIER detail::tvec4 clamp + ( + detail::tvec4 const & x, + detail::tvec4 const & minVal, + detail::tvec4 const & maxVal + ) + { + GLM_STATIC_ASSERT( + std::numeric_limits::is_iec559 || std::numeric_limits::is_integer, + "'clamp' only accept floating-point or integer inputs"); + + return detail::tvec4( + clamp(x.x, minVal.x, maxVal.x), + clamp(x.y, minVal.y, maxVal.y), + clamp(x.z, minVal.z, maxVal.z), + clamp(x.w, minVal.w, maxVal.w)); + } + + template class vecType> + GLM_FUNC_QUALIFIER vecType mix + ( + vecType const & x, + vecType const & y, + vecType const & a + ) + { + return detail::compute_mix_vector::call(x, y, a); + } + + template class vecType> + GLM_FUNC_QUALIFIER vecType mix + ( + vecType const & x, + vecType const & y, 
+ U const & a + ) + { + return detail::compute_mix_scalar<T, U, P, vecType>::call(x, y, a); + } + + template <typename genTypeT, typename genTypeU> + GLM_FUNC_QUALIFIER genTypeT mix + ( + genTypeT const & x, + genTypeT const & y, + genTypeU const & a + ) + { + return detail::compute_mix<genTypeT, genTypeU>::call(x, y, a); + } + + // step + template <typename genType> + GLM_FUNC_QUALIFIER genType step + ( + genType const & edge, + genType const & x + ) + { + return mix(genType(1), genType(0), glm::lessThan(x, edge)); + } + + template