diff --git a/Utilities/recursive_wrapper.hpp b/Utilities/recursive_wrapper.hpp
new file mode 100644
index 0000000000..82317e1695
--- /dev/null
+++ b/Utilities/recursive_wrapper.hpp
@@ -0,0 +1,124 @@
+#ifndef MAPBOX_UTIL_RECURSIVE_WRAPPER_HPP
+#define MAPBOX_UTIL_RECURSIVE_WRAPPER_HPP
+
+// Based on variant/recursive_wrapper.hpp from boost.
+//
+// Original license:
+//
+// Copyright (c) 2002-2003
+// Eric Friedman, Itay Maman
+//
+// Distributed under the Boost Software License, Version 1.0. (See
+// accompanying file LICENSE_1_0.txt or copy at
+// http://www.boost.org/LICENSE_1_0.txt)
+
+#include <cassert>
+#include <utility>
+
+//namespace mapbox {
+//namespace util {
+namespace std {
+
+template <typename T>
+class recursive_wrapper
+{
+
+    T* p_;
+
+    void assign(T const& rhs)
+    {
+        this->get() = rhs;
+    }
+
+public:
+    using type = T;
+
+    /**
+     * Default constructor default initializes the internally stored value.
+     * For POD types this means nothing is done and the storage is
+     * uninitialized.
+     *
+     * @throws std::bad_alloc if there is insufficient memory for an object
+     *         of type T.
+     * @throws any exception thrown by the default constructor of T.
+     */
+    recursive_wrapper()
+        : p_(new T){}
+
+    ~recursive_wrapper() noexcept { delete p_; }
+
+    recursive_wrapper(recursive_wrapper const& operand)
+        : p_(new T(operand.get())) {}
+
+    recursive_wrapper(T const& operand)
+        : p_(new T(operand)) {}
+
+    recursive_wrapper(recursive_wrapper&& operand)
+        : p_(new T(std::move(operand.get()))) {}
+
+    recursive_wrapper(T&& operand)
+        : p_(new T(std::move(operand))) {}
+
+    inline recursive_wrapper& operator=(recursive_wrapper const& rhs)
+    {
+        assign(rhs.get());
+        return *this;
+    }
+
+    inline recursive_wrapper& operator=(T const& rhs)
+    {
+        assign(rhs);
+        return *this;
+    }
+
+    inline void swap(recursive_wrapper& operand) noexcept
+    {
+        T* temp = operand.p_;
+        operand.p_ = p_;
+        p_ = temp;
+    }
+
+    recursive_wrapper& operator=(recursive_wrapper&& rhs) noexcept
+    {
+        swap(rhs);
+        return *this;
+    }
+
+    recursive_wrapper& operator=(T&& rhs)
+    {
+        get() = std::move(rhs);
+        return *this;
+    }
+
+    T& get()
+    {
+        assert(p_);
+        return *get_pointer();
+    }
+
+    T const& get() const
+    {
+        assert(p_);
+        return *get_pointer();
+    }
+
+    T* get_pointer() { return p_; }
+
+    const T* get_pointer() const { return p_; }
+
+    operator T const&() const { return this->get(); }
+
+    operator T&() { return this->get(); }
+
+}; // class recursive_wrapper
+
+template <typename T>
+inline void swap(recursive_wrapper<T>& lhs, recursive_wrapper<T>& rhs) noexcept
+{
+    lhs.swap(rhs);
+}
+}
+//} // namespace util
+//} // namespace mapbox
+
+#endif // MAPBOX_UTIL_RECURSIVE_WRAPPER_HPP
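recursive_wrapper exists so that the variant introduced in the next file can name a type that is still incomplete where the variant alias is declared: the wrapper keeps its payload behind a heap pointer, so the variant's storage size stays computable. A minimal sketch of the idiom (expr, add_node and eval are illustrative names, not part of this patch; it relies on the patch's choice of injecting these types into namespace std):

    #include "Utilities/variant.hpp"
    #include "Utilities/recursive_wrapper.hpp"

    struct add_node; // still incomplete here

    // The variant may only mention add_node through recursive_wrapper,
    // because sizeof(add_node) is unknown at this point.
    using expr = std::variant<int, std::recursive_wrapper<add_node>>;

    struct add_node
    {
        expr lhs;
        expr rhs;
    };

    struct eval
    {
        using result_type = int;
        int operator()(int v) const { return v; }
        int operator()(add_node const& n) const
        {
            // unwrapper strips the recursive_wrapper before calling us
            return std::apply_visitor(*this, n.lhs) + std::apply_visitor(*this, n.rhs);
        }
    };

    int main()
    {
        expr e = add_node{expr(1), expr(add_node{expr(2), expr(3)})};
        return std::apply_visitor(eval{}, e); // 6
    }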
diff --git a/Utilities/variant.hpp b/Utilities/variant.hpp
new file mode 100644
index 0000000000..b712445bb7
--- /dev/null
+++ b/Utilities/variant.hpp
@@ -0,0 +1,973 @@
+#ifndef MAPBOX_UTIL_VARIANT_HPP
+#define MAPBOX_UTIL_VARIANT_HPP
+
+#include <cassert>
+#include <cstddef>   // size_t
+#include <new>       // operator new
+#include <stdexcept> // runtime_error
+#include <string>
+#include <tuple>
+#include <type_traits>
+#include <typeinfo>
+#include <utility>
+
+#include "recursive_wrapper.hpp"
+
+// clang-format off
+// [[deprecated]] is only available in C++14, use this for the time being
+#if __cplusplus <= 201103L
+# ifdef __GNUC__
+#  define MAPBOX_VARIANT_DEPRECATED __attribute__((deprecated))
+# elif defined(_MSC_VER)
+#  define MAPBOX_VARIANT_DEPRECATED __declspec(deprecated)
+# else
+#  define MAPBOX_VARIANT_DEPRECATED
+# endif
+#else
+# define MAPBOX_VARIANT_DEPRECATED [[deprecated]]
+#endif
+
+
+#ifdef _MSC_VER
+// https://msdn.microsoft.com/en-us/library/bw1hbe6y.aspx
+# ifdef NDEBUG
+#  define VARIANT_INLINE __forceinline
+# else
+#  define VARIANT_INLINE //__declspec(noinline)
+# endif
+#else
+# ifdef NDEBUG
+#  define VARIANT_INLINE //inline __attribute__((always_inline))
+# else
+#  define VARIANT_INLINE __attribute__((noinline))
+# endif
+#endif
+// clang-format on
+
+// Exceptions
+#if defined( __EXCEPTIONS) || defined( _MSC_VER)
+#define HAS_EXCEPTIONS
+#endif
+
+#define VARIANT_MAJOR_VERSION 1
+#define VARIANT_MINOR_VERSION 1
+#define VARIANT_PATCH_VERSION 0
+
+#define VARIANT_VERSION (VARIANT_MAJOR_VERSION * 100000) + (VARIANT_MINOR_VERSION * 100) + (VARIANT_PATCH_VERSION)
+
+//namespace mapbox {
+//	namespace util {
+namespace std {
+
+    // XXX This should derive from std::logic_error instead of std::runtime_error.
+    // See https://github.com/mapbox/variant/issues/48 for details.
+    class bad_variant_access : public std::runtime_error
+    {
+
+    public:
+        explicit bad_variant_access(const std::string& what_arg)
+            : runtime_error(what_arg) {}
+
+        explicit bad_variant_access(const char* what_arg)
+            : runtime_error(what_arg) {}
+
+    }; // class bad_variant_access
+
+    template <typename R = void>
+    struct MAPBOX_VARIANT_DEPRECATED static_visitor
+    {
+        using result_type = R;
+
+    protected:
+        static_visitor() {}
+        ~static_visitor() {}
+    };
+
+    namespace detail {
+
+        static constexpr std::size_t invalid_value = std::size_t(-1);
+
+        template <typename T, typename... Types>
+        struct direct_type;
+
+        template <typename T, typename First, typename... Types>
+        struct direct_type<T, First, Types...>
+        {
+            static constexpr std::size_t index = std::is_same<T, First>::value
+                ? sizeof...(Types)
+                : direct_type<T, Types...>::index;
+        };
+
+        template <typename T>
+        struct direct_type<T>
+        {
+            static constexpr std::size_t index = invalid_value;
+        };
+
+#if __cpp_lib_logical_traits >= 201510L
+
+        using std::disjunction;
+
+#else
+
+        template <typename...>
+        struct disjunction : std::false_type {};
+
+        template <typename B1>
+        struct disjunction<B1> : B1 {};
+
+        template <typename B1, typename B2>
+        struct disjunction<B1, B2> : std::conditional<B1::value, B1, B2>::type {};
+
+        template <typename B1, typename... Bs>
+        struct disjunction<B1, Bs...> : std::conditional<B1::value, B1, disjunction<Bs...>>::type {};
+
+#endif
+
+        template <typename T, typename... Types>
+        struct convertible_type;
+
+        template <typename T, typename First, typename... Types>
+        struct convertible_type<T, First, Types...>
+        {
+            static constexpr std::size_t index = std::is_convertible<T, First>::value
+                ? disjunction<std::is_convertible<T, Types>...>::value ? invalid_value : sizeof...(Types)
+                : convertible_type<T, Types...>::index;
+        };
+
+        template <typename T>
+        struct convertible_type<T>
+        {
+            static constexpr std::size_t index = invalid_value;
+        };
+
+        template <typename T, typename... Types>
+        struct value_traits
+        {
+            using value_type = typename std::remove_const<typename std::remove_reference<T>::type>::type;
+            static constexpr std::size_t direct_index = direct_type<value_type, Types...>::index;
+            static constexpr bool is_direct = direct_index != invalid_value;
+            static constexpr std::size_t index = is_direct ? direct_index : convertible_type<value_type, Types...>::index;
+            static constexpr bool is_valid = index != invalid_value;
+            static constexpr std::size_t tindex = is_valid ? sizeof...(Types)-index : 0;
+            using target_type = typename std::tuple_element<tindex, std::tuple<void, Types...>>::type;
+        };
+
+        template <typename T, typename R = void>
+        struct enable_if_type
+        {
+            using type = R;
+        };
+
+        template <typename F, typename V, typename Enable = void>
+        struct result_of_unary_visit
+        {
+            using type = typename std::result_of<F(V&)>::type;
+        };
+
+        template <typename F, typename V>
+        struct result_of_unary_visit<F, V, typename enable_if_type<typename F::result_type>::type>
+        {
+            using type = typename F::result_type;
+        };
+
+        template <typename F, typename V, typename Enable = void>
+        struct result_of_binary_visit
+        {
+            using type = typename std::result_of<F(V&, V&)>::type;
+        };
+
+        template <typename F, typename V>
+        struct result_of_binary_visit<F, V, typename enable_if_type<typename F::result_type>::type>
+        {
+            using type = typename F::result_type;
+        };
+
+        template <std::size_t arg1, std::size_t... others>
+        struct static_max;
+
+        template <std::size_t arg>
+        struct static_max<arg>
+        {
+            static const std::size_t value = arg;
+        };
+
+        template <std::size_t arg1, std::size_t arg2, std::size_t... others>
+        struct static_max<arg1, arg2, others...>
+        {
+            static const std::size_t value = arg1 >= arg2 ? static_max<arg1, others...>::value : static_max<arg2, others...>::value;
+        };
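The index bookkeeping above counts downward: with N parameter types, the first type receives index N-1, and invalid_value marks "not found"; static_max later sizes the variant's raw storage. A few illustrative compile-time checks (not part of the patch; they assume the namespace-std placement this copy uses):

    #include "Utilities/variant.hpp"

    // direct_type yields sizeof...(remaining types) for the first exact match.
    static_assert(std::detail::direct_type<int, int, float, char>::index == 2, "first of three");
    static_assert(std::detail::direct_type<char, int, float, char>::index == 0, "last of three");
    static_assert(std::detail::direct_type<double, int, float, char>::index
                      == std::detail::invalid_value, "no exact match");

    // static_max drives the size/alignment of the variant's aligned_storage.
    static_assert(std::detail::static_max<sizeof(char), sizeof(double), sizeof(short)>::value
                      == sizeof(double), "largest member wins");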
+
+        template <typename... Types>
+        struct variant_helper;
+
+        template <typename T, typename... Types>
+        struct variant_helper<T, Types...>
+        {
+            VARIANT_INLINE static void destroy(const std::size_t type_index, void* data)
+            {
+                if (type_index == sizeof...(Types))
+                {
+                    reinterpret_cast<T*>(data)->~T();
+                }
+                else
+                {
+                    variant_helper<Types...>::destroy(type_index, data);
+                }
+            }
+
+            VARIANT_INLINE static void move(const std::size_t old_type_index, void* old_value, void* new_value)
+            {
+                if (old_type_index == sizeof...(Types))
+                {
+                    new (new_value) T(std::move(*reinterpret_cast<T*>(old_value)));
+                }
+                else
+                {
+                    variant_helper<Types...>::move(old_type_index, old_value, new_value);
+                }
+            }
+
+            VARIANT_INLINE static void copy(const std::size_t old_type_index, const void* old_value, void* new_value)
+            {
+                if (old_type_index == sizeof...(Types))
+                {
+                    new (new_value) T(*reinterpret_cast<const T*>(old_value));
+                }
+                else
+                {
+                    variant_helper<Types...>::copy(old_type_index, old_value, new_value);
+                }
+            }
+        };
+
+        template <>
+        struct variant_helper<>
+        {
+            VARIANT_INLINE static void destroy(const std::size_t, void*) {}
+            VARIANT_INLINE static void move(const std::size_t, void*, void*) {}
+            VARIANT_INLINE static void copy(const std::size_t, const void*, void*) {}
+        };
+
+        template <typename T>
+        struct unwrapper
+        {
+            static T const& apply_const(T const& obj) { return obj; }
+            static T& apply(T& obj) { return obj; }
+        };
+
+        template <typename T>
+        struct unwrapper<recursive_wrapper<T>>
+        {
+            static auto apply_const(recursive_wrapper<T> const& obj)
+                -> typename recursive_wrapper<T>::type const&
+            {
+                return obj.get();
+            }
+            static auto apply(recursive_wrapper<T>& obj)
+                -> typename recursive_wrapper<T>::type&
+            {
+                return obj.get();
+            }
+        };
+
+        template <typename T>
+        struct unwrapper<std::reference_wrapper<T>>
+        {
+            static auto apply_const(std::reference_wrapper<T> const& obj)
+                -> typename std::reference_wrapper<T>::type const&
+            {
+                return obj.get();
+            }
+            static auto apply(std::reference_wrapper<T>& obj)
+                -> typename std::reference_wrapper<T>::type&
+            {
+                return obj.get();
+            }
+        };
+
+        template <typename F, typename V, typename R, typename... Types>
+        struct dispatcher;
+
+        template <typename F, typename V, typename R, typename T, typename... Types>
+        struct dispatcher<F, V, R, T, Types...>
+        {
+            VARIANT_INLINE static R apply_const(V const& v, F&& f)
+            {
+                if (v.template is<T>())
+                {
+                    return f(unwrapper<T>::apply_const(v.template get_unchecked<T>()));
+                }
+                else
+                {
+                    return dispatcher<F, V, R, Types...>::apply_const(v, std::forward<F>(f));
+                }
+            }
+
+            VARIANT_INLINE static R apply(V& v, F&& f)
+            {
+                if (v.template is<T>())
+                {
+                    return f(unwrapper<T>::apply(v.template get_unchecked<T>()));
+                }
+                else
+                {
+                    return dispatcher<F, V, R, Types...>::apply(v, std::forward<F>(f));
+                }
+            }
+        };
+
+        template <typename F, typename V, typename R, typename T>
+        struct dispatcher<F, V, R, T>
+        {
+            VARIANT_INLINE static R apply_const(V const& v, F&& f)
+            {
+                return f(unwrapper<T>::apply_const(v.template get_unchecked<T>()));
+            }
+
+            VARIANT_INLINE static R apply(V& v, F&& f)
+            {
+                return f(unwrapper<T>::apply(v.template get_unchecked<T>()));
+            }
+        };
+
+        template <typename F, typename V, typename R, typename T, typename... Types>
+        struct binary_dispatcher_rhs;
+
+        template <typename F, typename V, typename R, typename T0, typename T1, typename... Types>
+        struct binary_dispatcher_rhs<F, V, R, T0, T1, Types...>
+        {
+            VARIANT_INLINE static R apply_const(V const& lhs, V const& rhs, F&& f)
+            {
+                if (rhs.template is<T1>()) // call binary functor
+                {
+                    return f(unwrapper<T0>::apply_const(lhs.template get_unchecked<T0>()),
+                             unwrapper<T1>::apply_const(rhs.template get_unchecked<T1>()));
+                }
+                else
+                {
+                    return binary_dispatcher_rhs<F, V, R, T0, Types...>::apply_const(lhs, rhs, std::forward<F>(f));
+                }
+            }
+
+            VARIANT_INLINE static R apply(V& lhs, V& rhs, F&& f)
+            {
+                if (rhs.template is<T1>()) // call binary functor
+                {
+                    return f(unwrapper<T0>::apply(lhs.template get_unchecked<T0>()),
+                             unwrapper<T1>::apply(rhs.template get_unchecked<T1>()));
+                }
+                else
+                {
+                    return binary_dispatcher_rhs<F, V, R, T0, Types...>::apply(lhs, rhs, std::forward<F>(f));
+                }
+            }
+        };
+
+        template <typename F, typename V, typename R, typename T0, typename T1>
+        struct binary_dispatcher_rhs<F, V, R, T0, T1>
+        {
+            VARIANT_INLINE static R apply_const(V const& lhs, V const& rhs, F&& f)
+            {
+                return f(unwrapper<T0>::apply_const(lhs.template get_unchecked<T0>()),
+                         unwrapper<T1>::apply_const(rhs.template get_unchecked<T1>()));
+            }
+
+            VARIANT_INLINE static R apply(V& lhs, V& rhs, F&& f)
+            {
+                return f(unwrapper<T0>::apply(lhs.template get_unchecked<T0>()),
+                         unwrapper<T1>::apply(rhs.template get_unchecked<T1>()));
+            }
+        };
+
+        template <typename F, typename V, typename R, typename T, typename... Types>
+        struct binary_dispatcher_lhs;
+
+        template <typename F, typename V, typename R, typename T1, typename T0, typename... Types>
+        struct binary_dispatcher_lhs<F, V, R, T1, T0, Types...>
+        {
+            VARIANT_INLINE static R apply_const(V const& lhs, V const& rhs, F&& f)
+            {
+                if (lhs.template is<T0>()) // call binary functor
+                {
+                    return f(unwrapper<T0>::apply_const(lhs.template get_unchecked<T0>()),
+                             unwrapper<T1>::apply_const(rhs.template get_unchecked<T1>()));
+                }
+                else
+                {
+                    return binary_dispatcher_lhs<F, V, R, T1, Types...>::apply_const(lhs, rhs, std::forward<F>(f));
+                }
+            }
+
+            VARIANT_INLINE static R apply(V& lhs, V& rhs, F&& f)
+            {
+                if (lhs.template is<T0>()) // call binary functor
+                {
+                    return f(unwrapper<T0>::apply(lhs.template get_unchecked<T0>()),
+                             unwrapper<T1>::apply(rhs.template get_unchecked<T1>()));
+                }
+                else
+                {
+                    return binary_dispatcher_lhs<F, V, R, T1, Types...>::apply(lhs, rhs, std::forward<F>(f));
+                }
+            }
+        };
+
+        template <typename F, typename V, typename R, typename T1, typename T0>
+        struct binary_dispatcher_lhs<F, V, R, T1, T0>
+        {
+            VARIANT_INLINE static R apply_const(V const& lhs, V const& rhs, F&& f)
+            {
+                return f(unwrapper<T0>::apply_const(lhs.template get_unchecked<T0>()),
+                         unwrapper<T1>::apply_const(rhs.template get_unchecked<T1>()));
+            }
+
+            VARIANT_INLINE static R apply(V& lhs, V& rhs, F&& f)
+            {
+                return f(unwrapper<T0>::apply(lhs.template get_unchecked<T0>()),
+                         unwrapper<T1>::apply(rhs.template get_unchecked<T1>()));
+            }
+        };
+
+        template <typename F, typename V, typename R, typename... Types>
+        struct binary_dispatcher;
+
+        template <typename F, typename V, typename R, typename T, typename... Types>
+        struct binary_dispatcher<F, V, R, T, Types...>
+        {
+            VARIANT_INLINE static R apply_const(V const& v0, V const& v1, F&& f)
+            {
+                if (v0.template is<T>())
+                {
+                    if (v1.template is<T>())
+                    {
+                        return f(unwrapper<T>::apply_const(v0.template get_unchecked<T>()),
+                                 unwrapper<T>::apply_const(v1.template get_unchecked<T>())); // call binary functor
+                    }
+                    else
+                    {
+                        return binary_dispatcher_rhs<F, V, R, T, Types...>::apply_const(v0, v1, std::forward<F>(f));
+                    }
+                }
+                else if (v1.template is<T>())
+                {
+                    return binary_dispatcher_lhs<F, V, R, T, Types...>::apply_const(v0, v1, std::forward<F>(f));
+                }
+                return binary_dispatcher<F, V, R, Types...>::apply_const(v0, v1, std::forward<F>(f));
+            }
+
+            VARIANT_INLINE static R apply(V& v0, V& v1, F&& f)
+            {
+                if (v0.template is<T>())
+                {
+                    if (v1.template is<T>())
+                    {
+                        return f(unwrapper<T>::apply(v0.template get_unchecked<T>()),
+                                 unwrapper<T>::apply(v1.template get_unchecked<T>())); // call binary functor
+                    }
+                    else
+                    {
+                        return binary_dispatcher_rhs<F, V, R, T, Types...>::apply(v0, v1, std::forward<F>(f));
+                    }
+                }
+                else if (v1.template is<T>())
+                {
+                    return binary_dispatcher_lhs<F, V, R, T, Types...>::apply(v0, v1, std::forward<F>(f));
+                }
+                return binary_dispatcher<F, V, R, Types...>::apply(v0, v1, std::forward<F>(f));
+            }
+        };
+
+        template <typename F, typename V, typename R, typename T>
+        struct binary_dispatcher<F, V, R, T>
+        {
+            VARIANT_INLINE static R apply_const(V const& v0, V const& v1, F&& f)
+            {
+                return f(unwrapper<T>::apply_const(v0.template get_unchecked<T>()),
+                         unwrapper<T>::apply_const(v1.template get_unchecked<T>())); // call binary functor
+            }
+
+            VARIANT_INLINE static R apply(V& v0, V& v1, F&& f)
+            {
+                return f(unwrapper<T>::apply(v0.template get_unchecked<T>()),
+                         unwrapper<T>::apply(v1.template get_unchecked<T>())); // call binary functor
+            }
+        };
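The three dispatcher families unroll visitation into nested if-chains over the runtime type indices: dispatcher handles one variant, while binary_dispatcher fixes the left-hand type first and delegates the remaining combinations to the _rhs/_lhs helpers. A small usage sketch (num and add_visitor are illustrative names):

    #include "Utilities/variant.hpp"

    using num = std::variant<int, double>;

    struct add_visitor
    {
        using result_type = double;
        template <typename A, typename B>
        double operator()(A a, B b) const { return double(a) + double(b); }
    };

    int main()
    {
        num a = 1, b = 2.5;
        // Resolves both runtime types in a single visitation: 3.5
        double sum = std::apply_visitor(add_visitor{}, a, b);
        return sum > 3.4 && sum < 3.6 ? 0 : 1;
    }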
+
+        // comparator functors
+        struct equal_comp
+        {
+            template <typename T>
+            bool operator()(T const& lhs, T const& rhs) const
+            {
+                return lhs == rhs;
+            }
+        };
+
+        struct less_comp
+        {
+            template <typename T>
+            bool operator()(T const& lhs, T const& rhs) const
+            {
+                return lhs < rhs;
+            }
+        };
+
+        template <typename Variant, typename Comp>
+        class comparer
+        {
+        public:
+            explicit comparer(Variant const& lhs) noexcept
+                : lhs_(lhs) {}
+            comparer& operator=(comparer const&) = delete;
+            // visitor
+            template <typename T>
+            bool operator()(T const& rhs_content) const
+            {
+                T const& lhs_content = lhs_.template get_unchecked<T>();
+                return Comp()(lhs_content, rhs_content);
+            }
+
+        private:
+            Variant const& lhs_;
+        };
+
+    } // namespace detail
+
+    struct no_init
+    {
+    };
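equal_comp and less_comp feed the comparer visitor used by the variant's operator== and operator< further down: operands are ordered first by which() and only compared value-wise when they hold the same alternative, so mixed-type comparisons never touch T's own operators. An illustrative sketch (assumes exceptions are enabled so the checked get<T>() exists):

    #include <algorithm>
    #include <string>
    #include <vector>
    #include "Utilities/variant.hpp"

    using field = std::variant<int, std::string>;

    int main()
    {
        std::vector<field> xs{field(std::string("b")), field(2), field(1), field(std::string("a"))};
        std::sort(xs.begin(), xs.end()); // 1, 2, "a", "b" — ints order before strings
        return xs.front().get<int>();    // 1
    }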
+
+    template <typename... Types>
+    class variant
+    {
+        static_assert(sizeof...(Types) > 0, "Template parameter type list of variant can not be empty");
+        static_assert(!detail::disjunction<std::is_reference<Types>...>::value, "Variant can not hold reference types. Maybe use std::reference_wrapper?");
+
+    private:
+        static const std::size_t data_size = detail::static_max<sizeof(Types)...>::value;
+        static const std::size_t data_align = detail::static_max<alignof(Types)...>::value;
+
+        using first_type = typename std::tuple_element<0, std::tuple<Types...>>::type;
+        using data_type = typename std::aligned_storage<data_size, data_align>::type;
+        using helper_type = detail::variant_helper<Types...>;
+
+        std::size_t type_index;
+        data_type data;
+
+    public:
+        VARIANT_INLINE variant() noexcept(std::is_nothrow_default_constructible<first_type>::value)
+            : type_index(sizeof...(Types)-1)
+        {
+            static_assert(std::is_default_constructible<first_type>::value, "First type in variant must be default constructible to allow default construction of variant");
+            new (&data) first_type();
+        }
+
+        VARIANT_INLINE variant(no_init) noexcept
+            : type_index(detail::invalid_value) {}
+
+        // http://isocpp.org/blog/2012/11/universal-references-in-c11-scott-meyers
+        template <typename T, typename Traits = detail::value_traits<T, Types...>,
+                  typename Enable = typename std::enable_if<Traits::is_valid && !std::is_same<variant<Types...>, typename Traits::value_type>::value>::type>
+        VARIANT_INLINE variant(T&& val) noexcept(std::is_nothrow_constructible<typename Traits::target_type, T&&>::value)
+            : type_index(Traits::index)
+        {
+            new (&data) typename Traits::target_type(std::forward<T>(val));
+        }
+
+        VARIANT_INLINE variant(variant<Types...> const& old)
+            : type_index(old.type_index)
+        {
+            helper_type::copy(old.type_index, &old.data, &data);
+        }
+
+        VARIANT_INLINE variant(variant<Types...>&& old) noexcept(std::is_nothrow_move_constructible<std::tuple<Types...>>::value)
+            : type_index(old.type_index)
+        {
+            helper_type::move(old.type_index, &old.data, &data);
+        }
+
+    private:
+        VARIANT_INLINE void copy_assign(variant<Types...> const& rhs)
+        {
+            helper_type::destroy(type_index, &data);
+            type_index = detail::invalid_value;
+            helper_type::copy(rhs.type_index, &rhs.data, &data);
+            type_index = rhs.type_index;
+        }
+
+        VARIANT_INLINE void move_assign(variant<Types...>&& rhs)
+        {
+            helper_type::destroy(type_index, &data);
+            type_index = detail::invalid_value;
+            helper_type::move(rhs.type_index, &rhs.data, &data);
+            type_index = rhs.type_index;
+        }
+
+    public:
+        VARIANT_INLINE variant<Types...>& operator=(variant<Types...>&& other)
+        {
+            move_assign(std::move(other));
+            return *this;
+        }
+
+        VARIANT_INLINE variant<Types...>& operator=(variant<Types...> const& other)
+        {
+            copy_assign(other);
+            return *this;
+        }
+
+        // conversions
+        // move-assign
+        template <typename T>
+        VARIANT_INLINE variant<Types...>& operator=(T&& rhs) noexcept
+        {
+            variant<Types...> temp(std::forward<T>(rhs));
+            move_assign(std::move(temp));
+            return *this;
+        }
+
+        // copy-assign
+        template <typename T>
+        VARIANT_INLINE variant<Types...>& operator=(T const& rhs)
+        {
+            variant<Types...> temp(rhs);
+            copy_assign(temp);
+            return *this;
+        }
+
+        template <typename T, typename std::enable_if<(detail::direct_type<T, Types...>::index != detail::invalid_value)>::type* = nullptr>
+        VARIANT_INLINE bool is() const
+        {
+            return type_index == detail::direct_type<T, Types...>::index;
+        }
+
+        template <typename T, typename std::enable_if<(detail::direct_type<recursive_wrapper<T>, Types...>::index != detail::invalid_value)>::type* = nullptr>
+        VARIANT_INLINE bool is() const
+        {
+            return type_index == detail::direct_type<recursive_wrapper<T>, Types...>::index;
+        }
+
+        VARIANT_INLINE bool valid() const
+        {
+            return type_index != detail::invalid_value;
+        }
+
+        template <typename T, typename... Args>
+        VARIANT_INLINE void set(Args&&... args)
+        {
+            helper_type::destroy(type_index, &data);
+            type_index = detail::invalid_value;
+            new (&data) T(std::forward<Args>(args)...);
+            type_index = detail::direct_type<T, Types...>::index;
+        }
+
+        // get_unchecked<T>()
+        template <typename T, typename std::enable_if<(detail::direct_type<T, Types...>::index != detail::invalid_value)>::type* = nullptr>
+        VARIANT_INLINE T& get_unchecked()
+        {
+            return *reinterpret_cast<T*>(&data);
+        }
+
+#ifdef HAS_EXCEPTIONS
+        // get<T>()
+        template <typename T, typename std::enable_if<(detail::direct_type<T, Types...>::index != detail::invalid_value)>::type* = nullptr>
+        VARIANT_INLINE T& get()
+        {
+            if (type_index == detail::direct_type<T, Types...>::index)
+            {
+                return *reinterpret_cast<T*>(&data);
+            }
+            else
+            {
+                throw bad_variant_access("in get<T>()");
+            }
+        }
+#endif
+
+        template <typename T, typename std::enable_if<(detail::direct_type<T, Types...>::index != detail::invalid_value)>::type* = nullptr>
+        VARIANT_INLINE T const& get_unchecked() const
+        {
+            return *reinterpret_cast<T const*>(&data);
+        }
+
+#ifdef HAS_EXCEPTIONS
+        template <typename T, typename std::enable_if<(detail::direct_type<T, Types...>::index != detail::invalid_value)>::type* = nullptr>
+        VARIANT_INLINE T const& get() const
+        {
+            if (type_index == detail::direct_type<T, Types...>::index)
+            {
+                return *reinterpret_cast<T const*>(&data);
+            }
+            else
+            {
+                throw bad_variant_access("in get<T>()");
+            }
+        }
+#endif
+
+        // get_unchecked<T>() - T stored as recursive_wrapper<T>
+        template <typename T, typename std::enable_if<(detail::direct_type<recursive_wrapper<T>, Types...>::index != detail::invalid_value)>::type* = nullptr>
+        VARIANT_INLINE T& get_unchecked()
+        {
+            return (*reinterpret_cast<recursive_wrapper<T>*>(&data)).get();
+        }
+
+#ifdef HAS_EXCEPTIONS
+        // get<T>() - T stored as recursive_wrapper<T>
+        template <typename T, typename std::enable_if<(detail::direct_type<recursive_wrapper<T>, Types...>::index != detail::invalid_value)>::type* = nullptr>
+        VARIANT_INLINE T& get()
+        {
+            if (type_index == detail::direct_type<recursive_wrapper<T>, Types...>::index)
+            {
+                return (*reinterpret_cast<recursive_wrapper<T>*>(&data)).get();
+            }
+            else
+            {
+                throw bad_variant_access("in get<T>()");
+            }
+        }
+#endif
+
+        template <typename T, typename std::enable_if<(detail::direct_type<recursive_wrapper<T>, Types...>::index != detail::invalid_value)>::type* = nullptr>
+        VARIANT_INLINE T const& get_unchecked() const
+        {
+            return (*reinterpret_cast<recursive_wrapper<T> const*>(&data)).get();
+        }
+
+#ifdef HAS_EXCEPTIONS
+        template <typename T, typename std::enable_if<(detail::direct_type<recursive_wrapper<T>, Types...>::index != detail::invalid_value)>::type* = nullptr>
+        VARIANT_INLINE T const& get() const
+        {
+            if (type_index == detail::direct_type<recursive_wrapper<T>, Types...>::index)
+            {
+                return (*reinterpret_cast<recursive_wrapper<T> const*>(&data)).get();
+            }
+            else
+            {
+                throw bad_variant_access("in get<T>()");
+            }
+        }
+#endif
+
+        // get_unchecked<T>() - T stored as std::reference_wrapper<T>
+        template <typename T, typename std::enable_if<(detail::direct_type<std::reference_wrapper<T>, Types...>::index != detail::invalid_value)>::type* = nullptr>
+        VARIANT_INLINE T& get_unchecked()
+        {
+            return (*reinterpret_cast<std::reference_wrapper<T>*>(&data)).get();
+        }
+
+#ifdef HAS_EXCEPTIONS
+        // get<T>() - T stored as std::reference_wrapper<T>
+        template <typename T, typename std::enable_if<(detail::direct_type<std::reference_wrapper<T>, Types...>::index != detail::invalid_value)>::type* = nullptr>
+        VARIANT_INLINE T& get()
+        {
+            if (type_index == detail::direct_type<std::reference_wrapper<T>, Types...>::index)
+            {
+                return (*reinterpret_cast<std::reference_wrapper<T>*>(&data)).get();
+            }
+            else
+            {
+                throw bad_variant_access("in get<T>()");
+            }
+        }
+#endif
+
+        template <typename T, typename std::enable_if<(detail::direct_type<std::reference_wrapper<T const>, Types...>::index != detail::invalid_value)>::type* = nullptr>
+        VARIANT_INLINE T const& get_unchecked() const
+        {
+            return (*reinterpret_cast<std::reference_wrapper<T const> const*>(&data)).get();
+        }
+
+#ifdef HAS_EXCEPTIONS
+        template <typename T, typename std::enable_if<(detail::direct_type<std::reference_wrapper<T const>, Types...>::index != detail::invalid_value)>::type* = nullptr>
+        VARIANT_INLINE T const& get() const
+        {
+            if (type_index == detail::direct_type<std::reference_wrapper<T const>, Types...>::index)
+            {
+                return (*reinterpret_cast<std::reference_wrapper<T const> const*>(&data)).get();
+            }
+            else
+            {
+                throw bad_variant_access("in get<T>()");
+            }
+        }
+#endif
+
+        // This function is deprecated because it returns an internal index field.
+        // Use which() instead.
+        MAPBOX_VARIANT_DEPRECATED VARIANT_INLINE std::size_t get_type_index() const
+        {
+            return type_index;
+        }
+
+        VARIANT_INLINE int which() const noexcept
+        {
+            return static_cast<int>(sizeof...(Types)-type_index - 1);
+        }
+
+        template <typename T, typename std::enable_if<(detail::direct_type<T, Types...>::index != detail::invalid_value)>::type* = nullptr>
+        VARIANT_INLINE static constexpr int which() noexcept
+        {
+            return static_cast<int>(sizeof...(Types)-detail::direct_type<T, Types...>::index - 1);
+        }
+
+        // visitor
+        // unary
+        template <typename F, typename V, typename R = typename detail::result_of_unary_visit<F, first_type>::type>
+        auto VARIANT_INLINE static visit(V const& v, F&& f)
+            -> decltype(detail::dispatcher<F, V, R, Types...>::apply_const(v, std::forward<F>(f)))
+        {
+            return detail::dispatcher<F, V, R, Types...>::apply_const(v, std::forward<F>(f));
+        }
+        // non-const
+        template <typename F, typename V, typename R = typename detail::result_of_unary_visit<F, first_type>::type>
+        auto VARIANT_INLINE static visit(V& v, F&& f)
+            -> decltype(detail::dispatcher<F, V, R, Types...>::apply(v, std::forward<F>(f)))
+        {
+            return detail::dispatcher<F, V, R, Types...>::apply(v, std::forward<F>(f));
+        }
+
+        // binary
+        // const
+        template <typename F, typename V, typename R = typename detail::result_of_binary_visit<F, first_type>::type>
+        auto VARIANT_INLINE static binary_visit(V const& v0, V const& v1, F&& f)
+            -> decltype(detail::binary_dispatcher<F, V, R, Types...>::apply_const(v0, v1, std::forward<F>(f)))
+        {
+            return detail::binary_dispatcher<F, V, R, Types...>::apply_const(v0, v1, std::forward<F>(f));
+        }
+        // non-const
+        template <typename F, typename V, typename R = typename detail::result_of_binary_visit<F, first_type>::type>
+        auto VARIANT_INLINE static binary_visit(V& v0, V& v1, F&& f)
+            -> decltype(detail::binary_dispatcher<F, V, R, Types...>::apply(v0, v1, std::forward<F>(f)))
+        {
+            return detail::binary_dispatcher<F, V, R, Types...>::apply(v0, v1, std::forward<F>(f));
+        }
+
+        ~variant() noexcept // no-throw destructor
+        {
+            helper_type::destroy(type_index, &data);
+        }
+
+        // comparison operators
+        // equality
+        VARIANT_INLINE bool operator==(variant const& rhs) const
+        {
+            assert(valid() && rhs.valid());
+            if (this->which() != rhs.which())
+            {
+                return false;
+            }
+            detail::comparer<variant, detail::equal_comp> visitor(*this);
+            return visit(rhs, visitor);
+        }
+
+        VARIANT_INLINE bool operator!=(variant const& rhs) const
+        {
+            return !(*this == rhs);
+        }
+
+        // less than
+        VARIANT_INLINE bool operator<(variant const& rhs) const
+        {
+            assert(valid() && rhs.valid());
+            if (this->which() != rhs.which())
+            {
+                return this->which() < rhs.which();
+            }
+            detail::comparer<variant, detail::less_comp> visitor(*this);
+            return visit(rhs, visitor);
+        }
+        VARIANT_INLINE bool operator>(variant const& rhs) const
+        {
+            return rhs < *this;
+        }
+        VARIANT_INLINE bool operator<=(variant const& rhs) const
+        {
+            return !(*this > rhs);
+        }
+        VARIANT_INLINE bool operator>=(variant const& rhs) const
+        {
+            return !(*this < rhs);
+        }
+    };
+
+    // unary visitor interface
+    // const
+    template <typename F, typename V>
+    auto VARIANT_INLINE apply_visitor(F&& f, V const& v) -> decltype(V::visit(v, std::forward<F>(f)))
+    {
+        return V::visit(v, std::forward<F>(f));
+    }
+
+    // non-const
+    template <typename F, typename V>
+    auto VARIANT_INLINE apply_visitor(F&& f, V& v) -> decltype(V::visit(v, std::forward<F>(f)))
+    {
+        return V::visit(v, std::forward<F>(f));
+    }
+
+    // binary visitor interface
+    // const
+    template <typename F, typename V>
+    auto VARIANT_INLINE apply_visitor(F&& f, V const& v0, V const& v1) -> decltype(V::binary_visit(v0, v1, std::forward<F>(f)))
+    {
+        return V::binary_visit(v0, v1, std::forward<F>(f));
+    }
+
+    // non-const
+    template <typename F, typename V>
+    auto VARIANT_INLINE apply_visitor(F&& f, V& v0, V& v1) -> decltype(V::binary_visit(v0, v1, std::forward<F>(f)))
+    {
+        return V::binary_visit(v0, v1, std::forward<F>(f));
+    }
+
+    // getter interface
+
+#ifdef HAS_EXCEPTIONS
+    template <typename ResultType, typename T>
+    auto get(T& var)->decltype(var.template get<ResultType>())
+    {
+        return var.template get<ResultType>();
+    }
+#endif
+
+    template <typename ResultType, typename T>
+    ResultType& get_unchecked(T& var)
+    {
+        return var.template get_unchecked<ResultType>();
+    }
+
+#ifdef HAS_EXCEPTIONS
+    template <typename ResultType, typename T>
+    auto get(T const& var)->decltype(var.template get<ResultType>())
+    {
+        return var.template get<ResultType>();
+    }
+#endif
+
+    template <typename ResultType, typename T>
+    ResultType const& get_unchecked(T const& var)
+    {
+        return var.template get_unchecked<ResultType>();
+    }
+}
+//	} // namespace util
+//} // namespace mapbox
+
+#endif // MAPBOX_UTIL_VARIANT_HPP
\ No newline at end of file
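End to end, the public surface of this header mirrors boost::variant: which() reports the alternative ordinally (0 = first type, reversing the internal countdown index), get<T>() throws bad_variant_access on mismatch, and get_unchecked<T>() skips the check. A short sketch (setting is an illustrative alias; assumes HAS_EXCEPTIONS is defined):

    #include <cstdio>
    #include "Utilities/variant.hpp"

    using setting = std::variant<int, float>;

    int main()
    {
        setting s = 3.5f;
        std::printf("%d\n", s.which());           // 1: float is the second alternative
        std::printf("%f\n", s.get<float>());      // 3.5
        s = 7;                                    // converting assignment re-tags the storage
        std::printf("%d\n", s.which());           // 0
        std::printf("%d\n", s.is<int>() ? 1 : 0); // 1
        return 0;
    }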
diff --git a/rpcs3/Emu/RSX/Common/BufferUtils.cpp b/rpcs3/Emu/RSX/Common/BufferUtils.cpp
index 1e38b72f7a..f6a16c1419 100644
--- a/rpcs3/Emu/RSX/Common/BufferUtils.cpp
+++ b/rpcs3/Emu/RSX/Common/BufferUtils.cpp
@@ -32,12 +32,12 @@ namespace
 		return{ X, Y, Z, 1 };
 	}
 
-	template <typename T, typename U>
-	void copy_whole_attribute_array(gsl::span<T> dst, const gsl::byte* src_ptr, u8 attribute_size, u8 dst_stride, u32 src_stride, u32 first, u32 vertex_count)
+	template <typename T, typename U>
+	void copy_whole_attribute_array(gsl::span<T> dst, gsl::span<const gsl::byte> src_ptr, u8 attribute_size, u8 dst_stride, u32 src_stride, u32 vertex_count)
 	{
 		for (u32 vertex = 0; vertex < vertex_count; ++vertex)
 		{
-			const U* src = reinterpret_cast<const U*>(src_ptr + src_stride * (first + vertex));
+			gsl::span<const U> src = gsl::as_span<const U>(src_ptr.subspan(src_stride * vertex, attribute_size * sizeof(const U)));
 			for (u32 i = 0; i < attribute_size; ++i)
 			{
 				dst[vertex * dst_stride / sizeof(T) + i] = src[i];
@@ -46,7 +46,7 @@ namespace
 	}
 }
 
-void write_vertex_array_data_to_buffer(gsl::span<gsl::byte> raw_dst_span, const gsl::byte *src_ptr, u32 first, u32 count, rsx::vertex_base_type type, u32 vector_element_count, u32 attribute_src_stride, u8 dst_stride)
+void write_vertex_array_data_to_buffer(gsl::span<gsl::byte> raw_dst_span, gsl::span<const gsl::byte> src_ptr, u32 count, rsx::vertex_base_type type, u32 vector_element_count, u32 attribute_src_stride, u8 dst_stride)
 {
 	verify(HERE), (vector_element_count > 0);
 
@@ -56,7 +56,7 @@ void write_vertex_array_data_to_buffer(gsl::span<gsl::byte> raw_dst_span, const
 	case rsx::vertex_base_type::ub256:
 	{
 		gsl::span<u8> dst_span = as_span_workaround<u8>(raw_dst_span);
-		copy_whole_attribute_array<u8, u8>(dst_span, src_ptr, vector_element_count, dst_stride, attribute_src_stride, first, count);
+		copy_whole_attribute_array<u8, u8>(dst_span, src_ptr, vector_element_count, dst_stride, attribute_src_stride, count);
 		return;
 	}
 	case rsx::vertex_base_type::s1:
@@ -64,13 +64,13 @@ void write_vertex_array_data_to_buffer(gsl::span<gsl::byte> raw_dst_span, const
 	case rsx::vertex_base_type::s32k:
 	{
 		gsl::span<u16> dst_span = as_span_workaround<u16>(raw_dst_span);
-		copy_whole_attribute_array<u16, be_t<u16>>(dst_span, src_ptr, vector_element_count, dst_stride, attribute_src_stride, first, count);
+		copy_whole_attribute_array<u16, be_t<u16>>(dst_span, src_ptr, vector_element_count, dst_stride, attribute_src_stride, count);
 		return;
 	}
 	case rsx::vertex_base_type::f:
 	{
 		gsl::span<u32> dst_span = as_span_workaround<u32>(raw_dst_span);
-		copy_whole_attribute_array<u32, be_t<u32>>(dst_span, src_ptr, vector_element_count, dst_stride, attribute_src_stride, first, count);
+		copy_whole_attribute_array<u32, be_t<u32>>(dst_span, src_ptr, vector_element_count, dst_stride, attribute_src_stride, count);
 		return;
 	}
 	case rsx::vertex_base_type::cmp:
@@ -78,8 +78,11 @@ void write_vertex_array_data_to_buffer(gsl::span<gsl::byte> raw_dst_span, const
 		gsl::span<u16> dst_span = as_span_workaround<u16>(raw_dst_span);
 		for (u32 i = 0; i < count; ++i)
 		{
-			auto* c_src = (const be_t<u32>*)(src_ptr + attribute_src_stride * (first + i));
-			const auto& decoded_vector = decode_cmp_vector(*c_src);
+			be_t<u32> src_value;
+			memcpy(&src_value,
+			    src_ptr.subspan(attribute_src_stride * i).data(),
+			    sizeof(be_t<u32>));
+			const auto& decoded_vector = decode_cmp_vector(src_value);
 			dst_span[i * dst_stride / sizeof(u16)] = decoded_vector[0];
 			dst_span[i * dst_stride / sizeof(u16) + 1] = decoded_vector[1];
 			dst_span[i * dst_stride / sizeof(u16) + 2] = decoded_vector[2];
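The cmp path above now copies the big-endian word out of a bounds-checked subspan instead of casting an interior pointer, which sidesteps alignment and strict-aliasing hazards on strided sources. A standalone model of the same move (uses C++20 std::span as a stand-in for gsl::span; read_be32 is an illustrative name):

    #include <cstddef>
    #include <cstdint>
    #include <cstring>
    #include <span>

    // memcpy tolerates any source alignment, and the subspan arithmetic is
    // range-checked by the span type — the two properties the patch buys.
    static uint32_t read_be32(std::span<const std::byte> src, std::size_t offset)
    {
        unsigned char b[4];
        std::memcpy(b, src.subspan(offset, 4).data(), 4);
        return (uint32_t(b[0]) << 24) | (uint32_t(b[1]) << 16) |
               (uint32_t(b[2]) << 8)  |  uint32_t(b[3]);
    }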
diff --git a/rpcs3/Emu/RSX/Common/BufferUtils.h b/rpcs3/Emu/RSX/Common/BufferUtils.h
index 1edcef45a6..63910f5244 100644
--- a/rpcs3/Emu/RSX/Common/BufferUtils.h
+++ b/rpcs3/Emu/RSX/Common/BufferUtils.h
@@ -7,10 +7,10 @@
 #include "../RSXThread.h"
 
 /**
- * Write count vertex attributes from src_ptr starting at first.
+ * Write count vertex attributes from src_ptr.
 * src_ptr array layout is deduced from the type, vector element count and src_stride arguments.
 */
-void write_vertex_array_data_to_buffer(gsl::span<gsl::byte> raw_dst_span, const gsl::byte *src_ptr, u32 first, u32 count, rsx::vertex_base_type type, u32 vector_element_count, u32 attribute_src_stride, u8 dst_stride);
+void write_vertex_array_data_to_buffer(gsl::span<gsl::byte> raw_dst_span, gsl::span<const gsl::byte> src_ptr, u32 count, rsx::vertex_base_type type, u32 vector_element_count, u32 attribute_src_stride, u8 dst_stride);
 
 /*
 * If primitive mode is not supported and need to be emulated (using an index buffer) returns false.
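For reference, the copy these declarations describe is a strided gather: vertex v's attribute starts at v*src_stride bytes in the source and lands at v*dst_stride bytes in the destination, attribute_size components at a time. A simplified model with raw pointers (the real code uses spans and typed be_t conversions; copy_attribute is an illustrative name):

    #include <cstdint>
    #include <cstring>

    static void copy_attribute(uint8_t* dst, const uint8_t* src,
                               unsigned attribute_size, unsigned dst_stride,
                               unsigned src_stride, unsigned vertex_count,
                               unsigned component_size)
    {
        // One strided block of attribute_size components per vertex.
        for (unsigned v = 0; v < vertex_count; ++v)
            std::memcpy(dst + v * dst_stride, src + v * src_stride,
                        attribute_size * component_size);
    }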
diff --git a/rpcs3/Emu/RSX/D3D12/D3D12Buffer.cpp b/rpcs3/Emu/RSX/D3D12/D3D12Buffer.cpp
index e560001945..d4ffe889c4 100644
--- a/rpcs3/Emu/RSX/D3D12/D3D12Buffer.cpp
+++ b/rpcs3/Emu/RSX/D3D12/D3D12Buffer.cpp
@@ -7,6 +7,7 @@
 #include "../Common/BufferUtils.h"
 #include "D3D12Formats.h"
 #include "../rsx_methods.h"
+#include
 
 namespace
 {
@@ -80,77 +81,90 @@ namespace
 	}
 }
 
+namespace
+{
+
+	struct vertex_buffer_visitor
+	{
+		std::vector<D3D12_SHADER_RESOURCE_VIEW_DESC> vertex_buffer_views;
+
+		vertex_buffer_visitor(u32 vtx_cnt, ID3D12GraphicsCommandList* cmdlst, ID3D12Resource* write_vertex_buffer,
+		    d3d12_data_heap& heap)
+		    : vertex_count(vtx_cnt)
+		    , offset_in_vertex_buffers_buffer(0)
+		    , m_buffer_data(heap)
+		    , command_list(cmdlst)
+		    , m_vertex_buffer_data(write_vertex_buffer)
+		{
+		}
+
+		void operator()(const rsx::vertex_array_buffer& vertex_array)
+		{
+			u32 element_size = rsx::get_vertex_type_size_on_host(vertex_array.type, vertex_array.attribute_size);
+			UINT buffer_size = element_size * vertex_count;
+			size_t heap_offset = m_buffer_data.alloc(buffer_size);
+
+			void* mapped_buffer = m_buffer_data.map(CD3DX12_RANGE(heap_offset, heap_offset + buffer_size));
+			gsl::span<gsl::byte> mapped_buffer_span = {(gsl::byte*)mapped_buffer, gsl::narrow_cast<int>(buffer_size)};
+			write_vertex_array_data_to_buffer(mapped_buffer_span, vertex_array.data, vertex_count, vertex_array.type, vertex_array.attribute_size, vertex_array.stride, element_size);
+
+			m_buffer_data.unmap(CD3DX12_RANGE(heap_offset, heap_offset + buffer_size));
+
+			command_list->CopyBufferRegion(m_vertex_buffer_data, offset_in_vertex_buffers_buffer, m_buffer_data.get_heap(), heap_offset, buffer_size);
+
+			vertex_buffer_views.emplace_back(get_vertex_attribute_srv(vertex_array.type, vertex_array.attribute_size, offset_in_vertex_buffers_buffer, buffer_size));
+			offset_in_vertex_buffers_buffer = get_next_multiple_of<48>(offset_in_vertex_buffers_buffer + buffer_size); // 48 is multiple of 2, 4, 6, 8, 12, 16
+
+			//m_timers.buffer_upload_size += buffer_size;
+		}
+
+		void operator()(const rsx::vertex_array_register& vertex_register)
+		{
+			u32 element_size = rsx::get_vertex_type_size_on_host(vertex_register.type, vertex_register.attribute_size);
+			UINT buffer_size = element_size;
+			size_t heap_offset = m_buffer_data.alloc(buffer_size);
+
+			void* mapped_buffer = m_buffer_data.map(CD3DX12_RANGE(heap_offset, heap_offset + buffer_size));
+			memcpy(mapped_buffer, vertex_register.data.data(), buffer_size);
+			m_buffer_data.unmap(CD3DX12_RANGE(heap_offset, heap_offset + buffer_size));
+
+			command_list->CopyBufferRegion(m_vertex_buffer_data, offset_in_vertex_buffers_buffer, m_buffer_data.get_heap(), heap_offset, buffer_size);
+
+			vertex_buffer_views.emplace_back(get_vertex_attribute_srv(vertex_register.type, vertex_register.attribute_size, offset_in_vertex_buffers_buffer, buffer_size));
+			offset_in_vertex_buffers_buffer = get_next_multiple_of<48>(offset_in_vertex_buffers_buffer + buffer_size); // 48 is multiple of 2, 4, 6, 8, 12, 16
+		}
+
+		void operator()(const rsx::empty_vertex_array& vbo)
+		{
+		}
+
+	protected:
+		u32 vertex_count;
+		ID3D12GraphicsCommandList* command_list;
+		ID3D12Resource* m_vertex_buffer_data;
+		size_t offset_in_vertex_buffers_buffer;
+		d3d12_data_heap& m_buffer_data;
+	};
+
+} // End anonymous namespace
+
 std::vector<D3D12_SHADER_RESOURCE_VIEW_DESC> D3D12GSRender::upload_vertex_attributes(
-	const std::vector<std::pair<u32, u32> > &vertex_ranges,
-	gsl::not_null<ID3D12GraphicsCommandList*> command_list)
+	const std::vector<std::pair<u32, u32>>& vertex_ranges,
+	gsl::not_null<ID3D12GraphicsCommandList*> command_list)
 {
-	std::vector<D3D12_SHADER_RESOURCE_VIEW_DESC> vertex_buffer_views;
 	command_list->ResourceBarrier(1, &CD3DX12_RESOURCE_BARRIER::Transition(m_vertex_buffer_data.Get(), D3D12_RESOURCE_STATE_VERTEX_AND_CONSTANT_BUFFER, D3D12_RESOURCE_STATE_COPY_DEST));
 
 	u32 vertex_count = get_vertex_count(vertex_ranges);
-	size_t offset_in_vertex_buffers_buffer = 0;
-	u32 input_mask = rsx::method_registers.vertex_attrib_input_mask();
 	verify(HERE), rsx::method_registers.vertex_data_base_index() == 0;
 
-	for (int index = 0; index < rsx::limits::vertex_count; ++index)
-	{
-		bool enabled = !!(input_mask & (1 << index));
-		if (!enabled)
-			continue;
-
-		if (rsx::method_registers.vertex_arrays_info[index].size > 0)
-		{
-			// Active vertex array
-			const rsx::data_array_format_info &info = rsx::method_registers.vertex_arrays_info[index];
-
-			u32 element_size = rsx::get_vertex_type_size_on_host(info.type, info.size);
-			UINT buffer_size = element_size * vertex_count;
-			size_t heap_offset = m_buffer_data.alloc(buffer_size);
-
-			u32 base_offset = rsx::method_registers.vertex_data_base_offset();
-			u32 offset = rsx::method_registers.vertex_arrays_info[index].offset();
-			u32 address = base_offset + rsx::get_address(offset & 0x7fffffff, offset >> 31);
-			const gsl::byte *src_ptr = gsl::narrow_cast<const gsl::byte*>(vm::base(address));
-
-			void *mapped_buffer = m_buffer_data.map(CD3DX12_RANGE(heap_offset, heap_offset + buffer_size));
-			for (const auto &range : vertex_ranges)
-			{
-				gsl::span<gsl::byte> mapped_buffer_span = { (gsl::byte*)mapped_buffer, gsl::narrow_cast<int>(buffer_size) };
-				write_vertex_array_data_to_buffer(mapped_buffer_span, src_ptr, range.first, range.second, info.type, info.size, info.stride, element_size);
-				mapped_buffer = (char*)mapped_buffer + range.second * element_size;
-			}
-			m_buffer_data.unmap(CD3DX12_RANGE(heap_offset, heap_offset + buffer_size));
-
-			command_list->CopyBufferRegion(m_vertex_buffer_data.Get(), offset_in_vertex_buffers_buffer, m_buffer_data.get_heap(), heap_offset, buffer_size);
-
-			vertex_buffer_views.emplace_back(get_vertex_attribute_srv(info, offset_in_vertex_buffers_buffer, buffer_size));
-			offset_in_vertex_buffers_buffer = get_next_multiple_of<48>(offset_in_vertex_buffers_buffer + buffer_size); // 48 is multiple of 2, 4, 6, 8, 12, 16
-
-			m_timers.buffer_upload_size += buffer_size;
-
-		}
-		else if (rsx::method_registers.register_vertex_info[index].size > 0)
-		{
-			// In register vertex attribute
-			const rsx::register_vertex_data_info &info = rsx::method_registers.register_vertex_info[index];
-
-			u32 element_size = rsx::get_vertex_type_size_on_host(info.type, info.size);
-			UINT buffer_size = element_size;
-			size_t heap_offset = m_buffer_data.alloc(buffer_size);
-
-			void *mapped_buffer = m_buffer_data.map(CD3DX12_RANGE(heap_offset, heap_offset + buffer_size));
-			memcpy(mapped_buffer, info.data.data(), buffer_size);
-			m_buffer_data.unmap(CD3DX12_RANGE(heap_offset, heap_offset + buffer_size));
-
-			command_list->CopyBufferRegion(m_vertex_buffer_data.Get(), offset_in_vertex_buffers_buffer, m_buffer_data.get_heap(), heap_offset, buffer_size);
-
-			vertex_buffer_views.emplace_back(get_vertex_attribute_srv(info.type, info.size, offset_in_vertex_buffers_buffer, buffer_size));
-			offset_in_vertex_buffers_buffer = get_next_multiple_of<48>(offset_in_vertex_buffers_buffer + buffer_size); // 48 is multiple of 2, 4, 6, 8, 12, 16
-		}
-	}
+	vertex_buffer_visitor visitor(vertex_count, command_list, m_vertex_buffer_data.Get(), m_buffer_data);
+	const auto& vertex_buffers = get_vertex_buffers(rsx::method_registers, vertex_ranges);
+
+	for (const auto& vbo : vertex_buffers)
+		std::apply_visitor(visitor, vbo);
+
 	command_list->ResourceBarrier(1, &CD3DX12_RESOURCE_BARRIER::Transition(m_vertex_buffer_data.Get(), D3D12_RESOURCE_STATE_COPY_DEST, D3D12_RESOURCE_STATE_VERTEX_AND_CONSTANT_BUFFER));
-	return vertex_buffer_views;
+	return visitor.vertex_buffer_views;
 }
 
 namespace
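Both D3D12 code paths round the running offset with get_next_multiple_of<48> so that every packed vertex view starts at an offset divisible by all element sizes in play (2, 4, 6, 8, 12, 16). The helper lives elsewhere in rpcs3; a plausible equivalent under that assumption:

    #include <cstddef>

    template <std::size_t Multiple>
    constexpr std::size_t get_next_multiple_of(std::size_t value)
    {
        // Round up to the next multiple (assumed semantics of the rpcs3 helper).
        return (value + Multiple - 1) / Multiple * Multiple;
    }

    static_assert(get_next_multiple_of<48>(49) == 96, "rounds up");
    static_assert(get_next_multiple_of<48>(96) == 96, "already aligned");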
diff --git a/rpcs3/Emu/RSX/GL/GLGSRender.h b/rpcs3/Emu/RSX/GL/GLGSRender.h
index 8f3ae3aee0..f096234921 100644
--- a/rpcs3/Emu/RSX/GL/GLGSRender.h
+++ b/rpcs3/Emu/RSX/GL/GLGSRender.h
@@ -61,7 +61,7 @@ private:
 	// Return element to draw and in case of indexed draw index type and offset in index buffer
 	std::tuple<u32, std::optional<std::tuple<GLenum, u32>>> set_vertex_buffer();
 
-	void upload_vertex_buffers(const u32 &max_index, const u32 &max_vertex_attrib_size, const u32 &input_mask, const u32 &texture_index_offset);
+	void upload_vertex_buffers(u32 min_index, u32 max_index, const u32& max_vertex_attrib_size, const u32& texture_index_offset);
 
 	// Returns vertex count
 	u32 upload_inline_array(const u32 &max_vertex_attrib_size, const u32 &texture_index_offset);
diff --git a/rpcs3/Emu/RSX/GL/vertex_buffer.cpp b/rpcs3/Emu/RSX/GL/vertex_buffer.cpp
index ccaaffcae7..99a9c16c91 100644
--- a/rpcs3/Emu/RSX/GL/vertex_buffer.cpp
+++ b/rpcs3/Emu/RSX/GL/vertex_buffer.cpp
@@ -222,7 +222,7 @@ std::tuple<u32, std::optional<std::tuple<GLenum, u32>>> GLGSRender::set_vertex_buffer()
 		u32 offset_in_index_buffer = mapping.second;
 		std::tie(min_index, max_index, vertex_or_index_count) = upload_index_buffer(get_raw_index_array(rsx::method_registers.current_draw_clause.first_count_commands), ptr, type, rsx::method_registers.current_draw_clause.primitive, rsx::method_registers.current_draw_clause.first_count_commands, vertex_or_index_count);
-
+		min_index = 0; // we must keep index to vertex mapping
 		m_index_ring_buffer.unmap();
 		index_info = std::make_tuple(get_index_type(type), offset_in_index_buffer);
 	}
@@ -238,7 +238,8 @@ std::tuple<u32, std::optional<std::tuple<GLenum, u32>>> GLGSRender::set_vertex_buffer()
 	{
 		assert(rsx::method_registers.current_draw_clause.command == rsx::draw_command::array);
 		vertex_count = rsx::method_registers.current_draw_clause.get_elements_count();
-		max_index = vertex_count - 1;
+		min_index = rsx::method_registers.current_draw_clause.first_count_commands.front().first;
+		max_index = vertex_count - 1 + min_index;
 	}
 
 	if (!gl::is_primitive_native(rsx::method_registers.current_draw_clause.primitive))
@@ -259,7 +260,7 @@ std::tuple<u32, std::optional<std::tuple<GLenum, u32>>> GLGSRender::set_vertex_buffer()
 		return std::make_tuple(vertex_or_index_count, index_info);
 	}
 
-	upload_vertex_buffers(max_index, max_vertex_attrib_size, input_mask, texture_index_offset);
+	upload_vertex_buffers(min_index, max_index, max_vertex_attrib_size, texture_index_offset);
 
 	std::chrono::time_point<std::chrono::system_clock> now = std::chrono::system_clock::now();
 	m_vertex_upload_time += std::chrono::duration_cast<std::chrono::microseconds>(now - then).count();
@@ -267,118 +268,126 @@ std::tuple<u32, std::optional<std::tuple<GLenum, u32>>> GLGSRender::set_vertex_buffer()
 	return std::make_tuple(vertex_or_index_count, index_info);
 }
 
+namespace
 {
+	struct vertex_buffer_visitor
+	{
+		vertex_buffer_visitor(u32 vtx_cnt,
+		    u32 texture_idx_offset,
+		    gl::ring_buffer& heap, gl::glsl::program* prog,
+		    gl::texture* attrib_buffer,
+		    u32 min_texbuffer_offset)
+		    : vertex_count(vtx_cnt)
+		    , m_attrib_ring_info(heap)
+		    , m_program(prog)
+		    , texture_index_offset(texture_idx_offset)
+		    , m_gl_attrib_buffers(attrib_buffer)
+		    , m_min_texbuffer_alignment(min_texbuffer_offset)
+		{
+		}
+
+		void operator()(const rsx::vertex_array_buffer& vertex_array)
+		{
+			int location;
+			if (!m_program->uniforms.has_location(rsx::vertex_program::input_attrib_names[vertex_array.index] + "_buffer", &location))
+				return;
+
+			// Fill vertex_array
+			u32 element_size = rsx::get_vertex_type_size_on_host(vertex_array.type, vertex_array.attribute_size);
+
+			u32 data_size = vertex_count * element_size;
+			u32 gl_type = to_gl_internal_type(vertex_array.type, vertex_array.attribute_size);
+			auto& texture = m_gl_attrib_buffers[vertex_array.index];
+
+			u32 buffer_offset = 0;
+			auto mapping = m_attrib_ring_info.alloc_from_reserve(data_size, m_min_texbuffer_alignment);
+			gsl::byte* dst = static_cast<gsl::byte*>(mapping.first);
+			buffer_offset = mapping.second;
+			gsl::span<gsl::byte> dest_span(dst, data_size);
+
+			prepare_buffer_for_writing(dst, vertex_array.type, vertex_array.attribute_size, vertex_count);
+
+			write_vertex_array_data_to_buffer(dest_span, vertex_array.data, vertex_count, vertex_array.type, vertex_array.attribute_size, vertex_array.stride, rsx::get_vertex_type_size_on_host(vertex_array.type, vertex_array.attribute_size));
+
+			texture.copy_from(m_attrib_ring_info, gl_type, buffer_offset, data_size);
+
+			//Link texture to uniform
+			m_program->uniforms.texture(location, vertex_array.index + texture_index_offset, texture);
+		}
+
+		void operator()(const rsx::vertex_array_register& vertex_register)
+		{
+			int location;
+			if (!m_program->uniforms.has_location(rsx::vertex_program::input_attrib_names[vertex_register.index] + "_buffer", &location))
+				return;
+			switch (vertex_register.type)
+			{
+			case rsx::vertex_base_type::f:
+			{
+				const u32 element_size = rsx::get_vertex_type_size_on_host(vertex_register.type, vertex_register.attribute_size);
+				const u32 gl_type = to_gl_internal_type(vertex_register.type, vertex_register.attribute_size);
+				const size_t data_size = element_size;
+
+				auto& texture = m_gl_attrib_buffers[vertex_register.index];
+
+				auto mapping = m_attrib_ring_info.alloc_from_reserve(data_size, m_min_texbuffer_alignment);
+				u8 *dst = static_cast<u8*>(mapping.first);
+
+				memcpy(dst, vertex_register.data.data(), element_size);
+				texture.copy_from(m_attrib_ring_info, gl_type, mapping.second, data_size);
+
+				//Link texture to uniform
+				m_program->uniforms.texture(location, vertex_register.index + texture_index_offset, texture);
+				break;
+			}
+			default:
+				LOG_ERROR(RSX, "bad non array vertex data format (type=%d, size=%d)", (u32)vertex_register.type, vertex_register.attribute_size);
+				break;
+			}
+		}
+
+		void operator()(const rsx::empty_vertex_array& vbo)
+		{
+			int location;
+			if (!m_program->uniforms.has_location(rsx::vertex_program::input_attrib_names[vbo.index] + "_buffer", &location))
+				return;
+			glActiveTexture(GL_TEXTURE0 + vbo.index + texture_index_offset);
+			glBindTexture(GL_TEXTURE_BUFFER, 0);
+			glProgramUniform1i(m_program->id(), location, vbo.index + texture_index_offset);
+		}
+
+	protected:
+		u32 vertex_count;
+		gl::ring_buffer& m_attrib_ring_info;
+		gl::glsl::program* m_program;
+		u32 texture_index_offset;
+		gl::texture* m_gl_attrib_buffers;
+		GLint m_min_texbuffer_alignment;
+	};
+
+} // End anonymous namespace
+
-void GLGSRender::upload_vertex_buffers(const u32 &max_index, const u32 &max_vertex_attrib_size, const u32 &input_mask, const u32 &texture_index_offset)
+void GLGSRender::upload_vertex_buffers(u32 min_index, u32 max_index, const u32& max_vertex_attrib_size, const u32& texture_index_offset)
 {
-	u32 verts_allocated = max_index + 1;
+	u32 verts_allocated = max_index - min_index + 1;
 	__glcheck m_attrib_ring_buffer.reserve_and_map(verts_allocated * max_vertex_attrib_size);
 
+	// Disable all attribute textures first, then let the visitor re-enable the used ones.
+	// Is this really necessary?
 	for (int index = 0; index < rsx::limits::vertex_count; ++index)
 	{
 		int location;
 		if (!m_program->uniforms.has_location(rsx::vertex_program::input_attrib_names[index] + "_buffer", &location))
 			continue;
 
-		bool enabled = !!(input_mask & (1 << index));
-		if (!enabled)
-		{
-			glActiveTexture(GL_TEXTURE0 + index + texture_index_offset);
-			glBindTexture(GL_TEXTURE_BUFFER, 0);
-			glProgramUniform1i(m_program->id(), location, index + texture_index_offset);
-			continue;
-		}
-
-		if (rsx::method_registers.vertex_arrays_info[index].size > 0)
-		{
-			auto &vertex_info = rsx::method_registers.vertex_arrays_info[index];
-
-			// Fill vertex_array
-			u32 element_size = rsx::get_vertex_type_size_on_host(vertex_info.type, vertex_info.size);
-			//vertex_array.resize(vertex_draw_count * element_size);
-
-			u32 data_size = verts_allocated * element_size;
-			u32 gl_type = to_gl_internal_type(vertex_info.type, vertex_info.size);
-			auto &texture = m_gl_attrib_buffers[index];
-
-			u32 buffer_offset = 0;
-
-			// Get source pointer
-			u32 base_offset = rsx::method_registers.vertex_data_base_offset();
-			u32 offset = rsx::method_registers.vertex_arrays_info[index].offset();
-			u32 address = base_offset + rsx::get_address(offset & 0x7fffffff, offset >> 31);
-			const gsl::byte *src_ptr = gsl::narrow_cast<const gsl::byte*>(vm::base(address));
-
-			if (rsx::method_registers.current_draw_clause.command == rsx::draw_command::array)
-			{
-				auto mapping = m_attrib_ring_buffer.alloc_from_reserve(data_size, m_min_texbuffer_alignment);
-				gsl::byte *dst = static_cast<gsl::byte*>(mapping.first);
-				buffer_offset = mapping.second;
-
-				size_t offset = 0;
-				gsl::span<gsl::byte> dest_span(dst, data_size);
-				prepare_buffer_for_writing(dst, vertex_info.type, vertex_info.size, verts_allocated);
-
-				for (const auto &first_count : rsx::method_registers.current_draw_clause.first_count_commands)
-				{
-					write_vertex_array_data_to_buffer(dest_span.subspan(offset), src_ptr, first_count.first, first_count.second, vertex_info.type, vertex_info.size, vertex_info.stride, rsx::get_vertex_type_size_on_host(vertex_info.type, vertex_info.size));
-					offset += first_count.second * element_size;
-				}
-			}
-
-			if (rsx::method_registers.current_draw_clause.command == rsx::draw_command::indexed)
-			{
-				data_size = (max_index + 1) * element_size;
-				auto mapping = m_attrib_ring_buffer.alloc_from_reserve(data_size, m_min_texbuffer_alignment);
-				gsl::byte *dst = static_cast<gsl::byte*>(mapping.first);
-				buffer_offset = mapping.second;
-
-				gsl::span<gsl::byte> dest_span(dst, data_size);
-				prepare_buffer_for_writing(dst, vertex_info.type, vertex_info.size, verts_allocated);
-
-				write_vertex_array_data_to_buffer(dest_span, src_ptr, 0, max_index + 1, vertex_info.type, vertex_info.size, vertex_info.stride, rsx::get_vertex_type_size_on_host(vertex_info.type, vertex_info.size));
-			}
-
-			texture.copy_from(m_attrib_ring_buffer, gl_type, buffer_offset, data_size);
-
-			//Link texture to uniform
-			m_program->uniforms.texture(location, index + texture_index_offset, texture);
-		}
-		else if (rsx::method_registers.register_vertex_info[index].size > 0)
-		{
-			auto &vertex_info = rsx::method_registers.register_vertex_info[index];
-
-			switch (vertex_info.type)
-			{
-			case rsx::vertex_base_type::f:
-			{
-				const u32 element_size = rsx::get_vertex_type_size_on_host(vertex_info.type, vertex_info.size);
-				const u32 gl_type = to_gl_internal_type(vertex_info.type, vertex_info.size);
-				const size_t data_size = element_size;
-
-				auto &texture = m_gl_attrib_buffers[index];
-
-				auto mapping = m_attrib_ring_buffer.alloc_from_reserve(data_size, m_min_texbuffer_alignment);
-				u8 *dst = static_cast<u8*>(mapping.first);
-
-				memcpy(dst, vertex_info.data.data(), element_size);
-				texture.copy_from(m_attrib_ring_buffer, gl_type, mapping.second, data_size);
-
-				//Link texture to uniform
-				m_program->uniforms.texture(location, index + texture_index_offset, texture);
-				break;
-			}
-			default:
-				LOG_ERROR(RSX, "bad non array vertex data format (type=%d, size=%d)", (u32)vertex_info.type, vertex_info.size);
-				break;
-			}
-		}
-		else
-		{
-			glActiveTexture(GL_TEXTURE0 + index + texture_index_offset);
-			glBindTexture(GL_TEXTURE_BUFFER, 0);
-			glProgramUniform1i(m_program->id(), location, index + texture_index_offset);
-			continue;
-		}
+		glActiveTexture(GL_TEXTURE0 + index + texture_index_offset);
+		glBindTexture(GL_TEXTURE_BUFFER, 0);
+		glProgramUniform1i(m_program->id(), location, index + texture_index_offset);
+		continue;
 	}
+
+	vertex_buffer_visitor visitor(verts_allocated, texture_index_offset, m_attrib_ring_buffer, m_program, m_gl_attrib_buffers, m_min_texbuffer_alignment);
+	const auto& vertex_buffers = get_vertex_buffers(rsx::method_registers, { {min_index, verts_allocated } });
+	for (const auto& vbo : vertex_buffers)
+		std::apply_visitor(visitor, vbo);
 	m_attrib_ring_buffer.unmap();
 }
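The GL path (and get_raw_vertex_buffer below) now leans on the draw clause's first/count pairs forming one contiguous run starting at min_index; disjoint ranges would make the single span returned by get_raw_vertex_buffer wrong. A standalone statement of the invariant that the verify() in RSXThread.cpp enforces (is_contiguous is an illustrative name):

    #include <cstddef>
    #include <cstdint>
    #include <utility>
    #include <vector>

    static bool is_contiguous(const std::vector<std::pair<uint32_t, uint32_t>>& ranges)
    {
        for (std::size_t i = 0; i + 1 < ranges.size(); ++i)
            if (ranges[i].first + ranges[i].second != ranges[i + 1].first)
                return false; // a gap or overlap between consecutive first/count pairs
        return true;
    }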
diff --git a/rpcs3/Emu/RSX/RSXThread.cpp b/rpcs3/Emu/RSX/RSXThread.cpp
index b3b13a5bea..42cefd5dc2 100644
--- a/rpcs3/Emu/RSX/RSXThread.cpp
+++ b/rpcs3/Emu/RSX/RSXThread.cpp
@@ -583,6 +583,58 @@ namespace rsx
 		return{ ptr, count * type_size };
 	}
 
+	gsl::span<const gsl::byte> thread::get_raw_vertex_buffer(const rsx::data_array_format_info& vertex_array_info, u32 base_offset, const std::vector<std::pair<u32, u32>>& vertex_ranges) const
+	{
+		u32 offset = vertex_array_info.offset();
+		u32 address = base_offset + rsx::get_address(offset & 0x7fffffff, offset >> 31);
+
+		u32 element_size = rsx::get_vertex_type_size_on_host(vertex_array_info.type, vertex_array_info.size);
+
+		// Disjoint first_counts ranges not supported atm
+		for (int i = 0; i < vertex_ranges.size() - 1; i++)
+		{
+			const std::tuple<u32, u32>& range = vertex_ranges[i];
+			const std::tuple<u32, u32>& next_range = vertex_ranges[i + 1];
+			verify(HERE), (std::get<0>(range) + std::get<1>(range) == std::get<0>(next_range));
+		}
+		u32 first = std::get<0>(vertex_ranges.front());
+		u32 count = std::get<0>(vertex_ranges.back()) + std::get<1>(vertex_ranges.back()) - first;
+
+		const gsl::byte* ptr = gsl::narrow_cast<const gsl::byte*>(vm::base(address));
+		return {ptr + first * vertex_array_info.stride, count * vertex_array_info.stride + element_size};
+	}
+
+	std::vector<std::variant<vertex_array_buffer, vertex_array_register, empty_vertex_array>> thread::get_vertex_buffers(const rsx::rsx_state& state, const std::vector<std::pair<u32, u32>>& vertex_ranges) const
+	{
+		std::vector<std::variant<vertex_array_buffer, vertex_array_register, empty_vertex_array>> result;
+		u32 input_mask = state.vertex_attrib_input_mask();
+		for (u8 index = 0; index < rsx::limits::vertex_count; ++index)
+		{
+			bool enabled = !!(input_mask & (1 << index));
+			if (!enabled)
+				continue;
+
+			if (state.vertex_arrays_info[index].size > 0)
+			{
+				const rsx::data_array_format_info& info = state.vertex_arrays_info[index];
+				result.push_back(vertex_array_buffer{info.type, info.size, info.stride,
+				    get_raw_vertex_buffer(info, state.vertex_data_base_offset(), vertex_ranges), index});
+				continue;
+			}
+
+			if (state.register_vertex_info[index].size > 0)
+			{
+				const rsx::register_vertex_data_info& info = state.register_vertex_info[index];
+				result.push_back(vertex_array_register{info.type, info.size, info.data, index});
+				continue;
+			}
+
+			result.push_back(empty_vertex_array{index});
+		}
+
+		return result;
+	}
+
 	void thread::do_internal_task()
 	{
 		if (m_internal_tasks.empty())
diff --git a/rpcs3/Emu/RSX/RSXThread.h b/rpcs3/Emu/RSX/RSXThread.h
index 5c5070e000..16890d1e0b 100644
--- a/rpcs3/Emu/RSX/RSXThread.h
+++ b/rpcs3/Emu/RSX/RSXThread.h
@@ -17,6 +17,7 @@
 #include "Utilities/Timer.h"
 #include "Utilities/geometry.h"
 #include "rsx_trace.h"
+#include "Utilities/variant.hpp"
 
 extern u64 get_system_time();
 
@@ -143,6 +144,28 @@ namespace rsx
 		}
 	};
 
+	struct vertex_array_buffer
+	{
+		rsx::vertex_base_type type;
+		u8 attribute_size;
+		u8 stride;
+		gsl::span<const gsl::byte> data;
+		u8 index;
+	};
+
+	struct vertex_array_register
+	{
+		rsx::vertex_base_type type;
+		u8 attribute_size;
+		std::array<u32, 4> data;
+		u8 index;
+	};
+
+	struct empty_vertex_array
+	{
+		u8 index;
+	};
+
 	class thread : public named_thread
 	{
 		std::shared_ptr<thread_ctrl> m_vblank_thread;
@@ -237,6 +260,9 @@ namespace rsx
 		virtual bool on_access_violation(u32 address, bool is_writing) { return false; }
 
 		gsl::span<const gsl::byte> get_raw_index_array(const std::vector<std::pair<u32, u32> >& draw_indexed_clause) const;
+		gsl::span<const gsl::byte> get_raw_vertex_buffer(const rsx::data_array_format_info&, u32 base_offset, const std::vector<std::pair<u32, u32>>& vertex_ranges) const;
+
+		std::vector<std::variant<vertex_array_buffer, vertex_array_register, empty_vertex_array>> get_vertex_buffers(const rsx::rsx_state& state, const std::vector<std::pair<u32, u32>>& vertex_ranges) const;
 
 	private:
 		std::mutex m_mtx_task;
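The three structs above turn each backend's upload loop into a single visitation pass: get_vertex_buffers() normalizes the per-attribute register state into variants, and each backend supplies one visitor type with an overload per alternative. A standalone model of that flow (simplified stand-in types; the real alternatives are rsx::vertex_array_buffer and friends):

    #include <vector>
    #include "Utilities/variant.hpp"

    struct array_buffer   { unsigned index; }; // attribute streamed from memory
    struct register_value { unsigned index; }; // attribute held in a register
    struct empty_attr     { unsigned index; }; // attribute referenced but unset

    using attribute = std::variant<array_buffer, register_value, empty_attr>;

    struct upload_visitor
    {
        unsigned uploaded = 0, bound = 0, skipped = 0;
        void operator()(const array_buffer&)   { ++uploaded; } // stream vertex data
        void operator()(const register_value&) { ++bound; }    // copy the register constant
        void operator()(const empty_attr&)     { ++skipped; }  // bind a dummy buffer
    };

    int main()
    {
        std::vector<attribute> attrs{array_buffer{0}, register_value{1}, empty_attr{2}};
        upload_visitor v;
        for (auto& a : attrs)
            std::apply_visitor(v, a);
        return int(v.uploaded + v.bound + v.skipped); // 3
    }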
diff --git a/rpcs3/Emu/RSX/VK/VKGSRender.h b/rpcs3/Emu/RSX/VK/VKGSRender.h
index 7abb005dca..8f2ec08049 100644
--- a/rpcs3/Emu/RSX/VK/VKGSRender.h
+++ b/rpcs3/Emu/RSX/VK/VKGSRender.h
@@ -88,7 +88,7 @@ private:
 	/// returns primitive topology, is_indexed, index_count, offset in index buffer, index type
 	std::tuple<VkPrimitiveTopology, u32, std::optional<std::tuple<VkDeviceSize, VkIndexType>>> upload_vertex_data();
 
-	void upload_vertex_buffers(u32 input_mask, u32 vertex_max_index);
+	void upload_vertex_buffers(u32 min_index, u32 vertex_max_index);
 
 	/// returns number of vertex drawn
 	u32 upload_inlined_array();
diff --git a/rpcs3/Emu/RSX/VK/VKVertexBuffers.cpp b/rpcs3/Emu/RSX/VK/VKVertexBuffers.cpp
index 84c7378fc7..567480f234 100644
--- a/rpcs3/Emu/RSX/VK/VKVertexBuffers.cpp
+++ b/rpcs3/Emu/RSX/VK/VKVertexBuffers.cpp
@@ -233,6 +233,7 @@ VKGSRender::upload_vertex_data()
 	if (is_indexed_draw)
 	{
 		std::tie(min_index, max_index, index_count, index_info) = upload_index_buffer(rsx::method_registers.current_draw_clause);
+		min_index = 0; // We need correct index mapping
 	}
 
 	bool primitives_emulated = false;
@@ -248,9 +249,8 @@ VKGSRender::upload_vertex_data()
 	{
 		index_count = rsx::method_registers.current_draw_clause.get_elements_count();
 	}
-
-	min_index = 0;
-	max_index = rsx::method_registers.current_draw_clause.get_elements_count() - 1;
+	min_index = rsx::method_registers.current_draw_clause.first_count_commands.front().first;
+	max_index = rsx::method_registers.current_draw_clause.get_elements_count() + min_index;
 
 	if (rsx::method_registers.current_draw_clause.command == rsx::draw_command::inlined_array)
@@ -265,127 +265,119 @@ VKGSRender::upload_vertex_data()
 	if (rsx::method_registers.current_draw_clause.command == rsx::draw_command::array ||
 	    rsx::method_registers.current_draw_clause.command == rsx::draw_command::indexed)
 	{
-		upload_vertex_buffers(input_mask, max_index);
+		upload_vertex_buffers(min_index, max_index);
 	}
 
 	return std::make_tuple(prims, index_count, index_info);
 }
 
-void VKGSRender::upload_vertex_buffers(u32 input_mask, u32 vertex_max_index)
+namespace
 {
-	for (int index = 0; index < rsx::limits::vertex_count; ++index)
-	{
-		bool enabled = !!(input_mask & (1 << index));
-
-		if (!m_program->has_uniform(s_reg_table[index]))
-			continue;
-
-		if (!enabled)
-		{
-			continue;
-		}
+	struct vertex_buffer_visitor
+	{
+		vertex_buffer_visitor(u32 vtx_cnt, VkDevice dev,
+		    vk::vk_data_heap& heap, vk::glsl::program* prog,
+		    VkDescriptorSet desc_set,
+		    std::vector<std::unique_ptr<vk::buffer_view>>& buffer_view_to_clean)
+		    : vertex_count(vtx_cnt)
+		    , m_attrib_ring_info(heap)
+		    , device(dev)
+		    , m_program(prog)
+		    , descriptor_sets(desc_set)
+		    , m_buffer_view_to_clean(buffer_view_to_clean)
+		{
+		}
 
-		if (rsx::method_registers.vertex_arrays_info[index].size > 0)
+		void operator()(const rsx::vertex_array_buffer& vertex_array)
 		{
-			auto &vertex_info = rsx::method_registers.vertex_arrays_info[index];
 			// Fill vertex_array
-			u32 element_size = rsx::get_vertex_type_size_on_host(vertex_info.type, vertex_info.size);
-			u32 real_element_size = vk::get_suitable_vk_size(vertex_info.type, vertex_info.size);
+			u32 element_size = rsx::get_vertex_type_size_on_host(vertex_array.type, vertex_array.attribute_size);
+			u32 real_element_size = vk::get_suitable_vk_size(vertex_array.type, vertex_array.attribute_size);
 
-			u32 upload_size = real_element_size * (vertex_max_index + 1);
-			bool requires_expansion = vk::requires_component_expansion(vertex_info.type, vertex_info.size);
+			u32 upload_size = real_element_size * vertex_count;
+			bool requires_expansion = vk::requires_component_expansion(vertex_array.type, vertex_array.attribute_size);
 
-			// Get source pointer
-			u32 base_offset = rsx::method_registers.vertex_data_base_offset();
-			u32 offset = rsx::method_registers.vertex_arrays_info[index].offset();
-			u32 address = base_offset + rsx::get_address(offset & 0x7fffffff, offset >> 31);
-			const gsl::byte *src_ptr = gsl::narrow_cast<const gsl::byte*>(vm::base(address));
-
-			u32 num_stored_verts = vertex_max_index + 1;
 			VkDeviceSize offset_in_attrib_buffer = m_attrib_ring_info.alloc<256>(upload_size);
 			void *dst = m_attrib_ring_info.map(offset_in_attrib_buffer, upload_size);
-			vk::prepare_buffer_for_writing(dst, vertex_info.type, vertex_info.size, vertex_max_index + 1);
+			vk::prepare_buffer_for_writing(dst, vertex_array.type, vertex_array.attribute_size, vertex_count);
 			gsl::span<gsl::byte> dest_span(static_cast<gsl::byte*>(dst), upload_size);
-			if (rsx::method_registers.current_draw_clause.command == rsx::draw_command::array)
-			{
-				VkDeviceSize offset = 0;
-				for (const auto &first_count : rsx::method_registers.current_draw_clause.first_count_commands)
-				{
-					write_vertex_array_data_to_buffer(dest_span.subspan(offset), src_ptr, first_count.first, first_count.second, vertex_info.type, vertex_info.size, vertex_info.stride, real_element_size);
-					offset += first_count.second * real_element_size;
-				}
-			}
-			else if (rsx::method_registers.current_draw_clause.command == rsx::draw_command::indexed)
-			{
-				write_vertex_array_data_to_buffer(dest_span, src_ptr, 0, vertex_max_index + 1, vertex_info.type, vertex_info.size, vertex_info.stride, real_element_size);
-			}
+			write_vertex_array_data_to_buffer(dest_span, vertex_array.data, vertex_count, vertex_array.type, vertex_array.attribute_size, vertex_array.stride, real_element_size);
 
 			m_attrib_ring_info.unmap();
 
-			const VkFormat format = vk::get_suitable_vk_format(vertex_info.type, vertex_info.size);
+			const VkFormat format = vk::get_suitable_vk_format(vertex_array.type, vertex_array.attribute_size);
 
-			m_buffer_view_to_clean.push_back(std::make_unique<vk::buffer_view>(*m_device, m_attrib_ring_info.heap->value, format, offset_in_attrib_buffer, upload_size));
-			m_program->bind_uniform(m_buffer_view_to_clean.back()->value, s_reg_table[index], descriptor_sets);
+			m_buffer_view_to_clean.push_back(std::make_unique<vk::buffer_view>(device, m_attrib_ring_info.heap->value, format, offset_in_attrib_buffer, upload_size));
+			m_program->bind_uniform(m_buffer_view_to_clean.back()->value, s_reg_table[vertex_array.index], descriptor_sets);
 		}
-		else if (rsx::method_registers.register_vertex_info[index].size > 0)
-		{
-			//Untested!
-			auto &vertex_info = rsx::method_registers.register_vertex_info[index];
 
-			switch (vertex_info.type)
+		void operator()(const rsx::vertex_array_register& vertex_register)
+		{
+			switch (vertex_register.type)
 			{
 			case rsx::vertex_base_type::f:
 			{
-				size_t data_size = rsx::get_vertex_type_size_on_host(vertex_info.type, vertex_info.size);
-				const VkFormat format = vk::get_suitable_vk_format(vertex_info.type, vertex_info.size);
+				size_t data_size = rsx::get_vertex_type_size_on_host(vertex_register.type, vertex_register.attribute_size);
+				const VkFormat format = vk::get_suitable_vk_format(vertex_register.type, vertex_register.attribute_size);
 
 				u32 offset_in_attrib_buffer = 0;
-				void *data_ptr = vertex_info.data.data();
 
-				if (vk::requires_component_expansion(vertex_info.type, vertex_info.size))
+				if (vk::requires_component_expansion(vertex_register.type, vertex_register.attribute_size))
 				{
-					const u32 num_stored_verts = static_cast<u32>(data_size / (sizeof(float) * vertex_info.size));
-					const u32 real_element_size = vk::get_suitable_vk_size(vertex_info.type, vertex_info.size);
+					const u32 num_stored_verts = static_cast<u32>(data_size / (sizeof(float) * vertex_register.attribute_size));
+					const u32 real_element_size = vk::get_suitable_vk_size(vertex_register.type, vertex_register.attribute_size);
 
 					data_size = real_element_size * num_stored_verts;
 					offset_in_attrib_buffer = m_attrib_ring_info.alloc<256>(data_size);
 					void *dst = m_attrib_ring_info.map(offset_in_attrib_buffer, data_size);
-					vk::expand_array_components(reinterpret_cast<const float*>(vertex_info.data.data()), dst, num_stored_verts);
+					vk::expand_array_components(reinterpret_cast<const float*>(vertex_register.data.data()), dst, num_stored_verts);
 					m_attrib_ring_info.unmap();
 				}
 				else
 				{
 					offset_in_attrib_buffer = m_attrib_ring_info.alloc<256>(data_size);
 					void *dst = m_attrib_ring_info.map(offset_in_attrib_buffer, data_size);
-					memcpy(dst, vertex_info.data.data(), data_size);
+					memcpy(dst, vertex_register.data.data(), data_size);
 					m_attrib_ring_info.unmap();
 				}
 
-				m_buffer_view_to_clean.push_back(std::make_unique<vk::buffer_view>(*m_device, m_attrib_ring_info.heap->value, format, offset_in_attrib_buffer, data_size));
-				m_program->bind_uniform(m_buffer_view_to_clean.back()->value, s_reg_table[index], descriptor_sets);
+				m_buffer_view_to_clean.push_back(std::make_unique<vk::buffer_view>(device, m_attrib_ring_info.heap->value, format, offset_in_attrib_buffer, data_size));
+				m_program->bind_uniform(m_buffer_view_to_clean.back()->value, s_reg_table[vertex_register.index], descriptor_sets);
 				break;
 			}
 			default:
-				fmt::throw_exception("Unknown base type %d" HERE, (u32)vertex_info.type);
+				fmt::throw_exception("Unknown base type %d" HERE, (u32)vertex_register.type);
 			}
 		}
-		else
-		{
-			//This section should theoretically be unreachable (data stream without available data)
-			//Variable is defined in the shaders but no data is available
-			//Bind a buffer view to keep the driver from crashing if access is attempted.
+
+		void operator()(const rsx::empty_vertex_array& vbo)
+		{
 			u32 offset_in_attrib_buffer = m_attrib_ring_info.alloc<256>(32);
 			void *dst = m_attrib_ring_info.map(offset_in_attrib_buffer, 32);
 			memset(dst, 0, 32);
 			m_attrib_ring_info.unmap();
-
-			m_buffer_view_to_clean.push_back(std::make_unique<vk::buffer_view>(*m_device, m_attrib_ring_info.heap->value, VK_FORMAT_R32_SFLOAT, offset_in_attrib_buffer, 32));
-			m_program->bind_uniform(m_buffer_view_to_clean.back()->value, s_reg_table[index], descriptor_sets);
+			m_buffer_view_to_clean.push_back(std::make_unique<vk::buffer_view>(device, m_attrib_ring_info.heap->value, VK_FORMAT_R32_SFLOAT, offset_in_attrib_buffer, 32));
+			m_program->bind_uniform(m_buffer_view_to_clean.back()->value, s_reg_table[vbo.index], descriptor_sets);
 		}
-	}
+
+	protected:
+		VkDevice device;
+		u32 vertex_count;
+		vk::vk_data_heap& m_attrib_ring_info;
+		vk::glsl::program* m_program;
+		VkDescriptorSet descriptor_sets;
+		std::vector<std::unique_ptr<vk::buffer_view>>& m_buffer_view_to_clean;
+	};
+
+} // End anonymous namespace
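vk::requires_component_expansion and vk::expand_array_components compensate for Vulkan's lack of guaranteed 3-component texel-buffer formats by padding vec3 float attributes out to vec4. Those helpers live elsewhere in rpcs3; a minimal model of the assumed behavior:

    // Assumed semantics only — the padding value and layout are not confirmed
    // by this patch.
    static void expand_vec3_to_vec4(const float* src, float* dst, unsigned vertex_count)
    {
        for (unsigned v = 0; v < vertex_count; ++v)
        {
            dst[4 * v + 0] = src[3 * v + 0];
            dst[4 * v + 1] = src[3 * v + 1];
            dst[4 * v + 2] = src[3 * v + 2];
            dst[4 * v + 3] = 1.f; // padded W component (assumed)
        }
    }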
+
+void VKGSRender::upload_vertex_buffers(u32 min_index, u32 vertex_max_index)
+{
+	vertex_buffer_visitor visitor(vertex_max_index - min_index + 1, *m_device, m_attrib_ring_info, m_program, descriptor_sets, m_buffer_view_to_clean);
+	const auto& vertex_buffers = get_vertex_buffers(rsx::method_registers, {{min_index, vertex_max_index - min_index + 1}});
+	for (const auto& vbo : vertex_buffers)
+		std::apply_visitor(visitor, vbo);
 }
 
 u32 VKGSRender::upload_inlined_array()
diff --git a/rpcs3/Gui/SettingsDialog.cpp b/rpcs3/Gui/SettingsDialog.cpp
index c61beade72..2771aa4ca5 100644
--- a/rpcs3/Gui/SettingsDialog.cpp
+++ b/rpcs3/Gui/SettingsDialog.cpp
@@ -213,8 +213,8 @@ SettingsDialog::SettingsDialog(wxWindow* parent)
 
 	std::vector<std::unique_ptr<cfg_adapter>> pads;
 
-	static const u32 width = 458;
-	static const u32 height = 400;
+	static const u32 width = 458 * 2;
+	static const u32 height = 400 * 2;
 
 	// Settings panels
 	wxNotebook* nb_config = new wxNotebook(this, wxID_ANY, wxPoint(6, 6), wxSize(width, height));
@@ -521,7 +521,7 @@ SettingsDialog::SettingsDialog(wxWindow* parent)
 	SetSizerAndFit(s_subpanel_system, false);
 	SetSizerAndFit(s_b_panel, false);
 
-	SetSize(width + 26, height + 80);
+	SetSize(width + 26 * 2, height + 80 * 2);
 
 	if (ShowModal() == wxID_OK)
 	{