Merge pull request #2084 from vlj/rsx-refactor

RSX: Put in-array and in-register vertex buffers into a variant type.
vlj 2016-08-25 21:14:51 +02:00 committed by GitHub
commit 7f3cb4d3c9
12 changed files with 1444 additions and 251 deletions

View File

@@ -0,0 +1,124 @@
#ifndef MAPBOX_UTIL_RECURSIVE_WRAPPER_HPP
#define MAPBOX_UTIL_RECURSIVE_WRAPPER_HPP
// Based on variant/recursive_wrapper.hpp from boost.
//
// Original license:
//
// Copyright (c) 2002-2003
// Eric Friedman, Itay Maman
//
// Distributed under the Boost Software License, Version 1.0. (See
// accompanying file LICENSE_1_0.txt or copy at
// http://www.boost.org/LICENSE_1_0.txt)
#include <cassert>
#include <utility>
//namespace mapbox {
//namespace util {
namespace std {
template <typename T>
class recursive_wrapper
{
T* p_;
void assign(T const& rhs)
{
this->get() = rhs;
}
public:
using type = T;
/**
* Default constructor default initializes the internally stored value.
* For POD types this means nothing is done and the storage is
* uninitialized.
*
* @throws std::bad_alloc if there is insufficient memory for an object
* of type T.
* @throws any exception thrown by the default constructor of T.
*/
recursive_wrapper()
: p_(new T){}
~recursive_wrapper() noexcept { delete p_; }
recursive_wrapper(recursive_wrapper const& operand)
: p_(new T(operand.get())) {}
recursive_wrapper(T const& operand)
: p_(new T(operand)) {}
recursive_wrapper(recursive_wrapper&& operand)
: p_(new T(std::move(operand.get()))) {}
recursive_wrapper(T&& operand)
: p_(new T(std::move(operand))) {}
inline recursive_wrapper& operator=(recursive_wrapper const& rhs)
{
assign(rhs.get());
return *this;
}
inline recursive_wrapper& operator=(T const& rhs)
{
assign(rhs);
return *this;
}
inline void swap(recursive_wrapper& operand) noexcept
{
T* temp = operand.p_;
operand.p_ = p_;
p_ = temp;
}
recursive_wrapper& operator=(recursive_wrapper&& rhs) noexcept
{
swap(rhs);
return *this;
}
recursive_wrapper& operator=(T&& rhs)
{
get() = std::move(rhs);
return *this;
}
T& get()
{
assert(p_);
return *get_pointer();
}
T const& get() const
{
assert(p_);
return *get_pointer();
}
T* get_pointer() { return p_; }
const T* get_pointer() const { return p_; }
operator T const&() const { return this->get(); }
operator T&() { return this->get(); }
}; // class recursive_wrapper
template <typename T>
inline void swap(recursive_wrapper<T>& lhs, recursive_wrapper<T>& rhs) noexcept
{
lhs.swap(rhs);
}
}
//} // namespace util
//} // namespace mapbox
#endif // MAPBOX_UTIL_RECURSIVE_WRAPPER_HPP
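
Editor's note: a minimal usage sketch (not part of this commit) of what recursive_wrapper is for. Because the wrapped T lives behind a heap pointer, a variant can indirectly contain its own type. The names expr, add_node and eval_visitor are illustrative only; the std:: qualification matches the namespace the commit injects these types into, and variant.hpp is the header added below.

#include "recursive_wrapper.hpp"
#include "variant.hpp"

struct add_node; // incomplete here: recursive_wrapper only stores a T*

// An expression is either a leaf value or a heap-allocated addition node.
using expr = std::variant<int, std::recursive_wrapper<add_node>>;

struct add_node
{
    expr lhs;
    expr rhs;
};

struct eval_visitor
{
    using result_type = int;
    int operator()(int v) const { return v; }
    int operator()(const add_node& n) const
    {
        return std::apply_visitor(*this, n.lhs) + std::apply_visitor(*this, n.rhs);
    }
};

// expr e = add_node{ add_node{ expr{1}, expr{2} }, expr{4} };
// std::apply_visitor(eval_visitor{}, e) == 7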

Utilities/variant.hpp Normal file
View File

@@ -0,0 +1,973 @@
#ifndef MAPBOX_UTIL_VARIANT_HPP
#define MAPBOX_UTIL_VARIANT_HPP
#include <cassert>
#include <cstddef> // size_t
#include <new> // operator new
#include <stdexcept> // runtime_error
#include <string>
#include <tuple>
#include <type_traits>
#include <typeinfo>
#include <utility>
#include "recursive_wrapper.hpp"
// clang-format off
// [[deprecated]] is only available in C++14, use this for the time being
#if __cplusplus <= 201103L
# ifdef __GNUC__
# define MAPBOX_VARIANT_DEPRECATED __attribute__((deprecated))
# elif defined(_MSC_VER)
# define MAPBOX_VARIANT_DEPRECATED __declspec(deprecated)
# else
# define MAPBOX_VARIANT_DEPRECATED
# endif
#else
# define MAPBOX_VARIANT_DEPRECATED [[deprecated]]
#endif
#ifdef _MSC_VER
// https://msdn.microsoft.com/en-us/library/bw1hbe6y.aspx
# ifdef NDEBUG
# define VARIANT_INLINE __forceinline
# else
# define VARIANT_INLINE //__declspec(noinline)
# endif
#else
# ifdef NDEBUG
# define VARIANT_INLINE //inline __attribute__((always_inline))
# else
# define VARIANT_INLINE __attribute__((noinline))
# endif
#endif
// clang-format on
// Exceptions
#if defined( __EXCEPTIONS) || defined( _MSC_VER)
#define HAS_EXCEPTIONS
#endif
#define VARIANT_MAJOR_VERSION 1
#define VARIANT_MINOR_VERSION 1
#define VARIANT_PATCH_VERSION 0
#define VARIANT_VERSION (VARIANT_MAJOR_VERSION * 100000) + (VARIANT_MINOR_VERSION * 100) + (VARIANT_PATCH_VERSION)
//namespace mapbox {
// namespace util {
namespace std {
// XXX This should derive from std::logic_error instead of std::runtime_error.
// See https://github.com/mapbox/variant/issues/48 for details.
class bad_variant_access : public std::runtime_error
{
public:
explicit bad_variant_access(const std::string& what_arg)
: runtime_error(what_arg) {}
explicit bad_variant_access(const char* what_arg)
: runtime_error(what_arg) {}
}; // class bad_variant_access
template <typename R = void>
struct MAPBOX_VARIANT_DEPRECATED static_visitor
{
using result_type = R;
protected:
static_visitor() {}
~static_visitor() {}
};
namespace detail {
static constexpr std::size_t invalid_value = std::size_t(-1);
template <typename T, typename... Types>
struct direct_type;
template <typename T, typename First, typename... Types>
struct direct_type<T, First, Types...>
{
static constexpr std::size_t index = std::is_same<T, First>::value
? sizeof...(Types)
: direct_type<T, Types...>::index;
};
template <typename T>
struct direct_type<T>
{
static constexpr std::size_t index = invalid_value;
};
#if __cpp_lib_logical_traits >= 201510L
using std::disjunction;
#else
template <typename...>
struct disjunction : std::false_type {};
template <typename B1>
struct disjunction<B1> : B1 {};
template <typename B1, typename B2>
struct disjunction<B1, B2> : std::conditional<B1::value, B1, B2>::type {};
template <typename B1, typename... Bs>
struct disjunction<B1, Bs...> : std::conditional<B1::value, B1, disjunction<Bs...>>::type {};
#endif
template <typename T, typename... Types>
struct convertible_type;
template <typename T, typename First, typename... Types>
struct convertible_type<T, First, Types...>
{
static constexpr std::size_t index = std::is_convertible<T, First>::value
? disjunction<std::is_convertible<T, Types>...>::value ? invalid_value : sizeof...(Types)
: convertible_type<T, Types...>::index;
};
template <typename T>
struct convertible_type<T>
{
static constexpr std::size_t index = invalid_value;
};
template <typename T, typename... Types>
struct value_traits
{
using value_type = typename std::remove_const<typename std::remove_reference<T>::type>::type;
static constexpr std::size_t direct_index = direct_type<value_type, Types...>::index;
static constexpr bool is_direct = direct_index != invalid_value;
static constexpr std::size_t index = is_direct ? direct_index : convertible_type<value_type, Types...>::index;
static constexpr bool is_valid = index != invalid_value;
static constexpr std::size_t tindex = is_valid ? sizeof...(Types)-index : 0;
using target_type = typename std::tuple_element<tindex, std::tuple<void, Types...>>::type;
};
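// Editor's note (not part of the original header): the indices computed above
// count *down* the remaining pack, so the first alternative gets the largest
// value and the last gets 0. For Types... = int, float, std::string:
//   direct_type<int,         int, float, std::string>::index == 2
//   direct_type<float,       int, float, std::string>::index == 1
//   direct_type<std::string, int, float, std::string>::index == 0
// variant::which() converts this internal index back to the conventional
// 0-based declaration order: which() == sizeof...(Types) - type_index - 1.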
template <typename T, typename R = void>
struct enable_if_type
{
using type = R;
};
template <typename F, typename V, typename Enable = void>
struct result_of_unary_visit
{
using type = typename std::result_of<F(V&)>::type;
};
template <typename F, typename V>
struct result_of_unary_visit<F, V, typename enable_if_type<typename F::result_type>::type>
{
using type = typename F::result_type;
};
template <typename F, typename V, typename Enable = void>
struct result_of_binary_visit
{
using type = typename std::result_of<F(V&, V&)>::type;
};
template <typename F, typename V>
struct result_of_binary_visit<F, V, typename enable_if_type<typename F::result_type>::type>
{
using type = typename F::result_type;
};
template <std::size_t arg1, std::size_t... others>
struct static_max;
template <std::size_t arg>
struct static_max<arg>
{
static const std::size_t value = arg;
};
template <std::size_t arg1, std::size_t arg2, std::size_t... others>
struct static_max<arg1, arg2, others...>
{
static const std::size_t value = arg1 >= arg2 ? static_max<arg1, others...>::value : static_max<arg2, others...>::value;
};
template <typename... Types>
struct variant_helper;
template <typename T, typename... Types>
struct variant_helper<T, Types...>
{
VARIANT_INLINE static void destroy(const std::size_t type_index, void* data)
{
if (type_index == sizeof...(Types))
{
reinterpret_cast<T*>(data)->~T();
}
else
{
variant_helper<Types...>::destroy(type_index, data);
}
}
VARIANT_INLINE static void move(const std::size_t old_type_index, void* old_value, void* new_value)
{
if (old_type_index == sizeof...(Types))
{
new (new_value) T(std::move(*reinterpret_cast<T*>(old_value)));
}
else
{
variant_helper<Types...>::move(old_type_index, old_value, new_value);
}
}
VARIANT_INLINE static void copy(const std::size_t old_type_index, const void* old_value, void* new_value)
{
if (old_type_index == sizeof...(Types))
{
new (new_value) T(*reinterpret_cast<const T*>(old_value));
}
else
{
variant_helper<Types...>::copy(old_type_index, old_value, new_value);
}
}
};
template <>
struct variant_helper<>
{
VARIANT_INLINE static void destroy(const std::size_t, void*) {}
VARIANT_INLINE static void move(const std::size_t, void*, void*) {}
VARIANT_INLINE static void copy(const std::size_t, const void*, void*) {}
};
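// Editor's note: an example of the recursion above. For variant<int, float>,
// int is stored with type_index 1 and float with 0 (see direct_type), so
// destroy(1, data) matches immediately in variant_helper<int, float> (there
// sizeof...(Types) == 1) and destroys the int in place, while destroy(0, data)
// falls through once and destroys the float in variant_helper<float>.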
template <typename T>
struct unwrapper
{
static T const& apply_const(T const& obj) { return obj; }
static T& apply(T& obj) { return obj; }
};
template <typename T>
struct unwrapper<recursive_wrapper<T>>
{
static auto apply_const(recursive_wrapper<T> const& obj)
-> typename recursive_wrapper<T>::type const&
{
return obj.get();
}
static auto apply(recursive_wrapper<T>& obj)
-> typename recursive_wrapper<T>::type&
{
return obj.get();
}
};
template <typename T>
struct unwrapper<std::reference_wrapper<T>>
{
static auto apply_const(std::reference_wrapper<T> const& obj)
-> typename std::reference_wrapper<T>::type const&
{
return obj.get();
}
static auto apply(std::reference_wrapper<T>& obj)
-> typename std::reference_wrapper<T>::type&
{
return obj.get();
}
};
template <typename F, typename V, typename R, typename... Types>
struct dispatcher;
template <typename F, typename V, typename R, typename T, typename... Types>
struct dispatcher<F, V, R, T, Types...>
{
VARIANT_INLINE static R apply_const(V const& v, F&& f)
{
if (v.template is<T>())
{
return f(unwrapper<T>::apply_const(v.template get_unchecked<T>()));
}
else
{
return dispatcher<F, V, R, Types...>::apply_const(v, std::forward<F>(f));
}
}
VARIANT_INLINE static R apply(V& v, F&& f)
{
if (v.template is<T>())
{
return f(unwrapper<T>::apply(v.template get_unchecked<T>()));
}
else
{
return dispatcher<F, V, R, Types...>::apply(v, std::forward<F>(f));
}
}
};
template <typename F, typename V, typename R, typename T>
struct dispatcher<F, V, R, T>
{
VARIANT_INLINE static R apply_const(V const& v, F&& f)
{
return f(unwrapper<T>::apply_const(v.template get_unchecked<T>()));
}
VARIANT_INLINE static R apply(V& v, F&& f)
{
return f(unwrapper<T>::apply(v.template get_unchecked<T>()));
}
};
template <typename F, typename V, typename R, typename T, typename... Types>
struct binary_dispatcher_rhs;
template <typename F, typename V, typename R, typename T0, typename T1, typename... Types>
struct binary_dispatcher_rhs<F, V, R, T0, T1, Types...>
{
VARIANT_INLINE static R apply_const(V const& lhs, V const& rhs, F&& f)
{
if (rhs.template is<T1>()) // call binary functor
{
return f(unwrapper<T0>::apply_const(lhs.template get_unchecked<T0>()),
unwrapper<T1>::apply_const(rhs.template get_unchecked<T1>()));
}
else
{
return binary_dispatcher_rhs<F, V, R, T0, Types...>::apply_const(lhs, rhs, std::forward<F>(f));
}
}
VARIANT_INLINE static R apply(V& lhs, V& rhs, F&& f)
{
if (rhs.template is<T1>()) // call binary functor
{
return f(unwrapper<T0>::apply(lhs.template get_unchecked<T0>()),
unwrapper<T1>::apply(rhs.template get_unchecked<T1>()));
}
else
{
return binary_dispatcher_rhs<F, V, R, T0, Types...>::apply(lhs, rhs, std::forward<F>(f));
}
}
};
template <typename F, typename V, typename R, typename T0, typename T1>
struct binary_dispatcher_rhs<F, V, R, T0, T1>
{
VARIANT_INLINE static R apply_const(V const& lhs, V const& rhs, F&& f)
{
return f(unwrapper<T0>::apply_const(lhs.template get_unchecked<T0>()),
unwrapper<T1>::apply_const(rhs.template get_unchecked<T1>()));
}
VARIANT_INLINE static R apply(V& lhs, V& rhs, F&& f)
{
return f(unwrapper<T0>::apply(lhs.template get_unchecked<T0>()),
unwrapper<T1>::apply(rhs.template get_unchecked<T1>()));
}
};
template <typename F, typename V, typename R, typename T, typename... Types>
struct binary_dispatcher_lhs;
template <typename F, typename V, typename R, typename T0, typename T1, typename... Types>
struct binary_dispatcher_lhs<F, V, R, T0, T1, Types...>
{
VARIANT_INLINE static R apply_const(V const& lhs, V const& rhs, F&& f)
{
if (lhs.template is<T1>()) // call binary functor
{
return f(unwrapper<T1>::apply_const(lhs.template get_unchecked<T1>()),
unwrapper<T0>::apply_const(rhs.template get_unchecked<T0>()));
}
else
{
return binary_dispatcher_lhs<F, V, R, T0, Types...>::apply_const(lhs, rhs, std::forward<F>(f));
}
}
VARIANT_INLINE static R apply(V& lhs, V& rhs, F&& f)
{
if (lhs.template is<T1>()) // call binary functor
{
return f(unwrapper<T1>::apply(lhs.template get_unchecked<T1>()),
unwrapper<T0>::apply(rhs.template get_unchecked<T0>()));
}
else
{
return binary_dispatcher_lhs<F, V, R, T0, Types...>::apply(lhs, rhs, std::forward<F>(f));
}
}
};
template <typename F, typename V, typename R, typename T0, typename T1>
struct binary_dispatcher_lhs<F, V, R, T0, T1>
{
VARIANT_INLINE static R apply_const(V const& lhs, V const& rhs, F&& f)
{
return f(unwrapper<T1>::apply_const(lhs.template get_unchecked<T1>()),
unwrapper<T0>::apply_const(rhs.template get_unchecked<T0>()));
}
VARIANT_INLINE static R apply(V& lhs, V& rhs, F&& f)
{
return f(unwrapper<T1>::apply(lhs.template get_unchecked<T1>()),
unwrapper<T0>::apply(rhs.template get_unchecked<T0>()));
}
};
template <typename F, typename V, typename R, typename... Types>
struct binary_dispatcher;
template <typename F, typename V, typename R, typename T, typename... Types>
struct binary_dispatcher<F, V, R, T, Types...>
{
VARIANT_INLINE static R apply_const(V const& v0, V const& v1, F&& f)
{
if (v0.template is<T>())
{
if (v1.template is<T>())
{
return f(unwrapper<T>::apply_const(v0.template get_unchecked<T>()),
unwrapper<T>::apply_const(v1.template get_unchecked<T>())); // call binary functor
}
else
{
return binary_dispatcher_rhs<F, V, R, T, Types...>::apply_const(v0, v1, std::forward<F>(f));
}
}
else if (v1.template is<T>())
{
return binary_dispatcher_lhs<F, V, R, T, Types...>::apply_const(v0, v1, std::forward<F>(f));
}
return binary_dispatcher<F, V, R, Types...>::apply_const(v0, v1, std::forward<F>(f));
}
VARIANT_INLINE static R apply(V& v0, V& v1, F&& f)
{
if (v0.template is<T>())
{
if (v1.template is<T>())
{
return f(unwrapper<T>::apply(v0.template get_unchecked<T>()),
unwrapper<T>::apply(v1.template get_unchecked<T>())); // call binary functor
}
else
{
return binary_dispatcher_rhs<F, V, R, T, Types...>::apply(v0, v1, std::forward<F>(f));
}
}
else if (v1.template is<T>())
{
return binary_dispatcher_lhs<F, V, R, T, Types...>::apply(v0, v1, std::forward<F>(f));
}
return binary_dispatcher<F, V, R, Types...>::apply(v0, v1, std::forward<F>(f));
}
};
template <typename F, typename V, typename R, typename T>
struct binary_dispatcher<F, V, R, T>
{
VARIANT_INLINE static R apply_const(V const& v0, V const& v1, F&& f)
{
return f(unwrapper<T>::apply_const(v0.template get_unchecked<T>()),
unwrapper<T>::apply_const(v1.template get_unchecked<T>())); // call binary functor
}
VARIANT_INLINE static R apply(V& v0, V& v1, F&& f)
{
return f(unwrapper<T>::apply(v0.template get_unchecked<T>()),
unwrapper<T>::apply(v1.template get_unchecked<T>())); // call binary functor
}
};
// comparator functors
struct equal_comp
{
template <typename T>
bool operator()(T const& lhs, T const& rhs) const
{
return lhs == rhs;
}
};
struct less_comp
{
template <typename T>
bool operator()(T const& lhs, T const& rhs) const
{
return lhs < rhs;
}
};
template <typename Variant, typename Comp>
class comparer
{
public:
explicit comparer(Variant const& lhs) noexcept
: lhs_(lhs) {}
comparer& operator=(comparer const&) = delete;
// visitor
template <typename T>
bool operator()(T const& rhs_content) const
{
T const& lhs_content = lhs_.template get_unchecked<T>();
return Comp()(lhs_content, rhs_content);
}
private:
Variant const& lhs_;
};
} // namespace detail
struct no_init
{
};
template <typename... Types>
class variant
{
static_assert(sizeof...(Types) > 0, "Template parameter type list of variant cannot be empty");
static_assert(!detail::disjunction<std::is_reference<Types>...>::value, "Variant cannot hold reference types. Maybe use std::reference_wrapper?");
private:
static const std::size_t data_size = detail::static_max<sizeof(Types)...>::value;
static const std::size_t data_align = detail::static_max<alignof(Types)...>::value;
using first_type = typename std::tuple_element<0, std::tuple<Types...>>::type;
using data_type = typename std::aligned_storage<data_size, data_align>::type;
using helper_type = detail::variant_helper<Types...>;
std::size_t type_index;
data_type data;
public:
VARIANT_INLINE variant() noexcept(std::is_nothrow_default_constructible<first_type>::value)
: type_index(sizeof...(Types)-1)
{
static_assert(std::is_default_constructible<first_type>::value, "First type in variant must be default constructible to allow default construction of variant");
new (&data) first_type();
}
VARIANT_INLINE variant(no_init) noexcept
: type_index(detail::invalid_value) {}
// http://isocpp.org/blog/2012/11/universal-references-in-c11-scott-meyers
template <typename T, typename Traits = detail::value_traits<T, Types...>,
typename Enable = typename std::enable_if<Traits::is_valid>::type>
VARIANT_INLINE variant(T&& val) noexcept(std::is_nothrow_constructible<typename Traits::target_type, T&&>::value)
: type_index(Traits::index)
{
new (&data) typename Traits::target_type(std::forward<T>(val));
}
VARIANT_INLINE variant(variant<Types...> const& old)
: type_index(old.type_index)
{
helper_type::copy(old.type_index, &old.data, &data);
}
VARIANT_INLINE variant(variant<Types...>&& old) noexcept(std::is_nothrow_move_constructible<std::tuple<Types...>>::value)
: type_index(old.type_index)
{
helper_type::move(old.type_index, &old.data, &data);
}
private:
VARIANT_INLINE void copy_assign(variant<Types...> const& rhs)
{
helper_type::destroy(type_index, &data);
type_index = detail::invalid_value;
helper_type::copy(rhs.type_index, &rhs.data, &data);
type_index = rhs.type_index;
}
VARIANT_INLINE void move_assign(variant<Types...>&& rhs)
{
helper_type::destroy(type_index, &data);
type_index = detail::invalid_value;
helper_type::move(rhs.type_index, &rhs.data, &data);
type_index = rhs.type_index;
}
public:
VARIANT_INLINE variant<Types...>& operator=(variant<Types...>&& other)
{
move_assign(std::move(other));
return *this;
}
VARIANT_INLINE variant<Types...>& operator=(variant<Types...> const& other)
{
copy_assign(other);
return *this;
}
// conversions
// move-assign
template <typename T>
VARIANT_INLINE variant<Types...>& operator=(T&& rhs) noexcept
{
variant<Types...> temp(std::forward<T>(rhs));
move_assign(std::move(temp));
return *this;
}
// copy-assign
template <typename T>
VARIANT_INLINE variant<Types...>& operator=(T const& rhs)
{
variant<Types...> temp(rhs);
copy_assign(temp);
return *this;
}
template <typename T, typename std::enable_if<
(detail::direct_type<T, Types...>::index != detail::invalid_value)>::type* = nullptr>
VARIANT_INLINE bool is() const
{
return type_index == detail::direct_type<T, Types...>::index;
}
template <typename T, typename std::enable_if<
(detail::direct_type<recursive_wrapper<T>, Types...>::index != detail::invalid_value)>::type* = nullptr>
VARIANT_INLINE bool is() const
{
return type_index == detail::direct_type<recursive_wrapper<T>, Types...>::index;
}
VARIANT_INLINE bool valid() const
{
return type_index != detail::invalid_value;
}
template <typename T, typename... Args>
VARIANT_INLINE void set(Args&&... args)
{
helper_type::destroy(type_index, &data);
type_index = detail::invalid_value;
new (&data) T(std::forward<Args>(args)...);
type_index = detail::direct_type<T, Types...>::index;
}
// get_unchecked<T>()
template <typename T, typename std::enable_if<
(detail::direct_type<T, Types...>::index != detail::invalid_value)>::type* = nullptr>
VARIANT_INLINE T& get_unchecked()
{
return *reinterpret_cast<T*>(&data);
}
#ifdef HAS_EXCEPTIONS
// get<T>()
template <typename T, typename std::enable_if<
(detail::direct_type<T, Types...>::index != detail::invalid_value)>::type* = nullptr>
VARIANT_INLINE T& get()
{
if (type_index == detail::direct_type<T, Types...>::index)
{
return *reinterpret_cast<T*>(&data);
}
else
{
throw bad_variant_access("in get<T>()");
}
}
#endif
template <typename T, typename std::enable_if<
(detail::direct_type<T, Types...>::index != detail::invalid_value)>::type* = nullptr>
VARIANT_INLINE T const& get_unchecked() const
{
return *reinterpret_cast<T const*>(&data);
}
#ifdef HAS_EXCEPTIONS
template <typename T, typename std::enable_if<
(detail::direct_type<T, Types...>::index != detail::invalid_value)>::type* = nullptr>
VARIANT_INLINE T const& get() const
{
if (type_index == detail::direct_type<T, Types...>::index)
{
return *reinterpret_cast<T const*>(&data);
}
else
{
throw bad_variant_access("in get<T>()");
}
}
#endif
// get_unchecked<T>() - T stored as recursive_wrapper<T>
template <typename T, typename std::enable_if<
(detail::direct_type<recursive_wrapper<T>, Types...>::index != detail::invalid_value)>::type* = nullptr>
VARIANT_INLINE T& get_unchecked()
{
return (*reinterpret_cast<recursive_wrapper<T>*>(&data)).get();
}
#ifdef HAS_EXCEPTIONS
// get<T>() - T stored as recursive_wrapper<T>
template <typename T, typename std::enable_if<
(detail::direct_type<recursive_wrapper<T>, Types...>::index != detail::invalid_value)>::type* = nullptr>
VARIANT_INLINE T& get()
{
if (type_index == detail::direct_type<recursive_wrapper<T>, Types...>::index)
{
return (*reinterpret_cast<recursive_wrapper<T>*>(&data)).get();
}
else
{
throw bad_variant_access("in get<T>()");
}
}
#endif
template <typename T, typename std::enable_if<
(detail::direct_type<recursive_wrapper<T>, Types...>::index != detail::invalid_value)>::type* = nullptr>
VARIANT_INLINE T const& get_unchecked() const
{
return (*reinterpret_cast<recursive_wrapper<T> const*>(&data)).get();
}
#ifdef HAS_EXCEPTIONS
template <typename T, typename std::enable_if<
(detail::direct_type<recursive_wrapper<T>, Types...>::index != detail::invalid_value)>::type* = nullptr>
VARIANT_INLINE T const& get() const
{
if (type_index == detail::direct_type<recursive_wrapper<T>, Types...>::index)
{
return (*reinterpret_cast<recursive_wrapper<T> const*>(&data)).get();
}
else
{
throw bad_variant_access("in get<T>()");
}
}
#endif
// get_unchecked<T>() - T stored as std::reference_wrapper<T>
template <typename T, typename std::enable_if<
(detail::direct_type<std::reference_wrapper<T>, Types...>::index != detail::invalid_value)>::type* = nullptr>
VARIANT_INLINE T& get_unchecked()
{
return (*reinterpret_cast<std::reference_wrapper<T>*>(&data)).get();
}
#ifdef HAS_EXCEPTIONS
// get<T>() - T stored as std::reference_wrapper<T>
template <typename T, typename std::enable_if<
(detail::direct_type<std::reference_wrapper<T>, Types...>::index != detail::invalid_value)>::type* = nullptr>
VARIANT_INLINE T& get()
{
if (type_index == detail::direct_type<std::reference_wrapper<T>, Types...>::index)
{
return (*reinterpret_cast<std::reference_wrapper<T>*>(&data)).get();
}
else
{
throw bad_variant_access("in get<T>()");
}
}
#endif
template <typename T, typename std::enable_if<
(detail::direct_type<std::reference_wrapper<T const>, Types...>::index != detail::invalid_value)>::type* = nullptr>
VARIANT_INLINE T const& get_unchecked() const
{
return (*reinterpret_cast<std::reference_wrapper<T const> const*>(&data)).get();
}
#ifdef HAS_EXCEPTIONS
template <typename T, typename std::enable_if<
(detail::direct_type<std::reference_wrapper<T const>, Types...>::index != detail::invalid_value)>::type* = nullptr>
VARIANT_INLINE T const& get() const
{
if (type_index == detail::direct_type<std::reference_wrapper<T const>, Types...>::index)
{
return (*reinterpret_cast<std::reference_wrapper<T const> const*>(&data)).get();
}
else
{
throw bad_variant_access("in get<T>()");
}
}
#endif
// This function is deprecated because it returns an internal index field.
// Use which() instead.
MAPBOX_VARIANT_DEPRECATED VARIANT_INLINE std::size_t get_type_index() const
{
return type_index;
}
VARIANT_INLINE int which() const noexcept
{
return static_cast<int>(sizeof...(Types)-type_index - 1);
}
template <typename T, typename std::enable_if<
(detail::direct_type<T, Types...>::index != detail::invalid_value)>::type* = nullptr>
VARIANT_INLINE static constexpr int which() noexcept
{
return static_cast<int>(sizeof...(Types)-detail::direct_type<T, Types...>::index - 1);
}
// visitor
// unary
template <typename F, typename V, typename R = typename detail::result_of_unary_visit<F, first_type>::type>
auto VARIANT_INLINE static visit(V const& v, F&& f)
-> decltype(detail::dispatcher<F, V, R, Types...>::apply_const(v, std::forward<F>(f)))
{
return detail::dispatcher<F, V, R, Types...>::apply_const(v, std::forward<F>(f));
}
// non-const
template <typename F, typename V, typename R = typename detail::result_of_unary_visit<F, first_type>::type>
auto VARIANT_INLINE static visit(V& v, F&& f)
-> decltype(detail::dispatcher<F, V, R, Types...>::apply(v, std::forward<F>(f)))
{
return detail::dispatcher<F, V, R, Types...>::apply(v, std::forward<F>(f));
}
// binary
// const
template <typename F, typename V, typename R = typename detail::result_of_binary_visit<F, first_type>::type>
auto VARIANT_INLINE static binary_visit(V const& v0, V const& v1, F&& f)
-> decltype(detail::binary_dispatcher<F, V, R, Types...>::apply_const(v0, v1, std::forward<F>(f)))
{
return detail::binary_dispatcher<F, V, R, Types...>::apply_const(v0, v1, std::forward<F>(f));
}
// non-const
template <typename F, typename V, typename R = typename detail::result_of_binary_visit<F, first_type>::type>
auto VARIANT_INLINE static binary_visit(V& v0, V& v1, F&& f)
-> decltype(detail::binary_dispatcher<F, V, R, Types...>::apply(v0, v1, std::forward<F>(f)))
{
return detail::binary_dispatcher<F, V, R, Types...>::apply(v0, v1, std::forward<F>(f));
}
~variant() noexcept // no-throw destructor
{
helper_type::destroy(type_index, &data);
}
// comparison operators
// equality
VARIANT_INLINE bool operator==(variant const& rhs) const
{
assert(valid() && rhs.valid());
if (this->which() != rhs.which())
{
return false;
}
detail::comparer<variant, detail::equal_comp> visitor(*this);
return visit(rhs, visitor);
}
VARIANT_INLINE bool operator!=(variant const& rhs) const
{
return !(*this == rhs);
}
// less than
VARIANT_INLINE bool operator<(variant const& rhs) const
{
assert(valid() && rhs.valid());
if (this->which() != rhs.which())
{
return this->which() < rhs.which();
}
detail::comparer<variant, detail::less_comp> visitor(*this);
return visit(rhs, visitor);
}
VARIANT_INLINE bool operator>(variant const& rhs) const
{
return rhs < *this;
}
VARIANT_INLINE bool operator<=(variant const& rhs) const
{
return !(*this > rhs);
}
VARIANT_INLINE bool operator>=(variant const& rhs) const
{
return !(*this < rhs);
}
};
// unary visitor interface
// const
template <typename F, typename V>
auto VARIANT_INLINE apply_visitor(F&& f, V const& v) -> decltype(V::visit(v, std::forward<F>(f)))
{
return V::visit(v, std::forward<F>(f));
}
// non-const
template <typename F, typename V>
auto VARIANT_INLINE apply_visitor(F&& f, V& v) -> decltype(V::visit(v, std::forward<F>(f)))
{
return V::visit(v, std::forward<F>(f));
}
// binary visitor interface
// const
template <typename F, typename V>
auto VARIANT_INLINE apply_visitor(F&& f, V const& v0, V const& v1) -> decltype(V::binary_visit(v0, v1, std::forward<F>(f)))
{
return V::binary_visit(v0, v1, std::forward<F>(f));
}
// non-const
template <typename F, typename V>
auto VARIANT_INLINE apply_visitor(F&& f, V& v0, V& v1) -> decltype(V::binary_visit(v0, v1, std::forward<F>(f)))
{
return V::binary_visit(v0, v1, std::forward<F>(f));
}
// getter interface
#ifdef HAS_EXCEPTIONS
template <typename ResultType, typename T>
auto get(T& var)->decltype(var.template get<ResultType>())
{
return var.template get<ResultType>();
}
#endif
template <typename ResultType, typename T>
ResultType& get_unchecked(T& var)
{
return var.template get_unchecked<ResultType>();
}
#ifdef HAS_EXCEPTIONS
template <typename ResultType, typename T>
auto get(T const& var)->decltype(var.template get<ResultType>())
{
return var.template get<ResultType>();
}
#endif
template <typename ResultType, typename T>
ResultType const& get_unchecked(T const& var)
{
return var.template get_unchecked<ResultType>();
}
}
// } // namespace util
//} // namespace mapbox
#endif // MAPBOX_UTIL_VARIANT_HPP
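
Editor's note: a short, self-contained usage sketch of the header above (illustrative only, not part of the commit). print_visitor is a hypothetical name; the std:: qualification matches the namespace this commit injects the types into.

#include "variant.hpp"
#include <cstdio>
#include <string>

struct print_visitor
{
    using result_type = void;
    void operator()(int v) const { std::printf("int: %d\n", v); }
    void operator()(const std::string& s) const { std::printf("string: %s\n", s.c_str()); }
};

int main()
{
    std::variant<int, std::string> v{42};
    // which() reports the conventional 0-based alternative index: 0 for int.
    std::printf("which: %d\n", v.which());
    std::apply_visitor(print_visitor{}, v); // prints "int: 42"
    v = std::string("hello"); // converting assignment re-activates std::string
    // get<T>() throws bad_variant_access if T is not the active alternative.
    std::printf("%s\n", std::get<std::string>(v).c_str());
    return 0;
}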

View File

@@ -33,11 +33,11 @@ namespace
}
template <typename U, typename T>
void copy_whole_attribute_array(gsl::span<T> dst, const gsl::byte* src_ptr, u8 attribute_size, u8 dst_stride, u32 src_stride, u32 first, u32 vertex_count)
void copy_whole_attribute_array(gsl::span<T> dst, gsl::span<const gsl::byte> src_ptr, u8 attribute_size, u8 dst_stride, u32 src_stride, u32 vertex_count)
{
for (u32 vertex = 0; vertex < vertex_count; ++vertex)
{
const U* src = reinterpret_cast<const U*>(src_ptr + src_stride * (first + vertex));
gsl::span<const U> src = gsl::as_span<const U>(src_ptr.subspan(src_stride * vertex, attribute_size * sizeof(const U)));
for (u32 i = 0; i < attribute_size; ++i)
{
dst[vertex * dst_stride / sizeof(T) + i] = src[i];
@@ -46,7 +46,7 @@ namespace
}
}
void write_vertex_array_data_to_buffer(gsl::span<gsl::byte> raw_dst_span, const gsl::byte *src_ptr, u32 first, u32 count, rsx::vertex_base_type type, u32 vector_element_count, u32 attribute_src_stride, u8 dst_stride)
void write_vertex_array_data_to_buffer(gsl::span<gsl::byte> raw_dst_span, gsl::span<const gsl::byte> src_ptr, u32 count, rsx::vertex_base_type type, u32 vector_element_count, u32 attribute_src_stride, u8 dst_stride)
{
verify(HERE), (vector_element_count > 0);
@@ -56,7 +56,7 @@ void write_vertex_array_data_to_buffer(gsl::span<gsl::byte> raw_dst_span, const
case rsx::vertex_base_type::ub256:
{
gsl::span<u8> dst_span = as_span_workaround<u8>(raw_dst_span);
copy_whole_attribute_array<u8>(dst_span, src_ptr, vector_element_count, dst_stride, attribute_src_stride, first, count);
copy_whole_attribute_array<u8>(dst_span, src_ptr, vector_element_count, dst_stride, attribute_src_stride, count);
return;
}
case rsx::vertex_base_type::s1:
@@ -64,13 +64,13 @@ void write_vertex_array_data_to_buffer(gsl::span<gsl::byte> raw_dst_span, const
case rsx::vertex_base_type::s32k:
{
gsl::span<u16> dst_span = as_span_workaround<u16>(raw_dst_span);
copy_whole_attribute_array<be_t<u16>>(dst_span, src_ptr, vector_element_count, dst_stride, attribute_src_stride, first, count);
copy_whole_attribute_array<be_t<u16>>(dst_span, src_ptr, vector_element_count, dst_stride, attribute_src_stride, count);
return;
}
case rsx::vertex_base_type::f:
{
gsl::span<u32> dst_span = as_span_workaround<u32>(raw_dst_span);
copy_whole_attribute_array<be_t<u32>>(dst_span, src_ptr, vector_element_count, dst_stride, attribute_src_stride, first, count);
copy_whole_attribute_array<be_t<u32>>(dst_span, src_ptr, vector_element_count, dst_stride, attribute_src_stride, count);
return;
}
case rsx::vertex_base_type::cmp:
@@ -78,8 +78,11 @@
gsl::span<u16> dst_span = as_span_workaround<u16>(raw_dst_span);
for (u32 i = 0; i < count; ++i)
{
auto* c_src = (const be_t<u32>*)(src_ptr + attribute_src_stride * (first + i));
const auto& decoded_vector = decode_cmp_vector(*c_src);
be_t<u32> src_value;
memcpy(&src_value,
src_ptr.subspan(attribute_src_stride * i).data(),
sizeof(be_t<u32>));
const auto& decoded_vector = decode_cmp_vector(src_value);
dst_span[i * dst_stride / sizeof(u16)] = decoded_vector[0];
dst_span[i * dst_stride / sizeof(u16) + 1] = decoded_vector[1];
dst_span[i * dst_stride / sizeof(u16) + 2] = decoded_vector[2];
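
Editor's note: the cmp path above now reads the big-endian word with memcpy instead of casting the byte pointer; the read stays inside the span's bounds and avoids unaligned-access and strict-aliasing pitfalls. A generic sketch of the pattern (read_unaligned is a hypothetical helper, not part of the commit; assumes the GSL headers already used by this file):

#include <cstring>

template <typename T> // T must be trivially copyable
T read_unaligned(gsl::span<const gsl::byte> src, std::size_t offset)
{
    T value;
    // memcpy is the portable way to extract a possibly misaligned value of a
    // different type from a raw byte range.
    std::memcpy(&value, src.subspan(offset, sizeof(T)).data(), sizeof(T));
    return value;
}

// Usage, matching the loop above:
//   be_t<u32> src_value = read_unaligned<be_t<u32>>(src_ptr, attribute_src_stride * i);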

View File

@@ -7,10 +7,10 @@
#include "../RSXThread.h"
/**
* Write count vertex attributes from src_ptr starting at first.
* Write count vertex attributes from src_ptr.
* src_ptr array layout is deduced from the type, vector element count and src_stride arguments.
*/
void write_vertex_array_data_to_buffer(gsl::span<gsl::byte> raw_dst_span, const gsl::byte *src_ptr, u32 first, u32 count, rsx::vertex_base_type type, u32 vector_element_count, u32 attribute_src_stride, u8 dst_stride);
void write_vertex_array_data_to_buffer(gsl::span<gsl::byte> raw_dst_span, gsl::span<const gsl::byte> src_ptr, u32 count, rsx::vertex_base_type type, u32 vector_element_count, u32 attribute_src_stride, u8 dst_stride);
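// Editor's example (illustrative values, not part of the commit): copy three
// two-component float attributes from a source with a 16-byte stride into a
// tightly packed destination (8 bytes per vertex):
//   write_vertex_array_data_to_buffer(dst_span, src_span, 3,
//       rsx::vertex_base_type::f, 2, 16, 8);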
/*
* If the primitive mode is not supported and needs to be emulated (using an index buffer), returns false.

View File

@@ -7,6 +7,7 @@
#include "../Common/BufferUtils.h"
#include "D3D12Formats.h"
#include "../rsx_methods.h"
#include <Utilities/variant.hpp>
namespace
{
@@ -80,77 +81,90 @@
}
}
namespace
{
struct vertex_buffer_visitor
{
std::vector<D3D12_SHADER_RESOURCE_VIEW_DESC> vertex_buffer_views;
vertex_buffer_visitor(u32 vtx_cnt, ID3D12GraphicsCommandList* cmdlst, ID3D12Resource* write_vertex_buffer,
d3d12_data_heap& heap)
: vertex_count(vtx_cnt)
, offset_in_vertex_buffers_buffer(0)
, m_buffer_data(heap)
, command_list(cmdlst)
, m_vertex_buffer_data(write_vertex_buffer)
{
}
void operator()(const rsx::vertex_array_buffer& vertex_array)
{
u32 element_size = rsx::get_vertex_type_size_on_host(vertex_array.type, vertex_array.attribute_size);
UINT buffer_size = element_size * vertex_count;
size_t heap_offset = m_buffer_data.alloc<D3D12_CONSTANT_BUFFER_DATA_PLACEMENT_ALIGNMENT>(buffer_size);
void* mapped_buffer = m_buffer_data.map<void>(CD3DX12_RANGE(heap_offset, heap_offset + buffer_size));
gsl::span<gsl::byte> mapped_buffer_span = {(gsl::byte*)mapped_buffer, gsl::narrow_cast<int>(buffer_size)};
write_vertex_array_data_to_buffer(mapped_buffer_span, vertex_array.data, vertex_count, vertex_array.type, vertex_array.attribute_size, vertex_array.stride, element_size);
m_buffer_data.unmap(CD3DX12_RANGE(heap_offset, heap_offset + buffer_size));
command_list->CopyBufferRegion(m_vertex_buffer_data, offset_in_vertex_buffers_buffer, m_buffer_data.get_heap(), heap_offset, buffer_size);
vertex_buffer_views.emplace_back(get_vertex_attribute_srv(vertex_array.type, vertex_array.attribute_size, offset_in_vertex_buffers_buffer, buffer_size));
offset_in_vertex_buffers_buffer = get_next_multiple_of<48>(offset_in_vertex_buffers_buffer + buffer_size); // 48 is a multiple of 2, 4, 6, 8, 12 and 16
//m_timers.buffer_upload_size += buffer_size;
}
void operator()(const rsx::vertex_array_register& vertex_register)
{
u32 element_size = rsx::get_vertex_type_size_on_host(vertex_register.type, vertex_register.attribute_size);
UINT buffer_size = element_size;
size_t heap_offset = m_buffer_data.alloc<D3D12_CONSTANT_BUFFER_DATA_PLACEMENT_ALIGNMENT>(buffer_size);
void* mapped_buffer = m_buffer_data.map<void>(CD3DX12_RANGE(heap_offset, heap_offset + buffer_size));
memcpy(mapped_buffer, vertex_register.data.data(), buffer_size);
m_buffer_data.unmap(CD3DX12_RANGE(heap_offset, heap_offset + buffer_size));
command_list->CopyBufferRegion(m_vertex_buffer_data, offset_in_vertex_buffers_buffer, m_buffer_data.get_heap(), heap_offset, buffer_size);
vertex_buffer_views.emplace_back(get_vertex_attribute_srv(vertex_register.type, vertex_register.attribute_size, offset_in_vertex_buffers_buffer, buffer_size));
offset_in_vertex_buffers_buffer = get_next_multiple_of<48>(offset_in_vertex_buffers_buffer + buffer_size); // 48 is a multiple of 2, 4, 6, 8, 12 and 16
}
void operator()(const rsx::empty_vertex_array& vbo)
{
}
protected:
u32 vertex_count;
ID3D12GraphicsCommandList* command_list;
ID3D12Resource* m_vertex_buffer_data;
size_t offset_in_vertex_buffers_buffer;
d3d12_data_heap& m_buffer_data;
};
} // End anonymous namespace
std::vector<D3D12_SHADER_RESOURCE_VIEW_DESC> D3D12GSRender::upload_vertex_attributes(
const std::vector<std::pair<u32, u32>>& vertex_ranges,
gsl::not_null<ID3D12GraphicsCommandList*> command_list)
{
std::vector<D3D12_SHADER_RESOURCE_VIEW_DESC> vertex_buffer_views;
command_list->ResourceBarrier(1, &CD3DX12_RESOURCE_BARRIER::Transition(m_vertex_buffer_data.Get(), D3D12_RESOURCE_STATE_VERTEX_AND_CONSTANT_BUFFER, D3D12_RESOURCE_STATE_COPY_DEST));
u32 vertex_count = get_vertex_count(vertex_ranges);
size_t offset_in_vertex_buffers_buffer = 0;
u32 input_mask = rsx::method_registers.vertex_attrib_input_mask();
verify(HERE), rsx::method_registers.vertex_data_base_index() == 0;
for (int index = 0; index < rsx::limits::vertex_count; ++index)
{
bool enabled = !!(input_mask & (1 << index));
if (!enabled)
continue;
vertex_buffer_visitor visitor(vertex_count, command_list, m_vertex_buffer_data.Get(), m_buffer_data);
const auto& vertex_buffers = get_vertex_buffers(rsx::method_registers, vertex_ranges);
if (rsx::method_registers.vertex_arrays_info[index].size > 0)
{
// Active vertex array
const rsx::data_array_format_info &info = rsx::method_registers.vertex_arrays_info[index];
for (const auto& vbo : vertex_buffers)
std::apply_visitor(visitor, vbo);
u32 element_size = rsx::get_vertex_type_size_on_host(info.type, info.size);
UINT buffer_size = element_size * vertex_count;
size_t heap_offset = m_buffer_data.alloc<D3D12_CONSTANT_BUFFER_DATA_PLACEMENT_ALIGNMENT>(buffer_size);
u32 base_offset = rsx::method_registers.vertex_data_base_offset();
u32 offset = rsx::method_registers.vertex_arrays_info[index].offset();
u32 address = base_offset + rsx::get_address(offset & 0x7fffffff, offset >> 31);
const gsl::byte *src_ptr = gsl::narrow_cast<const gsl::byte*>(vm::base(address));
void *mapped_buffer = m_buffer_data.map<void>(CD3DX12_RANGE(heap_offset, heap_offset + buffer_size));
for (const auto &range : vertex_ranges)
{
gsl::span<gsl::byte> mapped_buffer_span = { (gsl::byte*)mapped_buffer, gsl::narrow_cast<int>(buffer_size) };
write_vertex_array_data_to_buffer(mapped_buffer_span, src_ptr, range.first, range.second, info.type, info.size, info.stride, element_size);
mapped_buffer = (char*)mapped_buffer + range.second * element_size;
}
m_buffer_data.unmap(CD3DX12_RANGE(heap_offset, heap_offset + buffer_size));
command_list->CopyBufferRegion(m_vertex_buffer_data.Get(), offset_in_vertex_buffers_buffer, m_buffer_data.get_heap(), heap_offset, buffer_size);
vertex_buffer_views.emplace_back(get_vertex_attribute_srv(info, offset_in_vertex_buffers_buffer, buffer_size));
offset_in_vertex_buffers_buffer = get_next_multiple_of<48>(offset_in_vertex_buffers_buffer + buffer_size); // 48 is a multiple of 2, 4, 6, 8, 12 and 16
m_timers.buffer_upload_size += buffer_size;
}
else if (rsx::method_registers.register_vertex_info[index].size > 0)
{
// In register vertex attribute
const rsx::register_vertex_data_info &info = rsx::method_registers.register_vertex_info[index];
u32 element_size = rsx::get_vertex_type_size_on_host(info.type, info.size);
UINT buffer_size = element_size;
size_t heap_offset = m_buffer_data.alloc<D3D12_CONSTANT_BUFFER_DATA_PLACEMENT_ALIGNMENT>(buffer_size);
void *mapped_buffer = m_buffer_data.map<void>(CD3DX12_RANGE(heap_offset, heap_offset + buffer_size));
memcpy(mapped_buffer, info.data.data(), buffer_size);
m_buffer_data.unmap(CD3DX12_RANGE(heap_offset, heap_offset + buffer_size));
command_list->CopyBufferRegion(m_vertex_buffer_data.Get(), offset_in_vertex_buffers_buffer, m_buffer_data.get_heap(), heap_offset, buffer_size);
vertex_buffer_views.emplace_back(get_vertex_attribute_srv(info.type, info.size, offset_in_vertex_buffers_buffer, buffer_size));
offset_in_vertex_buffers_buffer = get_next_multiple_of<48>(offset_in_vertex_buffers_buffer + buffer_size); // 48 is a multiple of 2, 4, 6, 8, 12 and 16
}
}
command_list->ResourceBarrier(1, &CD3DX12_RESOURCE_BARRIER::Transition(m_vertex_buffer_data.Get(), D3D12_RESOURCE_STATE_COPY_DEST, D3D12_RESOURCE_STATE_VERTEX_AND_CONSTANT_BUFFER));
return vertex_buffer_views;
return visitor.vertex_buffer_views;
}
namespace

View File

@@ -61,7 +61,7 @@ private:
// Return element to draw and in case of indexed draw index type and offset in index buffer
std::tuple<u32, std::optional<std::tuple<GLenum, u32> > > set_vertex_buffer();
void upload_vertex_buffers(const u32 &max_index, const u32 &max_vertex_attrib_size, const u32 &input_mask, const u32 &texture_index_offset);
void upload_vertex_buffers(u32 min_index, u32 max_index, const u32& max_vertex_attrib_size, const u32& texture_index_offset);
// Returns vertex count
u32 upload_inline_array(const u32 &max_vertex_attrib_size, const u32 &texture_index_offset);

View File

@@ -222,7 +222,7 @@ std::tuple<u32, std::optional<std::tuple<GLenum, u32> > > GLGSRender::set_vertex
u32 offset_in_index_buffer = mapping.second;
std::tie(min_index, max_index, vertex_or_index_count) = upload_index_buffer(get_raw_index_array(rsx::method_registers.current_draw_clause.first_count_commands), ptr, type, rsx::method_registers.current_draw_clause.primitive, rsx::method_registers.current_draw_clause.first_count_commands, vertex_or_index_count);
min_index = 0; // we must keep index to vertex mapping
m_index_ring_buffer.unmap();
index_info = std::make_tuple(get_index_type(type), offset_in_index_buffer);
}
@@ -238,7 +238,8 @@ std::tuple<u32, std::optional<std::tuple<GLenum, u32> > > GLGSRender::set_vertex
{
assert(rsx::method_registers.current_draw_clause.command == rsx::draw_command::array);
vertex_count = rsx::method_registers.current_draw_clause.get_elements_count();
max_index = vertex_count - 1;
min_index = rsx::method_registers.current_draw_clause.first_count_commands.front().first;
max_index = vertex_count - 1 + min_index;
}
if (!gl::is_primitive_native(rsx::method_registers.current_draw_clause.primitive))
@@ -259,7 +260,7 @@ std::tuple<u32, std::optional<std::tuple<GLenum, u32> > > GLGSRender::set_vertex
return std::make_tuple(vertex_or_index_count, index_info);
}
upload_vertex_buffers(max_index, max_vertex_attrib_size, input_mask, texture_index_offset);
upload_vertex_buffers(min_index, max_index, max_vertex_attrib_size, texture_index_offset);
std::chrono::time_point<std::chrono::system_clock> now = std::chrono::system_clock::now();
m_vertex_upload_time += std::chrono::duration_cast<std::chrono::microseconds>(now - then).count();
@@ -267,118 +268,126 @@ std::tuple<u32, std::optional<std::tuple<GLenum, u32> > > GLGSRender::set_vertex
return std::make_tuple(vertex_or_index_count, index_info);
}
void GLGSRender::upload_vertex_buffers(const u32 &max_index, const u32 &max_vertex_attrib_size, const u32 &input_mask, const u32 &texture_index_offset)
namespace
{
u32 verts_allocated = max_index + 1;
__glcheck m_attrib_ring_buffer.reserve_and_map(verts_allocated * max_vertex_attrib_size);
struct vertex_buffer_visitor
{
vertex_buffer_visitor(u32 vtx_cnt,
u32 texture_idx_offset,
gl::ring_buffer& heap, gl::glsl::program* prog,
gl::texture* attrib_buffer,
u32 min_texbuffer_offset)
: vertex_count(vtx_cnt)
, m_attrib_ring_info(heap)
, m_program(prog)
, texture_index_offset(texture_idx_offset)
, m_gl_attrib_buffers(attrib_buffer)
, m_min_texbuffer_alignment(min_texbuffer_offset)
{
}
void operator()(const rsx::vertex_array_buffer& vertex_array)
{
int location;
if (!m_program->uniforms.has_location(rsx::vertex_program::input_attrib_names[vertex_array.index] + "_buffer", &location))
return;
// Fill vertex_array
u32 element_size = rsx::get_vertex_type_size_on_host(vertex_array.type, vertex_array.attribute_size);
u32 data_size = vertex_count * element_size;
u32 gl_type = to_gl_internal_type(vertex_array.type, vertex_array.attribute_size);
auto& texture = m_gl_attrib_buffers[vertex_array.index];
u32 buffer_offset = 0;
auto mapping = m_attrib_ring_info.alloc_from_reserve(data_size, m_min_texbuffer_alignment);
gsl::byte* dst = static_cast<gsl::byte*>(mapping.first);
buffer_offset = mapping.second;
gsl::span<gsl::byte> dest_span(dst, data_size);
prepare_buffer_for_writing(dst, vertex_array.type, vertex_array.attribute_size, vertex_count);
write_vertex_array_data_to_buffer(dest_span, vertex_array.data, vertex_count, vertex_array.type, vertex_array.attribute_size, vertex_array.stride, rsx::get_vertex_type_size_on_host(vertex_array.type, vertex_array.attribute_size));
texture.copy_from(m_attrib_ring_info, gl_type, buffer_offset, data_size);
//Link texture to uniform
m_program->uniforms.texture(location, vertex_array.index + texture_index_offset, texture);
}
void operator()(const rsx::vertex_array_register& vertex_register)
{
int location;
if (!m_program->uniforms.has_location(rsx::vertex_program::input_attrib_names[vertex_register.index] + "_buffer", &location))
return;
switch (vertex_register.type)
{
case rsx::vertex_base_type::f:
{
const u32 element_size = rsx::get_vertex_type_size_on_host(vertex_register.type, vertex_register.attribute_size);
const u32 gl_type = to_gl_internal_type(vertex_register.type, vertex_register.attribute_size);
const size_t data_size = element_size;
auto& texture = m_gl_attrib_buffers[vertex_register.index];
auto mapping = m_attrib_ring_info.alloc_from_reserve(data_size, m_min_texbuffer_alignment);
u8 *dst = static_cast<u8*>(mapping.first);
memcpy(dst, vertex_register.data.data(), element_size);
texture.copy_from(m_attrib_ring_info, gl_type, mapping.second, data_size);
//Link texture to uniform
m_program->uniforms.texture(location, vertex_register.index + texture_index_offset, texture);
break;
}
default:
LOG_ERROR(RSX, "bad non array vertex data format (type=%d, size=%d)", (u32)vertex_register.type, vertex_register.attribute_size);
break;
}
}
void operator()(const rsx::empty_vertex_array& vbo)
{
int location;
if (!m_program->uniforms.has_location(rsx::vertex_program::input_attrib_names[vbo.index] + "_buffer", &location))
return;
glActiveTexture(GL_TEXTURE0 + vbo.index + texture_index_offset);
glBindTexture(GL_TEXTURE_BUFFER, 0);
glProgramUniform1i(m_program->id(), location, vbo.index + texture_index_offset);
}
protected:
u32 vertex_count;
gl::ring_buffer& m_attrib_ring_info;
gl::glsl::program* m_program;
u32 texture_index_offset;
gl::texture* m_gl_attrib_buffers;
GLint m_min_texbuffer_alignment;
};
} // End anonymous namespace
void GLGSRender::upload_vertex_buffers(u32 min_index, u32 max_index, const u32& max_vertex_attrib_size, const u32& texture_index_offset)
{
u32 verts_allocated = max_index - min_index + 1;
__glcheck m_attrib_ring_buffer.reserve_and_map(verts_allocated * max_vertex_attrib_size);
// Disable textures, then re-enable them
// Is it really necessary?
for (int index = 0; index < rsx::limits::vertex_count; ++index)
{
int location;
if (!m_program->uniforms.has_location(rsx::vertex_program::input_attrib_names[index] + "_buffer", &location))
continue;
bool enabled = !!(input_mask & (1 << index));
if (!enabled)
{
glActiveTexture(GL_TEXTURE0 + index + texture_index_offset);
glBindTexture(GL_TEXTURE_BUFFER, 0);
glProgramUniform1i(m_program->id(), location, index + texture_index_offset);
continue;
}
if (rsx::method_registers.vertex_arrays_info[index].size > 0)
{
auto &vertex_info = rsx::method_registers.vertex_arrays_info[index];
// Fill vertex_array
u32 element_size = rsx::get_vertex_type_size_on_host(vertex_info.type, vertex_info.size);
//vertex_array.resize(vertex_draw_count * element_size);
u32 data_size = verts_allocated * element_size;
u32 gl_type = to_gl_internal_type(vertex_info.type, vertex_info.size);
auto &texture = m_gl_attrib_buffers[index];
u32 buffer_offset = 0;
// Get source pointer
u32 base_offset = rsx::method_registers.vertex_data_base_offset();
u32 offset = rsx::method_registers.vertex_arrays_info[index].offset();
u32 address = base_offset + rsx::get_address(offset & 0x7fffffff, offset >> 31);
const gsl::byte *src_ptr = gsl::narrow_cast<const gsl::byte*>(vm::base(address));
if (rsx::method_registers.current_draw_clause.command == rsx::draw_command::array)
{
auto mapping = m_attrib_ring_buffer.alloc_from_reserve(data_size, m_min_texbuffer_alignment);
gsl::byte *dst = static_cast<gsl::byte*>(mapping.first);
buffer_offset = mapping.second;
size_t offset = 0;
gsl::span<gsl::byte> dest_span(dst, data_size);
prepare_buffer_for_writing(dst, vertex_info.type, vertex_info.size, verts_allocated);
for (const auto &first_count : rsx::method_registers.current_draw_clause.first_count_commands)
{
write_vertex_array_data_to_buffer(dest_span.subspan(offset), src_ptr, first_count.first, first_count.second, vertex_info.type, vertex_info.size, vertex_info.stride, rsx::get_vertex_type_size_on_host(vertex_info.type, vertex_info.size));
offset += first_count.second * element_size;
}
}
if (rsx::method_registers.current_draw_clause.command == rsx::draw_command::indexed)
{
data_size = (max_index + 1) * element_size;
auto mapping = m_attrib_ring_buffer.alloc_from_reserve(data_size, m_min_texbuffer_alignment);
gsl::byte *dst = static_cast<gsl::byte*>(mapping.first);
buffer_offset = mapping.second;
gsl::span<gsl::byte> dest_span(dst, data_size);
prepare_buffer_for_writing(dst, vertex_info.type, vertex_info.size, verts_allocated);
write_vertex_array_data_to_buffer(dest_span, src_ptr, 0, max_index + 1, vertex_info.type, vertex_info.size, vertex_info.stride, rsx::get_vertex_type_size_on_host(vertex_info.type, vertex_info.size));
}
texture.copy_from(m_attrib_ring_buffer, gl_type, buffer_offset, data_size);
//Link texture to uniform
m_program->uniforms.texture(location, index + texture_index_offset, texture);
}
else if (rsx::method_registers.register_vertex_info[index].size > 0)
{
auto &vertex_info = rsx::method_registers.register_vertex_info[index];
switch (vertex_info.type)
{
case rsx::vertex_base_type::f:
{
const u32 element_size = rsx::get_vertex_type_size_on_host(vertex_info.type, vertex_info.size);
const u32 gl_type = to_gl_internal_type(vertex_info.type, vertex_info.size);
const size_t data_size = element_size;
auto &texture = m_gl_attrib_buffers[index];
auto mapping = m_attrib_ring_buffer.alloc_from_reserve(data_size, m_min_texbuffer_alignment);
u8 *dst = static_cast<u8*>(mapping.first);
memcpy(dst, vertex_info.data.data(), element_size);
texture.copy_from(m_attrib_ring_buffer, gl_type, mapping.second, data_size);
//Link texture to uniform
m_program->uniforms.texture(location, index + texture_index_offset, texture);
break;
}
default:
LOG_ERROR(RSX, "bad non array vertex data format (type=%d, size=%d)", (u32)vertex_info.type, vertex_info.size);
break;
}
}
else
{
glActiveTexture(GL_TEXTURE0 + index + texture_index_offset);
glBindTexture(GL_TEXTURE_BUFFER, 0);
glProgramUniform1i(m_program->id(), location, index + texture_index_offset);
continue;
}
}
vertex_buffer_visitor visitor(verts_allocated, texture_index_offset, m_attrib_ring_buffer, m_program, m_gl_attrib_buffers, m_min_texbuffer_alignment);
const auto& vertex_buffers = get_vertex_buffers(rsx::method_registers, { {min_index, verts_allocated } });
for (const auto& vbo : vertex_buffers)
std::apply_visitor(visitor, vbo);
m_attrib_ring_buffer.unmap();
}

View File

@@ -583,6 +583,58 @@ namespace rsx
return{ ptr, count * type_size };
}
gsl::span<const gsl::byte> thread::get_raw_vertex_buffer(const rsx::data_array_format_info& vertex_array_info, u32 base_offset, const std::vector<std::pair<u32, u32>>& vertex_ranges) const
{
u32 offset = vertex_array_info.offset();
u32 address = base_offset + rsx::get_address(offset & 0x7fffffff, offset >> 31);
u32 element_size = rsx::get_vertex_type_size_on_host(vertex_array_info.type, vertex_array_info.size);
// Disjoint first_counts ranges not supported atm
for (int i = 0; i < vertex_ranges.size() - 1; i++)
{
const std::tuple<u32, u32>& range = vertex_ranges[i];
const std::tuple<u32, u32>& next_range = vertex_ranges[i + 1];
verify(HERE), (std::get<0>(range) + std::get<1>(range) == std::get<0>(next_range));
}
u32 first = std::get<0>(vertex_ranges.front());
u32 count = std::get<0>(vertex_ranges.back()) + std::get<1>(vertex_ranges.back()) - first;
const gsl::byte* ptr = gsl::narrow_cast<const gsl::byte*>(vm::base(address));
return {ptr + first * vertex_array_info.stride, count * vertex_array_info.stride + element_size};
}
std::vector<std::variant<vertex_array_buffer, vertex_array_register, empty_vertex_array>> thread::get_vertex_buffers(const rsx::rsx_state& state, const std::vector<std::pair<u32, u32>>& vertex_ranges) const
{
std::vector<std::variant<vertex_array_buffer, vertex_array_register, empty_vertex_array>> result;
u32 input_mask = state.vertex_attrib_input_mask();
for (u8 index = 0; index < rsx::limits::vertex_count; ++index)
{
bool enabled = !!(input_mask & (1 << index));
if (!enabled)
continue;
if (state.vertex_arrays_info[index].size > 0)
{
const rsx::data_array_format_info& info = state.vertex_arrays_info[index];
result.push_back(vertex_array_buffer{info.type, info.size, info.stride,
get_raw_vertex_buffer(info, state.vertex_data_base_offset(), vertex_ranges), index});
continue;
}
if (state.register_vertex_info[index].size > 0)
{
const rsx::register_vertex_data_info& info = state.register_vertex_info[index];
result.push_back(vertex_array_register{info.type, info.size, info.data, index});
continue;
}
result.push_back(empty_vertex_array{index});
}
return result;
}
void thread::do_internal_task()
{
if (m_internal_tasks.empty())

View File

@@ -17,6 +17,7 @@
#include "Utilities/Timer.h"
#include "Utilities/geometry.h"
#include "rsx_trace.h"
#include "Utilities/variant.hpp"
extern u64 get_system_time();
@@ -143,6 +144,28 @@
}
};
struct vertex_array_buffer
{
rsx::vertex_base_type type;
u8 attribute_size;
u8 stride;
gsl::span<const gsl::byte> data;
u8 index;
};
struct vertex_array_register
{
rsx::vertex_base_type type;
u8 attribute_size;
std::array<u32, 4> data;
u8 index;
};
struct empty_vertex_array
{
u8 index;
};
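// Editor's note: a hypothetical alias (not in the commit) naming the variant
// that get_vertex_buffers() returns for each enabled attribute:
//   using vertex_buffer_variant =
//       std::variant<vertex_array_buffer, vertex_array_register, empty_vertex_array>;
// Each backend dispatches on it with std::apply_visitor and its own visitor
// (see the D3D12, GL and Vulkan changes in this commit).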
class thread : public named_thread
{
std::shared_ptr<thread_ctrl> m_vblank_thread;
@@ -237,6 +260,9 @@
virtual bool on_access_violation(u32 address, bool is_writing) { return false; }
gsl::span<const gsl::byte> get_raw_index_array(const std::vector<std::pair<u32, u32> >& draw_indexed_clause) const;
gsl::span<const gsl::byte> get_raw_vertex_buffer(const rsx::data_array_format_info&, u32 base_offset, const std::vector<std::pair<u32, u32>>& vertex_ranges) const;
std::vector<std::variant<vertex_array_buffer, vertex_array_register, empty_vertex_array>> get_vertex_buffers(const rsx::rsx_state& state, const std::vector<std::pair<u32, u32>>& vertex_ranges) const;
private:
std::mutex m_mtx_task;

View File

@@ -88,7 +88,7 @@ private:
/// returns primitive topology, is_indexed, index_count, offset in index buffer, index type
std::tuple<VkPrimitiveTopology, u32, std::optional<std::tuple<VkDeviceSize, VkIndexType> > > upload_vertex_data();
void upload_vertex_buffers(u32 input_mask, u32 vertex_max_index);
void upload_vertex_buffers(u32 min_index, u32 vertex_max_index);
/// returns number of vertex drawn
u32 upload_inlined_array();

View File

@@ -233,6 +233,7 @@ VKGSRender::upload_vertex_data()
if (is_indexed_draw)
{
std::tie(min_index, max_index, index_count, index_info) = upload_index_buffer(rsx::method_registers.current_draw_clause);
min_index = 0; // We need correct index mapping
}
bool primitives_emulated = false;
@@ -248,9 +249,8 @@ VKGSRender::upload_vertex_data()
{
index_count = rsx::method_registers.current_draw_clause.get_elements_count();
}
min_index = 0;
max_index = rsx::method_registers.current_draw_clause.get_elements_count() - 1;
min_index = rsx::method_registers.current_draw_clause.first_count_commands.front().first;
max_index = rsx::method_registers.current_draw_clause.get_elements_count() + min_index;
}
if (rsx::method_registers.current_draw_clause.command == rsx::draw_command::inlined_array)
@@ -265,127 +265,119 @@
if (rsx::method_registers.current_draw_clause.command == rsx::draw_command::array || rsx::method_registers.current_draw_clause.command == rsx::draw_command::indexed)
{
upload_vertex_buffers(input_mask, max_index);
upload_vertex_buffers(min_index, max_index);
}
return std::make_tuple(prims, index_count, index_info);
}
void VKGSRender::upload_vertex_buffers(u32 input_mask, u32 vertex_max_index)
namespace
{
for (int index = 0; index < rsx::limits::vertex_count; ++index)
struct vertex_buffer_visitor
{
bool enabled = !!(input_mask & (1 << index));
if (!m_program->has_uniform(s_reg_table[index]))
continue;
if (!enabled)
vertex_buffer_visitor(u32 vtx_cnt, VkDevice dev,
vk::vk_data_heap& heap, vk::glsl::program* prog,
VkDescriptorSet desc_set,
std::vector<std::unique_ptr<vk::buffer_view>>& buffer_view_to_clean)
: vertex_count(vtx_cnt)
, m_attrib_ring_info(heap)
, device(dev)
, m_program(prog)
, descriptor_sets(desc_set)
, m_buffer_view_to_clean(buffer_view_to_clean)
{
continue;
}
if (rsx::method_registers.vertex_arrays_info[index].size > 0)
void operator()(const rsx::vertex_array_buffer& vertex_array)
{
auto &vertex_info = rsx::method_registers.vertex_arrays_info[index];
// Fill vertex_array
u32 element_size = rsx::get_vertex_type_size_on_host(vertex_info.type, vertex_info.size);
u32 real_element_size = vk::get_suitable_vk_size(vertex_info.type, vertex_info.size);
u32 element_size = rsx::get_vertex_type_size_on_host(vertex_array.type, vertex_array.attribute_size);
u32 real_element_size = vk::get_suitable_vk_size(vertex_array.type, vertex_array.attribute_size);
u32 upload_size = real_element_size * (vertex_max_index + 1);
bool requires_expansion = vk::requires_component_expansion(vertex_info.type, vertex_info.size);
u32 upload_size = real_element_size * vertex_count;
bool requires_expansion = vk::requires_component_expansion(vertex_array.type, vertex_array.attribute_size);
// Get source pointer
u32 base_offset = rsx::method_registers.vertex_data_base_offset();
u32 offset = rsx::method_registers.vertex_arrays_info[index].offset();
u32 address = base_offset + rsx::get_address(offset & 0x7fffffff, offset >> 31);
const gsl::byte *src_ptr = gsl::narrow_cast<const gsl::byte*>(vm::base(address));
u32 num_stored_verts = vertex_max_index + 1;
VkDeviceSize offset_in_attrib_buffer = m_attrib_ring_info.alloc<256>(upload_size);
void *dst = m_attrib_ring_info.map(offset_in_attrib_buffer, upload_size);
vk::prepare_buffer_for_writing(dst, vertex_info.type, vertex_info.size, vertex_max_index + 1);
vk::prepare_buffer_for_writing(dst, vertex_array.type, vertex_array.attribute_size, vertex_count);
gsl::span<gsl::byte> dest_span(static_cast<gsl::byte*>(dst), upload_size);
if (rsx::method_registers.current_draw_clause.command == rsx::draw_command::array)
{
VkDeviceSize offset = 0;
for (const auto &first_count : rsx::method_registers.current_draw_clause.first_count_commands)
{
write_vertex_array_data_to_buffer(dest_span.subspan(offset), src_ptr, first_count.first, first_count.second, vertex_info.type, vertex_info.size, vertex_info.stride, real_element_size);
offset += first_count.second * real_element_size;
}
}
else if (rsx::method_registers.current_draw_clause.command == rsx::draw_command::indexed)
{
write_vertex_array_data_to_buffer(dest_span, src_ptr, 0, vertex_max_index + 1, vertex_info.type, vertex_info.size, vertex_info.stride, real_element_size);
}
write_vertex_array_data_to_buffer(dest_span, vertex_array.data, vertex_count, vertex_array.type, vertex_array.attribute_size, vertex_array.stride, real_element_size);
m_attrib_ring_info.unmap();
const VkFormat format = vk::get_suitable_vk_format(vertex_info.type, vertex_info.size);
const VkFormat format = vk::get_suitable_vk_format(vertex_array.type, vertex_array.attribute_size);
m_buffer_view_to_clean.push_back(std::make_unique<vk::buffer_view>(*m_device, m_attrib_ring_info.heap->value, format, offset_in_attrib_buffer, upload_size));
m_program->bind_uniform(m_buffer_view_to_clean.back()->value, s_reg_table[index], descriptor_sets);
m_buffer_view_to_clean.push_back(std::make_unique<vk::buffer_view>(device, m_attrib_ring_info.heap->value, format, offset_in_attrib_buffer, upload_size));
m_program->bind_uniform(m_buffer_view_to_clean.back()->value, s_reg_table[vertex_array.index], descriptor_sets);
}
else if (rsx::method_registers.register_vertex_info[index].size > 0)
{
//Untested!
auto &vertex_info = rsx::method_registers.register_vertex_info[index];
switch (vertex_info.type)
void operator()(const rsx::vertex_array_register& vertex_register)
{
switch (vertex_register.type)
{
case rsx::vertex_base_type::f:
{
size_t data_size = rsx::get_vertex_type_size_on_host(vertex_info.type, vertex_info.size);
const VkFormat format = vk::get_suitable_vk_format(vertex_info.type, vertex_info.size);
size_t data_size = rsx::get_vertex_type_size_on_host(vertex_register.type, vertex_register.attribute_size);
const VkFormat format = vk::get_suitable_vk_format(vertex_register.type, vertex_register.attribute_size);
u32 offset_in_attrib_buffer = 0;
void *data_ptr = vertex_info.data.data();
if (vk::requires_component_expansion(vertex_info.type, vertex_info.size))
if (vk::requires_component_expansion(vertex_register.type, vertex_register.attribute_size))
{
const u32 num_stored_verts = static_cast<u32>(data_size / (sizeof(float) * vertex_info.size));
const u32 real_element_size = vk::get_suitable_vk_size(vertex_info.type, vertex_info.size);
const u32 num_stored_verts = static_cast<u32>(data_size / (sizeof(float) * vertex_register.attribute_size));
const u32 real_element_size = vk::get_suitable_vk_size(vertex_register.type, vertex_register.attribute_size);
data_size = real_element_size * num_stored_verts;
offset_in_attrib_buffer = m_attrib_ring_info.alloc<256>(data_size);
void *dst = m_attrib_ring_info.map(offset_in_attrib_buffer, data_size);
vk::expand_array_components<float, 3, 4, 1>(reinterpret_cast<float*>(vertex_info.data.data()), dst, num_stored_verts);
vk::expand_array_components<float, 3, 4, 1>(reinterpret_cast<const float*>(vertex_register.data.data()), dst, num_stored_verts);
m_attrib_ring_info.unmap();
}
else
{
offset_in_attrib_buffer = m_attrib_ring_info.alloc<256>(data_size);
void *dst = m_attrib_ring_info.map(offset_in_attrib_buffer, data_size);
memcpy(dst, vertex_info.data.data(), data_size);
memcpy(dst, vertex_register.data.data(), data_size);
m_attrib_ring_info.unmap();
}
m_buffer_view_to_clean.push_back(std::make_unique<vk::buffer_view>(*m_device, m_attrib_ring_info.heap->value, format, offset_in_attrib_buffer, data_size));
m_program->bind_uniform(m_buffer_view_to_clean.back()->value, s_reg_table[index], descriptor_sets);
m_buffer_view_to_clean.push_back(std::make_unique<vk::buffer_view>(device, m_attrib_ring_info.heap->value, format, offset_in_attrib_buffer, data_size));
m_program->bind_uniform(m_buffer_view_to_clean.back()->value, s_reg_table[vertex_register.index], descriptor_sets);
break;
}
default:
fmt::throw_exception("Unknown base type %d" HERE, (u32)vertex_info.type);
fmt::throw_exception("Unknown base type %d" HERE, (u32)vertex_register.type);
}
}
else
{
//This section should theoretically be unreachable (data stream without available data)
//Variable is defined in the shaders but no data is available
//Bind a buffer view to keep the driver from crashing if access is attempted.
void operator()(const rsx::empty_vertex_array& vbo)
{
u32 offset_in_attrib_buffer = m_attrib_ring_info.alloc<256>(32);
void *dst = m_attrib_ring_info.map(offset_in_attrib_buffer, 32);
memset(dst, 0, 32);
m_attrib_ring_info.unmap();
m_buffer_view_to_clean.push_back(std::make_unique<vk::buffer_view>(device, m_attrib_ring_info.heap->value, VK_FORMAT_R32_SFLOAT, offset_in_attrib_buffer, 32));
m_program->bind_uniform(m_buffer_view_to_clean.back()->value, s_reg_table[vbo.index], descriptor_sets);
}
m_buffer_view_to_clean.push_back(std::make_unique<vk::buffer_view>(*m_device, m_attrib_ring_info.heap->value, VK_FORMAT_R32_SFLOAT, offset_in_attrib_buffer, 32));
m_program->bind_uniform(m_buffer_view_to_clean.back()->value, s_reg_table[index], descriptor_sets);
}
}
protected:
VkDevice device;
u32 vertex_count;
vk::vk_data_heap& m_attrib_ring_info;
vk::glsl::program* m_program;
VkDescriptorSet descriptor_sets;
std::vector<std::unique_ptr<vk::buffer_view>>& m_buffer_view_to_clean;
};
} // End anonymous namespace
void VKGSRender::upload_vertex_buffers(u32 min_index, u32 vertex_max_index)
{
vertex_buffer_visitor visitor(vertex_max_index - min_index + 1, *m_device, m_attrib_ring_info, m_program, descriptor_sets, m_buffer_view_to_clean);
const auto& vertex_buffers = get_vertex_buffers(rsx::method_registers, {{min_index, vertex_max_index - min_index + 1}});
for (const auto& vbo : vertex_buffers)
std::apply_visitor(visitor, vbo);
}
u32 VKGSRender::upload_inlined_array()


@@ -213,8 +213,8 @@ SettingsDialog::SettingsDialog(wxWindow* parent)
std::vector<std::unique_ptr<cfg_adapter>> pads;
static const u32 width = 458;
static const u32 height = 400;
static const u32 width = 458 * 2;
static const u32 height = 400 * 2;
// Settings panels
wxNotebook* nb_config = new wxNotebook(this, wxID_ANY, wxPoint(6, 6), wxSize(width, height));
@@ -521,7 +521,7 @@ SettingsDialog::SettingsDialog(wxWindow* parent)
SetSizerAndFit(s_subpanel_system, false);
SetSizerAndFit(s_b_panel, false);
SetSize(width + 26, height + 80);
SetSize(width + 26 * 2, height + 80 * 2);
if (ShowModal() == wxID_OK)
{
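The doubled constants above bake a fixed 2x scale into the dialog geometry. A hedged sketch of deriving the factor from a DPI value instead (scale_for_dpi is a hypothetical helper, not part of this commit):
#include <cstdint>
// Scale a base dimension by dpi relative to the traditional 96-dpi baseline,
// rounding to the nearest pixel.
constexpr std::uint32_t scale_for_dpi(std::uint32_t base, std::uint32_t dpi)
{
    return (base * dpi + 48) / 96;
}
static_assert(scale_for_dpi(458, 192) == 916, "matches the hard-coded 458 * 2");
static_assert(scale_for_dpi(400, 144) == 600, "a 1.5x display, which a fixed * 2 cannot express");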