Update C++ FlatBuffers lib, check in new codegen, and rebuild cores

This commit is contained in:
YoshiRulz 2022-09-28 08:37:49 +10:00
parent 158c897702
commit 04fcf59afe
No known key found for this signature in database
GPG Key ID: C4DE31C245353FB7
39 changed files with 5182 additions and 3248 deletions

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

View File

@ -6,6 +6,13 @@
#include "flatbuffers/flatbuffers.h"
// Ensure the included flatbuffers.h is the same version as when this file was
// generated, otherwise it may not be compatible.
static_assert(FLATBUFFERS_VERSION_MAJOR == 22 &&
FLATBUFFERS_VERSION_MINOR == 9 &&
FLATBUFFERS_VERSION_REVISION == 24,
"Non-compatible flatbuffers version included");
namespace NymaTypes {
struct EnumValue;
@ -60,7 +67,7 @@ struct NPorts;
struct NPortsBuilder;
struct NPortsT;
enum SettingType {
enum SettingType : int32_t {
/// (signed), int8, int16, int32, int64(saved as)
SettingType_Int = 0,
/// uint8, uint16, uint32, uint64(saved as)
@ -115,7 +122,7 @@ inline const char *EnumNameSettingType(SettingType e) {
return EnumNamesSettingType()[index];
}
enum SettingsFlags {
enum SettingsFlags : uint32_t {
/// TODO(cats)
SettingsFlags_Input = 256,
SettingsFlags_Sound = 512,
@ -176,7 +183,7 @@ inline const char *EnumNameSettingsFlags(SettingsFlags e) {
}
}
enum InputType {
enum InputType : uint8_t {
InputType_Padding = 0,
InputType_Button = 1,
InputType_ButtonCanRapid = 2,
@ -239,7 +246,7 @@ inline const char *EnumNameInputType(InputType e) {
return EnumNamesInputType()[index];
}
enum AxisFlags {
enum AxisFlags : uint8_t {
AxisFlags_Sqlr = 1,
AxisFlags_InvertCo = 2,
AxisFlags_SettingsUndoc = 128,
@ -265,7 +272,7 @@ inline const char *EnumNameAxisFlags(AxisFlags e) {
}
}
enum DeviceFlags {
enum DeviceFlags : uint8_t {
DeviceFlags_Keyboard = 1,
DeviceFlags_NONE = 0,
DeviceFlags_ANY = 1
@ -292,7 +299,7 @@ inline const char *EnumNameDeviceFlags(DeviceFlags e) {
return EnumNamesDeviceFlags()[index];
}
enum NInputExtra {
enum NInputExtra : uint8_t {
NInputExtra_NONE = 0,
NInputExtra_Button = 1,
NInputExtra_Axis = 2,
@ -351,6 +358,26 @@ template<> struct NInputExtraTraits<NymaTypes::NStatusInfo> {
static const NInputExtra enum_value = NInputExtra_Status;
};
template<typename T> struct NInputExtraUnionTraits {
static const NInputExtra enum_value = NInputExtra_NONE;
};
template<> struct NInputExtraUnionTraits<NymaTypes::NButtonInfoT> {
static const NInputExtra enum_value = NInputExtra_Button;
};
template<> struct NInputExtraUnionTraits<NymaTypes::NAxisInfoT> {
static const NInputExtra enum_value = NInputExtra_Axis;
};
template<> struct NInputExtraUnionTraits<NymaTypes::NSwitchInfoT> {
static const NInputExtra enum_value = NInputExtra_Switch;
};
template<> struct NInputExtraUnionTraits<NymaTypes::NStatusInfoT> {
static const NInputExtra enum_value = NInputExtra_Status;
};
struct NInputExtraUnion {
NInputExtra type;
void *value;
@ -368,17 +395,15 @@ struct NInputExtraUnion {
void Reset();
#ifndef FLATBUFFERS_CPP98_STL
template <typename T>
void Set(T&& val) {
using RT = typename std::remove_reference<T>::type;
typedef typename std::remove_reference<T>::type RT;
Reset();
type = NInputExtraTraits<typename RT::TableType>::enum_value;
type = NInputExtraUnionTraits<RT>::enum_value;
if (type != NInputExtra_NONE) {
value = new RT(std::forward<T>(val));
}
}
#endif // FLATBUFFERS_CPP98_STL
static void *UnPack(const void *obj, NInputExtra type, const flatbuffers::resolver_function_t *resolver);
flatbuffers::Offset<void> Pack(flatbuffers::FlatBufferBuilder &_fbb, const flatbuffers::rehasher_function_t *_rehasher = nullptr) const;
@ -422,11 +447,9 @@ bool VerifyNInputExtraVector(flatbuffers::Verifier &verifier, const flatbuffers:
struct EnumValueT : public flatbuffers::NativeTable {
typedef EnumValue TableType;
std::string Name;
std::string Description;
std::string Value;
EnumValueT() {
}
std::string Name{};
std::string Description{};
std::string Value{};
};
struct EnumValue FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
@ -478,7 +501,6 @@ struct EnumValueBuilder {
: fbb_(_fbb) {
start_ = fbb_.StartTable();
}
EnumValueBuilder &operator=(const EnumValueBuilder &);
flatbuffers::Offset<EnumValue> Finish() {
const auto end = fbb_.EndTable(start_);
auto o = flatbuffers::Offset<EnumValue>(end);
@ -517,19 +539,19 @@ flatbuffers::Offset<EnumValue> CreateEnumValue(flatbuffers::FlatBufferBuilder &_
struct SettingT : public flatbuffers::NativeTable {
typedef Setting TableType;
std::string Name;
std::string Description;
std::string SettingsKey;
std::string DefaultValue;
std::string Min;
std::string Max;
NymaTypes::SettingsFlags Flags;
NymaTypes::SettingType Type;
std::vector<std::unique_ptr<NymaTypes::EnumValueT>> SettingEnums;
SettingT()
: Flags(static_cast<NymaTypes::SettingsFlags>(0)),
Type(NymaTypes::SettingType_Int) {
}
std::string Name{};
std::string Description{};
std::string SettingsKey{};
std::string DefaultValue{};
std::string Min{};
std::string Max{};
NymaTypes::SettingsFlags Flags = static_cast<NymaTypes::SettingsFlags>(0);
NymaTypes::SettingType Type = NymaTypes::SettingType_Int;
std::vector<std::unique_ptr<NymaTypes::EnumValueT>> SettingEnums{};
SettingT() = default;
SettingT(const SettingT &o);
SettingT(SettingT&&) FLATBUFFERS_NOEXCEPT = default;
SettingT &operator=(SettingT o) FLATBUFFERS_NOEXCEPT;
};
struct Setting FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
@ -587,8 +609,8 @@ struct Setting FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
verifier.VerifyString(Min()) &&
VerifyOffset(verifier, VT_MAX) &&
verifier.VerifyString(Max()) &&
VerifyField<uint32_t>(verifier, VT_FLAGS) &&
VerifyField<int32_t>(verifier, VT_TYPE) &&
VerifyField<uint32_t>(verifier, VT_FLAGS, 4) &&
VerifyField<int32_t>(verifier, VT_TYPE, 4) &&
VerifyOffset(verifier, VT_SETTINGENUMS) &&
verifier.VerifyVector(SettingEnums()) &&
verifier.VerifyVectorOfTables(SettingEnums()) &&
@ -634,7 +656,6 @@ struct SettingBuilder {
: fbb_(_fbb) {
start_ = fbb_.StartTable();
}
SettingBuilder &operator=(const SettingBuilder &);
flatbuffers::Offset<Setting> Finish() {
const auto end = fbb_.EndTable(start_);
auto o = flatbuffers::Offset<Setting>(end);
@ -701,9 +722,11 @@ flatbuffers::Offset<Setting> CreateSetting(flatbuffers::FlatBufferBuilder &_fbb,
struct SettingsT : public flatbuffers::NativeTable {
typedef Settings TableType;
std::vector<std::unique_ptr<NymaTypes::SettingT>> Values;
SettingsT() {
}
std::vector<std::unique_ptr<NymaTypes::SettingT>> Values{};
SettingsT() = default;
SettingsT(const SettingsT &o);
SettingsT(SettingsT&&) FLATBUFFERS_NOEXCEPT = default;
SettingsT &operator=(SettingsT o) FLATBUFFERS_NOEXCEPT;
};
struct Settings FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
@ -738,7 +761,6 @@ struct SettingsBuilder {
: fbb_(_fbb) {
start_ = fbb_.StartTable();
}
SettingsBuilder &operator=(const SettingsBuilder &);
flatbuffers::Offset<Settings> Finish() {
const auto end = fbb_.EndTable(start_);
auto o = flatbuffers::Offset<Settings>(end);
@ -767,9 +789,7 @@ flatbuffers::Offset<Settings> CreateSettings(flatbuffers::FlatBufferBuilder &_fb
struct NButtonInfoT : public flatbuffers::NativeTable {
typedef NButtonInfo TableType;
std::string ExcludeName;
NButtonInfoT() {
}
std::string ExcludeName{};
};
struct NButtonInfo FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
@ -803,7 +823,6 @@ struct NButtonInfoBuilder {
: fbb_(_fbb) {
start_ = fbb_.StartTable();
}
NButtonInfoBuilder &operator=(const NButtonInfoBuilder &);
flatbuffers::Offset<NButtonInfo> Finish() {
const auto end = fbb_.EndTable(start_);
auto o = flatbuffers::Offset<NButtonInfo>(end);
@ -832,12 +851,10 @@ flatbuffers::Offset<NButtonInfo> CreateNButtonInfo(flatbuffers::FlatBufferBuilde
struct NAxisInfoT : public flatbuffers::NativeTable {
typedef NAxisInfo TableType;
std::string SettingsNameNeg;
std::string SettingsNamePos;
std::string NameNeg;
std::string NamePos;
NAxisInfoT() {
}
std::string SettingsNameNeg{};
std::string SettingsNamePos{};
std::string NameNeg{};
std::string NamePos{};
};
struct NAxisInfo FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
@ -898,7 +915,6 @@ struct NAxisInfoBuilder {
: fbb_(_fbb) {
start_ = fbb_.StartTable();
}
NAxisInfoBuilder &operator=(const NAxisInfoBuilder &);
flatbuffers::Offset<NAxisInfo> Finish() {
const auto end = fbb_.EndTable(start_);
auto o = flatbuffers::Offset<NAxisInfo>(end);
@ -942,11 +958,12 @@ flatbuffers::Offset<NAxisInfo> CreateNAxisInfo(flatbuffers::FlatBufferBuilder &_
struct NSwitchInfoT : public flatbuffers::NativeTable {
typedef NSwitchInfo TableType;
uint32_t DefaultPosition;
std::vector<std::unique_ptr<NymaTypes::NSwitchPositionT>> Positions;
NSwitchInfoT()
: DefaultPosition(0) {
}
uint32_t DefaultPosition = 0;
std::vector<std::unique_ptr<NymaTypes::NSwitchPositionT>> Positions{};
NSwitchInfoT() = default;
NSwitchInfoT(const NSwitchInfoT &o);
NSwitchInfoT(NSwitchInfoT&&) FLATBUFFERS_NOEXCEPT = default;
NSwitchInfoT &operator=(NSwitchInfoT o) FLATBUFFERS_NOEXCEPT;
};
struct NSwitchInfo FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
@ -964,7 +981,7 @@ struct NSwitchInfo FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
}
bool Verify(flatbuffers::Verifier &verifier) const {
return VerifyTableStart(verifier) &&
VerifyField<uint32_t>(verifier, VT_DEFAULTPOSITION) &&
VerifyField<uint32_t>(verifier, VT_DEFAULTPOSITION, 4) &&
VerifyOffset(verifier, VT_POSITIONS) &&
verifier.VerifyVector(Positions()) &&
verifier.VerifyVectorOfTables(Positions()) &&
@ -989,7 +1006,6 @@ struct NSwitchInfoBuilder {
: fbb_(_fbb) {
start_ = fbb_.StartTable();
}
NSwitchInfoBuilder &operator=(const NSwitchInfoBuilder &);
flatbuffers::Offset<NSwitchInfo> Finish() {
const auto end = fbb_.EndTable(start_);
auto o = flatbuffers::Offset<NSwitchInfo>(end);
@ -1022,11 +1038,9 @@ flatbuffers::Offset<NSwitchInfo> CreateNSwitchInfo(flatbuffers::FlatBufferBuilde
struct NSwitchPositionT : public flatbuffers::NativeTable {
typedef NSwitchPosition TableType;
std::string SettingName;
std::string Name;
std::string Description;
NSwitchPositionT() {
}
std::string SettingName{};
std::string Name{};
std::string Description{};
};
struct NSwitchPosition FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
@ -1078,7 +1092,6 @@ struct NSwitchPositionBuilder {
: fbb_(_fbb) {
start_ = fbb_.StartTable();
}
NSwitchPositionBuilder &operator=(const NSwitchPositionBuilder &);
flatbuffers::Offset<NSwitchPosition> Finish() {
const auto end = fbb_.EndTable(start_);
auto o = flatbuffers::Offset<NSwitchPosition>(end);
@ -1117,9 +1130,11 @@ flatbuffers::Offset<NSwitchPosition> CreateNSwitchPosition(flatbuffers::FlatBuff
struct NStatusInfoT : public flatbuffers::NativeTable {
typedef NStatusInfo TableType;
std::vector<std::unique_ptr<NymaTypes::NStatusStateT>> States;
NStatusInfoT() {
}
std::vector<std::unique_ptr<NymaTypes::NStatusStateT>> States{};
NStatusInfoT() = default;
NStatusInfoT(const NStatusInfoT &o);
NStatusInfoT(NStatusInfoT&&) FLATBUFFERS_NOEXCEPT = default;
NStatusInfoT &operator=(NStatusInfoT o) FLATBUFFERS_NOEXCEPT;
};
struct NStatusInfo FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
@ -1154,7 +1169,6 @@ struct NStatusInfoBuilder {
: fbb_(_fbb) {
start_ = fbb_.StartTable();
}
NStatusInfoBuilder &operator=(const NStatusInfoBuilder &);
flatbuffers::Offset<NStatusInfo> Finish() {
const auto end = fbb_.EndTable(start_);
auto o = flatbuffers::Offset<NStatusInfo>(end);
@ -1183,12 +1197,9 @@ flatbuffers::Offset<NStatusInfo> CreateNStatusInfo(flatbuffers::FlatBufferBuilde
struct NStatusStateT : public flatbuffers::NativeTable {
typedef NStatusState TableType;
std::string ShortName;
std::string Name;
int32_t Color;
NStatusStateT()
: Color(0) {
}
std::string ShortName{};
std::string Name{};
int32_t Color = 0;
};
struct NStatusState FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
@ -1214,7 +1225,7 @@ struct NStatusState FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
verifier.VerifyString(ShortName()) &&
VerifyOffset(verifier, VT_NAME) &&
verifier.VerifyString(Name()) &&
VerifyField<int32_t>(verifier, VT_COLOR) &&
VerifyField<int32_t>(verifier, VT_COLOR, 4) &&
verifier.EndTable();
}
NStatusStateT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const;
@ -1239,7 +1250,6 @@ struct NStatusStateBuilder {
: fbb_(_fbb) {
start_ = fbb_.StartTable();
}
NStatusStateBuilder &operator=(const NStatusStateBuilder &);
flatbuffers::Offset<NStatusState> Finish() {
const auto end = fbb_.EndTable(start_);
auto o = flatbuffers::Offset<NStatusState>(end);
@ -1277,21 +1287,14 @@ flatbuffers::Offset<NStatusState> CreateNStatusState(flatbuffers::FlatBufferBuil
struct NInputInfoT : public flatbuffers::NativeTable {
typedef NInputInfo TableType;
std::string SettingName;
std::string Name;
int16_t ConfigOrder;
uint16_t BitOffset;
NymaTypes::InputType Type;
NymaTypes::AxisFlags Flags;
uint8_t BitSize;
NymaTypes::NInputExtraUnion Extra;
NInputInfoT()
: ConfigOrder(0),
BitOffset(0),
Type(NymaTypes::InputType_Padding),
Flags(static_cast<NymaTypes::AxisFlags>(0)),
BitSize(0) {
}
std::string SettingName{};
std::string Name{};
int16_t ConfigOrder = 0;
uint16_t BitOffset = 0;
NymaTypes::InputType Type = NymaTypes::InputType_Padding;
NymaTypes::AxisFlags Flags = static_cast<NymaTypes::AxisFlags>(0);
uint8_t BitSize = 0;
NymaTypes::NInputExtraUnion Extra{};
};
struct NInputInfo FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
@ -1354,12 +1357,12 @@ struct NInputInfo FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
verifier.VerifyString(SettingName()) &&
VerifyOffset(verifier, VT_NAME) &&
verifier.VerifyString(Name()) &&
VerifyField<int16_t>(verifier, VT_CONFIGORDER) &&
VerifyField<uint16_t>(verifier, VT_BITOFFSET) &&
VerifyField<uint8_t>(verifier, VT_TYPE) &&
VerifyField<uint8_t>(verifier, VT_FLAGS) &&
VerifyField<uint8_t>(verifier, VT_BITSIZE) &&
VerifyField<uint8_t>(verifier, VT_EXTRA_TYPE) &&
VerifyField<int16_t>(verifier, VT_CONFIGORDER, 2) &&
VerifyField<uint16_t>(verifier, VT_BITOFFSET, 2) &&
VerifyField<uint8_t>(verifier, VT_TYPE, 1) &&
VerifyField<uint8_t>(verifier, VT_FLAGS, 1) &&
VerifyField<uint8_t>(verifier, VT_BITSIZE, 1) &&
VerifyField<uint8_t>(verifier, VT_EXTRA_TYPE, 1) &&
VerifyOffset(verifier, VT_EXTRA) &&
VerifyNInputExtra(verifier, Extra(), Extra_type()) &&
verifier.EndTable();
@ -1420,7 +1423,6 @@ struct NInputInfoBuilder {
: fbb_(_fbb) {
start_ = fbb_.StartTable();
}
NInputInfoBuilder &operator=(const NInputInfoBuilder &);
flatbuffers::Offset<NInputInfo> Finish() {
const auto end = fbb_.EndTable(start_);
auto o = flatbuffers::Offset<NInputInfo>(end);
@ -1482,16 +1484,16 @@ flatbuffers::Offset<NInputInfo> CreateNInputInfo(flatbuffers::FlatBufferBuilder
struct NDeviceInfoT : public flatbuffers::NativeTable {
typedef NDeviceInfo TableType;
std::string ShortName;
std::string FullName;
std::string Description;
NymaTypes::DeviceFlags Flags;
uint32_t ByteLength;
std::vector<std::unique_ptr<NymaTypes::NInputInfoT>> Inputs;
NDeviceInfoT()
: Flags(static_cast<NymaTypes::DeviceFlags>(0)),
ByteLength(0) {
}
std::string ShortName{};
std::string FullName{};
std::string Description{};
NymaTypes::DeviceFlags Flags = static_cast<NymaTypes::DeviceFlags>(0);
uint32_t ByteLength = 0;
std::vector<std::unique_ptr<NymaTypes::NInputInfoT>> Inputs{};
NDeviceInfoT() = default;
NDeviceInfoT(const NDeviceInfoT &o);
NDeviceInfoT(NDeviceInfoT&&) FLATBUFFERS_NOEXCEPT = default;
NDeviceInfoT &operator=(NDeviceInfoT o) FLATBUFFERS_NOEXCEPT;
};
struct NDeviceInfo FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
@ -1531,8 +1533,8 @@ struct NDeviceInfo FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
verifier.VerifyString(FullName()) &&
VerifyOffset(verifier, VT_DESCRIPTION) &&
verifier.VerifyString(Description()) &&
VerifyField<uint8_t>(verifier, VT_FLAGS) &&
VerifyField<uint32_t>(verifier, VT_BYTELENGTH) &&
VerifyField<uint8_t>(verifier, VT_FLAGS, 1) &&
VerifyField<uint32_t>(verifier, VT_BYTELENGTH, 4) &&
VerifyOffset(verifier, VT_INPUTS) &&
verifier.VerifyVector(Inputs()) &&
verifier.VerifyVectorOfTables(Inputs()) &&
@ -1569,7 +1571,6 @@ struct NDeviceInfoBuilder {
: fbb_(_fbb) {
start_ = fbb_.StartTable();
}
NDeviceInfoBuilder &operator=(const NDeviceInfoBuilder &);
flatbuffers::Offset<NDeviceInfo> Finish() {
const auto end = fbb_.EndTable(start_);
auto o = flatbuffers::Offset<NDeviceInfo>(end);
@ -1621,12 +1622,14 @@ flatbuffers::Offset<NDeviceInfo> CreateNDeviceInfo(flatbuffers::FlatBufferBuilde
struct NPortInfoT : public flatbuffers::NativeTable {
typedef NPortInfo TableType;
std::string ShortName;
std::string FullName;
std::string DefaultDeviceShortName;
std::vector<std::unique_ptr<NymaTypes::NDeviceInfoT>> Devices;
NPortInfoT() {
}
std::string ShortName{};
std::string FullName{};
std::string DefaultDeviceShortName{};
std::vector<std::unique_ptr<NymaTypes::NDeviceInfoT>> Devices{};
NPortInfoT() = default;
NPortInfoT(const NPortInfoT &o);
NPortInfoT(NPortInfoT&&) FLATBUFFERS_NOEXCEPT = default;
NPortInfoT &operator=(NPortInfoT o) FLATBUFFERS_NOEXCEPT;
};
struct NPortInfo FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
@ -1688,7 +1691,6 @@ struct NPortInfoBuilder {
: fbb_(_fbb) {
start_ = fbb_.StartTable();
}
NPortInfoBuilder &operator=(const NPortInfoBuilder &);
flatbuffers::Offset<NPortInfo> Finish() {
const auto end = fbb_.EndTable(start_);
auto o = flatbuffers::Offset<NPortInfo>(end);
@ -1732,9 +1734,11 @@ flatbuffers::Offset<NPortInfo> CreateNPortInfo(flatbuffers::FlatBufferBuilder &_
struct NPortsT : public flatbuffers::NativeTable {
typedef NPorts TableType;
std::vector<std::unique_ptr<NymaTypes::NPortInfoT>> Values;
NPortsT() {
}
std::vector<std::unique_ptr<NymaTypes::NPortInfoT>> Values{};
NPortsT() = default;
NPortsT(const NPortsT &o);
NPortsT(NPortsT&&) FLATBUFFERS_NOEXCEPT = default;
NPortsT &operator=(NPortsT o) FLATBUFFERS_NOEXCEPT;
};
struct NPorts FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
@ -1769,7 +1773,6 @@ struct NPortsBuilder {
: fbb_(_fbb) {
start_ = fbb_.StartTable();
}
NPortsBuilder &operator=(const NPortsBuilder &);
flatbuffers::Offset<NPorts> Finish() {
const auto end = fbb_.EndTable(start_);
auto o = flatbuffers::Offset<NPorts>(end);
@ -1797,7 +1800,7 @@ inline flatbuffers::Offset<NPorts> CreateNPortsDirect(
flatbuffers::Offset<NPorts> CreateNPorts(flatbuffers::FlatBufferBuilder &_fbb, const NPortsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
inline EnumValueT *EnumValue::UnPack(const flatbuffers::resolver_function_t *_resolver) const {
std::unique_ptr<NymaTypes::EnumValueT> _o = std::unique_ptr<NymaTypes::EnumValueT>(new EnumValueT());
auto _o = std::unique_ptr<EnumValueT>(new EnumValueT());
UnPackTo(_o.get(), _resolver);
return _o.release();
}
@ -1828,8 +1831,34 @@ inline flatbuffers::Offset<EnumValue> CreateEnumValue(flatbuffers::FlatBufferBui
_Value);
}
inline SettingT::SettingT(const SettingT &o)
: Name(o.Name),
Description(o.Description),
SettingsKey(o.SettingsKey),
DefaultValue(o.DefaultValue),
Min(o.Min),
Max(o.Max),
Flags(o.Flags),
Type(o.Type) {
SettingEnums.reserve(o.SettingEnums.size());
for (const auto &SettingEnums_ : o.SettingEnums) { SettingEnums.emplace_back((SettingEnums_) ? new NymaTypes::EnumValueT(*SettingEnums_) : nullptr); }
}
inline SettingT &SettingT::operator=(SettingT o) FLATBUFFERS_NOEXCEPT {
std::swap(Name, o.Name);
std::swap(Description, o.Description);
std::swap(SettingsKey, o.SettingsKey);
std::swap(DefaultValue, o.DefaultValue);
std::swap(Min, o.Min);
std::swap(Max, o.Max);
std::swap(Flags, o.Flags);
std::swap(Type, o.Type);
std::swap(SettingEnums, o.SettingEnums);
return *this;
}
inline SettingT *Setting::UnPack(const flatbuffers::resolver_function_t *_resolver) const {
std::unique_ptr<NymaTypes::SettingT> _o = std::unique_ptr<NymaTypes::SettingT>(new SettingT());
auto _o = std::unique_ptr<SettingT>(new SettingT());
UnPackTo(_o.get(), _resolver);
return _o.release();
}
@ -1845,7 +1874,7 @@ inline void Setting::UnPackTo(SettingT *_o, const flatbuffers::resolver_function
{ auto _e = Max(); if (_e) _o->Max = _e->str(); }
{ auto _e = Flags(); _o->Flags = _e; }
{ auto _e = Type(); _o->Type = _e; }
{ auto _e = SettingEnums(); if (_e) { _o->SettingEnums.resize(_e->size()); for (flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->SettingEnums[_i] = std::unique_ptr<NymaTypes::EnumValueT>(_e->Get(_i)->UnPack(_resolver)); } } }
{ auto _e = SettingEnums(); if (_e) { _o->SettingEnums.resize(_e->size()); for (flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { if(_o->SettingEnums[_i]) { _e->Get(_i)->UnPackTo(_o->SettingEnums[_i].get(), _resolver); } else { _o->SettingEnums[_i] = std::unique_ptr<NymaTypes::EnumValueT>(_e->Get(_i)->UnPack(_resolver)); }; } } else { _o->SettingEnums.resize(0); } }
}
inline flatbuffers::Offset<Setting> Setting::Pack(flatbuffers::FlatBufferBuilder &_fbb, const SettingT* _o, const flatbuffers::rehasher_function_t *_rehasher) {
@ -1878,8 +1907,18 @@ inline flatbuffers::Offset<Setting> CreateSetting(flatbuffers::FlatBufferBuilder
_SettingEnums);
}
inline SettingsT::SettingsT(const SettingsT &o) {
Values.reserve(o.Values.size());
for (const auto &Values_ : o.Values) { Values.emplace_back((Values_) ? new NymaTypes::SettingT(*Values_) : nullptr); }
}
inline SettingsT &SettingsT::operator=(SettingsT o) FLATBUFFERS_NOEXCEPT {
std::swap(Values, o.Values);
return *this;
}
inline SettingsT *Settings::UnPack(const flatbuffers::resolver_function_t *_resolver) const {
std::unique_ptr<NymaTypes::SettingsT> _o = std::unique_ptr<NymaTypes::SettingsT>(new SettingsT());
auto _o = std::unique_ptr<SettingsT>(new SettingsT());
UnPackTo(_o.get(), _resolver);
return _o.release();
}
@ -1887,7 +1926,7 @@ inline SettingsT *Settings::UnPack(const flatbuffers::resolver_function_t *_reso
inline void Settings::UnPackTo(SettingsT *_o, const flatbuffers::resolver_function_t *_resolver) const {
(void)_o;
(void)_resolver;
{ auto _e = Values(); if (_e) { _o->Values.resize(_e->size()); for (flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->Values[_i] = std::unique_ptr<NymaTypes::SettingT>(_e->Get(_i)->UnPack(_resolver)); } } }
{ auto _e = Values(); if (_e) { _o->Values.resize(_e->size()); for (flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { if(_o->Values[_i]) { _e->Get(_i)->UnPackTo(_o->Values[_i].get(), _resolver); } else { _o->Values[_i] = std::unique_ptr<NymaTypes::SettingT>(_e->Get(_i)->UnPack(_resolver)); }; } } else { _o->Values.resize(0); } }
}
inline flatbuffers::Offset<Settings> Settings::Pack(flatbuffers::FlatBufferBuilder &_fbb, const SettingsT* _o, const flatbuffers::rehasher_function_t *_rehasher) {
@ -1905,7 +1944,7 @@ inline flatbuffers::Offset<Settings> CreateSettings(flatbuffers::FlatBufferBuild
}
inline NButtonInfoT *NButtonInfo::UnPack(const flatbuffers::resolver_function_t *_resolver) const {
std::unique_ptr<NymaTypes::NButtonInfoT> _o = std::unique_ptr<NymaTypes::NButtonInfoT>(new NButtonInfoT());
auto _o = std::unique_ptr<NButtonInfoT>(new NButtonInfoT());
UnPackTo(_o.get(), _resolver);
return _o.release();
}
@ -1931,7 +1970,7 @@ inline flatbuffers::Offset<NButtonInfo> CreateNButtonInfo(flatbuffers::FlatBuffe
}
inline NAxisInfoT *NAxisInfo::UnPack(const flatbuffers::resolver_function_t *_resolver) const {
std::unique_ptr<NymaTypes::NAxisInfoT> _o = std::unique_ptr<NymaTypes::NAxisInfoT>(new NAxisInfoT());
auto _o = std::unique_ptr<NAxisInfoT>(new NAxisInfoT());
UnPackTo(_o.get(), _resolver);
return _o.release();
}
@ -1965,8 +2004,20 @@ inline flatbuffers::Offset<NAxisInfo> CreateNAxisInfo(flatbuffers::FlatBufferBui
_NamePos);
}
inline NSwitchInfoT::NSwitchInfoT(const NSwitchInfoT &o)
: DefaultPosition(o.DefaultPosition) {
Positions.reserve(o.Positions.size());
for (const auto &Positions_ : o.Positions) { Positions.emplace_back((Positions_) ? new NymaTypes::NSwitchPositionT(*Positions_) : nullptr); }
}
inline NSwitchInfoT &NSwitchInfoT::operator=(NSwitchInfoT o) FLATBUFFERS_NOEXCEPT {
std::swap(DefaultPosition, o.DefaultPosition);
std::swap(Positions, o.Positions);
return *this;
}
inline NSwitchInfoT *NSwitchInfo::UnPack(const flatbuffers::resolver_function_t *_resolver) const {
std::unique_ptr<NymaTypes::NSwitchInfoT> _o = std::unique_ptr<NymaTypes::NSwitchInfoT>(new NSwitchInfoT());
auto _o = std::unique_ptr<NSwitchInfoT>(new NSwitchInfoT());
UnPackTo(_o.get(), _resolver);
return _o.release();
}
@ -1975,7 +2026,7 @@ inline void NSwitchInfo::UnPackTo(NSwitchInfoT *_o, const flatbuffers::resolver_
(void)_o;
(void)_resolver;
{ auto _e = DefaultPosition(); _o->DefaultPosition = _e; }
{ auto _e = Positions(); if (_e) { _o->Positions.resize(_e->size()); for (flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->Positions[_i] = std::unique_ptr<NymaTypes::NSwitchPositionT>(_e->Get(_i)->UnPack(_resolver)); } } }
{ auto _e = Positions(); if (_e) { _o->Positions.resize(_e->size()); for (flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { if(_o->Positions[_i]) { _e->Get(_i)->UnPackTo(_o->Positions[_i].get(), _resolver); } else { _o->Positions[_i] = std::unique_ptr<NymaTypes::NSwitchPositionT>(_e->Get(_i)->UnPack(_resolver)); }; } } else { _o->Positions.resize(0); } }
}
inline flatbuffers::Offset<NSwitchInfo> NSwitchInfo::Pack(flatbuffers::FlatBufferBuilder &_fbb, const NSwitchInfoT* _o, const flatbuffers::rehasher_function_t *_rehasher) {
@ -1995,7 +2046,7 @@ inline flatbuffers::Offset<NSwitchInfo> CreateNSwitchInfo(flatbuffers::FlatBuffe
}
inline NSwitchPositionT *NSwitchPosition::UnPack(const flatbuffers::resolver_function_t *_resolver) const {
std::unique_ptr<NymaTypes::NSwitchPositionT> _o = std::unique_ptr<NymaTypes::NSwitchPositionT>(new NSwitchPositionT());
auto _o = std::unique_ptr<NSwitchPositionT>(new NSwitchPositionT());
UnPackTo(_o.get(), _resolver);
return _o.release();
}
@ -2026,8 +2077,18 @@ inline flatbuffers::Offset<NSwitchPosition> CreateNSwitchPosition(flatbuffers::F
_Description);
}
inline NStatusInfoT::NStatusInfoT(const NStatusInfoT &o) {
States.reserve(o.States.size());
for (const auto &States_ : o.States) { States.emplace_back((States_) ? new NymaTypes::NStatusStateT(*States_) : nullptr); }
}
inline NStatusInfoT &NStatusInfoT::operator=(NStatusInfoT o) FLATBUFFERS_NOEXCEPT {
std::swap(States, o.States);
return *this;
}
inline NStatusInfoT *NStatusInfo::UnPack(const flatbuffers::resolver_function_t *_resolver) const {
std::unique_ptr<NymaTypes::NStatusInfoT> _o = std::unique_ptr<NymaTypes::NStatusInfoT>(new NStatusInfoT());
auto _o = std::unique_ptr<NStatusInfoT>(new NStatusInfoT());
UnPackTo(_o.get(), _resolver);
return _o.release();
}
@ -2035,7 +2096,7 @@ inline NStatusInfoT *NStatusInfo::UnPack(const flatbuffers::resolver_function_t
inline void NStatusInfo::UnPackTo(NStatusInfoT *_o, const flatbuffers::resolver_function_t *_resolver) const {
(void)_o;
(void)_resolver;
{ auto _e = States(); if (_e) { _o->States.resize(_e->size()); for (flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->States[_i] = std::unique_ptr<NymaTypes::NStatusStateT>(_e->Get(_i)->UnPack(_resolver)); } } }
{ auto _e = States(); if (_e) { _o->States.resize(_e->size()); for (flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { if(_o->States[_i]) { _e->Get(_i)->UnPackTo(_o->States[_i].get(), _resolver); } else { _o->States[_i] = std::unique_ptr<NymaTypes::NStatusStateT>(_e->Get(_i)->UnPack(_resolver)); }; } } else { _o->States.resize(0); } }
}
inline flatbuffers::Offset<NStatusInfo> NStatusInfo::Pack(flatbuffers::FlatBufferBuilder &_fbb, const NStatusInfoT* _o, const flatbuffers::rehasher_function_t *_rehasher) {
@ -2053,7 +2114,7 @@ inline flatbuffers::Offset<NStatusInfo> CreateNStatusInfo(flatbuffers::FlatBuffe
}
inline NStatusStateT *NStatusState::UnPack(const flatbuffers::resolver_function_t *_resolver) const {
std::unique_ptr<NymaTypes::NStatusStateT> _o = std::unique_ptr<NymaTypes::NStatusStateT>(new NStatusStateT());
auto _o = std::unique_ptr<NStatusStateT>(new NStatusStateT());
UnPackTo(_o.get(), _resolver);
return _o.release();
}
@ -2085,7 +2146,7 @@ inline flatbuffers::Offset<NStatusState> CreateNStatusState(flatbuffers::FlatBuf
}
inline NInputInfoT *NInputInfo::UnPack(const flatbuffers::resolver_function_t *_resolver) const {
std::unique_ptr<NymaTypes::NInputInfoT> _o = std::unique_ptr<NymaTypes::NInputInfoT>(new NInputInfoT());
auto _o = std::unique_ptr<NInputInfoT>(new NInputInfoT());
UnPackTo(_o.get(), _resolver);
return _o.release();
}
@ -2134,8 +2195,28 @@ inline flatbuffers::Offset<NInputInfo> CreateNInputInfo(flatbuffers::FlatBufferB
_Extra);
}
inline NDeviceInfoT::NDeviceInfoT(const NDeviceInfoT &o)
: ShortName(o.ShortName),
FullName(o.FullName),
Description(o.Description),
Flags(o.Flags),
ByteLength(o.ByteLength) {
Inputs.reserve(o.Inputs.size());
for (const auto &Inputs_ : o.Inputs) { Inputs.emplace_back((Inputs_) ? new NymaTypes::NInputInfoT(*Inputs_) : nullptr); }
}
inline NDeviceInfoT &NDeviceInfoT::operator=(NDeviceInfoT o) FLATBUFFERS_NOEXCEPT {
std::swap(ShortName, o.ShortName);
std::swap(FullName, o.FullName);
std::swap(Description, o.Description);
std::swap(Flags, o.Flags);
std::swap(ByteLength, o.ByteLength);
std::swap(Inputs, o.Inputs);
return *this;
}
inline NDeviceInfoT *NDeviceInfo::UnPack(const flatbuffers::resolver_function_t *_resolver) const {
std::unique_ptr<NymaTypes::NDeviceInfoT> _o = std::unique_ptr<NymaTypes::NDeviceInfoT>(new NDeviceInfoT());
auto _o = std::unique_ptr<NDeviceInfoT>(new NDeviceInfoT());
UnPackTo(_o.get(), _resolver);
return _o.release();
}
@ -2148,7 +2229,7 @@ inline void NDeviceInfo::UnPackTo(NDeviceInfoT *_o, const flatbuffers::resolver_
{ auto _e = Description(); if (_e) _o->Description = _e->str(); }
{ auto _e = Flags(); _o->Flags = _e; }
{ auto _e = ByteLength(); _o->ByteLength = _e; }
{ auto _e = Inputs(); if (_e) { _o->Inputs.resize(_e->size()); for (flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->Inputs[_i] = std::unique_ptr<NymaTypes::NInputInfoT>(_e->Get(_i)->UnPack(_resolver)); } } }
{ auto _e = Inputs(); if (_e) { _o->Inputs.resize(_e->size()); for (flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { if(_o->Inputs[_i]) { _e->Get(_i)->UnPackTo(_o->Inputs[_i].get(), _resolver); } else { _o->Inputs[_i] = std::unique_ptr<NymaTypes::NInputInfoT>(_e->Get(_i)->UnPack(_resolver)); }; } } else { _o->Inputs.resize(0); } }
}
inline flatbuffers::Offset<NDeviceInfo> NDeviceInfo::Pack(flatbuffers::FlatBufferBuilder &_fbb, const NDeviceInfoT* _o, const flatbuffers::rehasher_function_t *_rehasher) {
@ -2175,8 +2256,24 @@ inline flatbuffers::Offset<NDeviceInfo> CreateNDeviceInfo(flatbuffers::FlatBuffe
_Inputs);
}
inline NPortInfoT::NPortInfoT(const NPortInfoT &o)
: ShortName(o.ShortName),
FullName(o.FullName),
DefaultDeviceShortName(o.DefaultDeviceShortName) {
Devices.reserve(o.Devices.size());
for (const auto &Devices_ : o.Devices) { Devices.emplace_back((Devices_) ? new NymaTypes::NDeviceInfoT(*Devices_) : nullptr); }
}
inline NPortInfoT &NPortInfoT::operator=(NPortInfoT o) FLATBUFFERS_NOEXCEPT {
std::swap(ShortName, o.ShortName);
std::swap(FullName, o.FullName);
std::swap(DefaultDeviceShortName, o.DefaultDeviceShortName);
std::swap(Devices, o.Devices);
return *this;
}
inline NPortInfoT *NPortInfo::UnPack(const flatbuffers::resolver_function_t *_resolver) const {
std::unique_ptr<NymaTypes::NPortInfoT> _o = std::unique_ptr<NymaTypes::NPortInfoT>(new NPortInfoT());
auto _o = std::unique_ptr<NPortInfoT>(new NPortInfoT());
UnPackTo(_o.get(), _resolver);
return _o.release();
}
@ -2187,7 +2284,7 @@ inline void NPortInfo::UnPackTo(NPortInfoT *_o, const flatbuffers::resolver_func
{ auto _e = ShortName(); if (_e) _o->ShortName = _e->str(); }
{ auto _e = FullName(); if (_e) _o->FullName = _e->str(); }
{ auto _e = DefaultDeviceShortName(); if (_e) _o->DefaultDeviceShortName = _e->str(); }
{ auto _e = Devices(); if (_e) { _o->Devices.resize(_e->size()); for (flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->Devices[_i] = std::unique_ptr<NymaTypes::NDeviceInfoT>(_e->Get(_i)->UnPack(_resolver)); } } }
{ auto _e = Devices(); if (_e) { _o->Devices.resize(_e->size()); for (flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { if(_o->Devices[_i]) { _e->Get(_i)->UnPackTo(_o->Devices[_i].get(), _resolver); } else { _o->Devices[_i] = std::unique_ptr<NymaTypes::NDeviceInfoT>(_e->Get(_i)->UnPack(_resolver)); }; } } else { _o->Devices.resize(0); } }
}
inline flatbuffers::Offset<NPortInfo> NPortInfo::Pack(flatbuffers::FlatBufferBuilder &_fbb, const NPortInfoT* _o, const flatbuffers::rehasher_function_t *_rehasher) {
@ -2210,8 +2307,18 @@ inline flatbuffers::Offset<NPortInfo> CreateNPortInfo(flatbuffers::FlatBufferBui
_Devices);
}
// Deep-copy constructor (flatc-generated): clones each owned NPortInfoT;
// null entries stay null.
inline NPortsT::NPortsT(const NPortsT &o) {
  Values.reserve(o.Values.size());
  for (const auto &Values_ : o.Values) { Values.emplace_back((Values_) ? new NymaTypes::NPortInfoT(*Values_) : nullptr); }
}
// Copy-and-swap assignment (by-value parameter doubles as move assignment).
inline NPortsT &NPortsT::operator=(NPortsT o) FLATBUFFERS_NOEXCEPT {
  std::swap(Values, o.Values);
  return *this;
}
inline NPortsT *NPorts::UnPack(const flatbuffers::resolver_function_t *_resolver) const {
std::unique_ptr<NymaTypes::NPortsT> _o = std::unique_ptr<NymaTypes::NPortsT>(new NPortsT());
auto _o = std::unique_ptr<NPortsT>(new NPortsT());
UnPackTo(_o.get(), _resolver);
return _o.release();
}
@ -2219,7 +2326,7 @@ inline NPortsT *NPorts::UnPack(const flatbuffers::resolver_function_t *_resolver
inline void NPorts::UnPackTo(NPortsT *_o, const flatbuffers::resolver_function_t *_resolver) const {
(void)_o;
(void)_resolver;
{ auto _e = Values(); if (_e) { _o->Values.resize(_e->size()); for (flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->Values[_i] = std::unique_ptr<NymaTypes::NPortInfoT>(_e->Get(_i)->UnPack(_resolver)); } } }
{ auto _e = Values(); if (_e) { _o->Values.resize(_e->size()); for (flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { if(_o->Values[_i]) { _e->Get(_i)->UnPackTo(_o->Values[_i].get(), _resolver); } else { _o->Values[_i] = std::unique_ptr<NymaTypes::NPortInfoT>(_e->Get(_i)->UnPack(_resolver)); }; } } else { _o->Values.resize(0); } }
}
inline flatbuffers::Offset<NPorts> NPorts::Pack(flatbuffers::FlatBufferBuilder &_fbb, const NPortsT* _o, const flatbuffers::rehasher_function_t *_rehasher) {
@ -2274,6 +2381,7 @@ inline bool VerifyNInputExtraVector(flatbuffers::Verifier &verifier, const flatb
}
inline void *NInputExtraUnion::UnPack(const void *obj, NInputExtra type, const flatbuffers::resolver_function_t *resolver) {
(void)resolver;
switch (type) {
case NInputExtra_Button: {
auto ptr = reinterpret_cast<const NymaTypes::NButtonInfo *>(obj);
@ -2296,6 +2404,7 @@ inline void *NInputExtraUnion::UnPack(const void *obj, NInputExtra type, const f
}
inline flatbuffers::Offset<void> NInputExtraUnion::Pack(flatbuffers::FlatBufferBuilder &_fbb, const flatbuffers::rehasher_function_t *_rehasher) const {
(void)_rehasher;
switch (type) {
case NInputExtra_Button: {
auto ptr = reinterpret_cast<const NymaTypes::NButtonInfoT *>(value);
@ -2328,11 +2437,11 @@ inline NInputExtraUnion::NInputExtraUnion(const NInputExtraUnion &u) : type(u.ty
break;
}
case NInputExtra_Switch: {
FLATBUFFERS_ASSERT(false); // NymaTypes::NSwitchInfoT not copyable.
value = new NymaTypes::NSwitchInfoT(*reinterpret_cast<NymaTypes::NSwitchInfoT *>(u.value));
break;
}
case NInputExtra_Status: {
FLATBUFFERS_ASSERT(false); // NymaTypes::NStatusInfoT not copyable.
value = new NymaTypes::NStatusInfoT(*reinterpret_cast<NymaTypes::NStatusInfoT *>(u.value));
break;
}
default:

View File

@ -0,0 +1,68 @@
/*
* Copyright 2021 Google Inc. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef FLATBUFFERS_ALLOCATOR_H_
#define FLATBUFFERS_ALLOCATOR_H_
#include "flatbuffers/base.h"
namespace flatbuffers {
// Allocator interface. This is flatbuffers-specific and meant only for
// `vector_downward` usage.
class Allocator {
 public:
  virtual ~Allocator() {}

  // Allocate `size` bytes of memory.
  virtual uint8_t *allocate(size_t size) = 0;

  // Deallocate `size` bytes of memory at `p` allocated by this allocator.
  virtual void deallocate(uint8_t *p, size_t size) = 0;

  // Reallocate `new_size` bytes of memory, replacing the old region of size
  // `old_size` at `p`. In contrast to a normal realloc, this grows downwards,
  // and is intended specifically for `vector_downward` use.
  // `in_use_back` and `in_use_front` indicate how much of `old_size` is
  // actually in use at each end, and needs to be copied.
  virtual uint8_t *reallocate_downward(uint8_t *old_p, size_t old_size,
                                       size_t new_size, size_t in_use_back,
                                       size_t in_use_front) {
    // Default strategy: allocate-copy-free. Subclasses may override with an
    // in-place scheme.
    FLATBUFFERS_ASSERT(new_size > old_size);  // vector_downward only grows
    uint8_t *new_p = allocate(new_size);
    memcpy_downward(old_p, old_size, new_p, new_size, in_use_back,
                    in_use_front);
    deallocate(old_p, old_size);
    return new_p;
  }

 protected:
  // Called by `reallocate_downward` to copy memory from `old_p` of `old_size`
  // to `new_p` of `new_size`. Only memory of size `in_use_front` and
  // `in_use_back` will be copied from the front and back of the old memory
  // allocation.
  void memcpy_downward(uint8_t *old_p, size_t old_size, uint8_t *new_p,
                       size_t new_size, size_t in_use_back,
                       size_t in_use_front) {
    // The back region stays anchored to the END of the buffer, because
    // vector_downward fills from high addresses toward low ones.
    memcpy(new_p + new_size - in_use_back, old_p + old_size - in_use_back,
           in_use_back);
    memcpy(new_p, old_p, in_use_front);
  }
};
} // namespace flatbuffers
#endif // FLATBUFFERS_ALLOCATOR_H_

View File

@ -0,0 +1,243 @@
/*
* Copyright 2021 Google Inc. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef FLATBUFFERS_ARRAY_H_
#define FLATBUFFERS_ARRAY_H_
#include "flatbuffers/base.h"
#include "flatbuffers/stl_emulation.h"
#include "flatbuffers/vector.h"
namespace flatbuffers {
// This is used as a helper type for accessing arrays.
template<typename T, uint16_t length> class Array {
  // Array<T> can carry only POD data types (scalars or structs).
  typedef typename flatbuffers::bool_constant<flatbuffers::is_scalar<T>::value>
      scalar_tag;
  typedef
      typename flatbuffers::conditional<scalar_tag::value, T, const T *>::type
          IndirectHelperType;

 public:
  typedef uint16_t size_type;
  typedef typename IndirectHelper<IndirectHelperType>::return_type return_type;
  typedef VectorIterator<T, return_type> const_iterator;
  typedef VectorReverseIterator<const_iterator> const_reverse_iterator;

  // If T is a LE-scalar or a struct (!scalar_tag::value).
  static FLATBUFFERS_CONSTEXPR bool is_span_observable =
      (scalar_tag::value && (FLATBUFFERS_LITTLEENDIAN || sizeof(T) == 1)) ||
      !scalar_tag::value;

  // The element count is a compile-time constant (the `length` parameter).
  FLATBUFFERS_CONSTEXPR uint16_t size() const { return length; }

  return_type Get(uoffset_t i) const {
    FLATBUFFERS_ASSERT(i < size());
    return IndirectHelper<IndirectHelperType>::Read(Data(), i);
  }

  return_type operator[](uoffset_t i) const { return Get(i); }

  // If this is a Vector of enums, T will be its storage type, not the enum
  // type. This function makes it convenient to retrieve value with enum
  // type E.
  template<typename E> E GetEnum(uoffset_t i) const {
    return static_cast<E>(Get(i));
  }

  const_iterator begin() const { return const_iterator(Data(), 0); }
  const_iterator end() const { return const_iterator(Data(), size()); }

  const_reverse_iterator rbegin() const {
    return const_reverse_iterator(end());
  }
  const_reverse_iterator rend() const {
    return const_reverse_iterator(begin());
  }

  const_iterator cbegin() const { return begin(); }
  const_iterator cend() const { return end(); }

  const_reverse_iterator crbegin() const { return rbegin(); }
  const_reverse_iterator crend() const { return rend(); }

  // Get a mutable pointer to elements inside this array.
  // This method used to mutate arrays of structs followed by a @p Mutate
  // operation. For primitive types use @p Mutate directly.
  // @warning Assignments and reads to/from the dereferenced pointer are not
  // automatically converted to the correct endianness.
  typename flatbuffers::conditional<scalar_tag::value, void, T *>::type
  GetMutablePointer(uoffset_t i) const {
    FLATBUFFERS_ASSERT(i < size());
    return const_cast<T *>(&data()[i]);
  }

  // Change elements if you have a non-const pointer to this object.
  void Mutate(uoffset_t i, const T &val) { MutateImpl(scalar_tag(), i, val); }

  // The raw data in little endian format. Use with care.
  const uint8_t *Data() const { return data_; }

  uint8_t *Data() { return data_; }

  // Similarly, but typed, much like std::vector::data
  const T *data() const { return reinterpret_cast<const T *>(Data()); }
  T *data() { return reinterpret_cast<T *>(Data()); }

  // Copy data from a span with endian conversion.
  // If this Array and the span overlap, the behavior is undefined.
  void CopyFromSpan(flatbuffers::span<const T, length> src) {
    const auto p1 = reinterpret_cast<const uint8_t *>(src.data());
    const auto p2 = Data();
    // NOTE(review): this overlap check spans `length` BYTES, not
    // `length * sizeof(T)` — conservative only when sizeof(T) == 1;
    // TODO confirm upstream intent.
    FLATBUFFERS_ASSERT(!(p1 >= p2 && p1 < (p2 + length)) &&
                       !(p2 >= p1 && p2 < (p1 + length)));
    (void)p1;
    (void)p2;

    CopyFromSpanImpl(flatbuffers::bool_constant<is_span_observable>(), src);
  }

 protected:
  void MutateImpl(flatbuffers::true_type, uoffset_t i, const T &val) {
    FLATBUFFERS_ASSERT(i < size());
    WriteScalar(data() + i, val);
  }

  void MutateImpl(flatbuffers::false_type, uoffset_t i, const T &val) {
    *(GetMutablePointer(i)) = val;
  }

  void CopyFromSpanImpl(flatbuffers::true_type,
                        flatbuffers::span<const T, length> src) {
    // Use std::memcpy() instead of std::copy() to avoid performance degradation
    // due to aliasing if T is char or unsigned char.
    // The size is known at compile time, so memcpy would be inlined.
    std::memcpy(data(), src.data(), length * sizeof(T));
  }

  // Copy data from flatbuffers::span with endian conversion.
  void CopyFromSpanImpl(flatbuffers::false_type,
                        flatbuffers::span<const T, length> src) {
    for (size_type k = 0; k < length; k++) { Mutate(k, src[k]); }
  }

  // This class is only used to access pre-existing data. Don't ever
  // try to construct these manually.
  // 'constexpr' allows us to use 'size()' at compile time.
  // @note Must not use 'FLATBUFFERS_CONSTEXPR' here, as const is not allowed on
  //  a constructor.
#if defined(__cpp_constexpr)
  constexpr Array();
#else
  Array();
#endif

  uint8_t data_[length * sizeof(T)];

 private:
  // This class is a pointer. Copying will therefore create an invalid object.
  // Private and unimplemented copy constructor.
  Array(const Array &);
  Array &operator=(const Array &);
};
// Specialization for Array[struct] with access using Offset<void> pointer.
// This specialization used by idl_gen_text.cpp.
template<typename T, uint16_t length> class Array<Offset<T>, length> {
  // Only Offset<void> is supported by this specialization.
  static_assert(flatbuffers::is_same<T, void>::value, "unexpected type T");

 public:
  typedef const void *return_type;

  const uint8_t *Data() const { return data_; }

  // Make idl_gen_text.cpp::PrintContainer happy.
  // Element access is deliberately unsupported: it asserts and returns null.
  return_type operator[](uoffset_t) const {
    FLATBUFFERS_ASSERT(false);
    return nullptr;
  }

 private:
  // This class is only used to access pre-existing data.
  Array();
  Array(const Array &);
  Array &operator=(const Array &);

  uint8_t data_[1];
};
// Observe an Array as a typed mutable span (no copy).
template<class U, uint16_t N>
FLATBUFFERS_CONSTEXPR_CPP11 flatbuffers::span<U, N> make_span(Array<U, N> &arr)
    FLATBUFFERS_NOEXCEPT {
  static_assert(
      Array<U, N>::is_span_observable,
      "wrong type U, only plain struct, LE-scalar, or byte types are allowed");
  return span<U, N>(arr.data(), N);
}

// Const overload of the above.
template<class U, uint16_t N>
FLATBUFFERS_CONSTEXPR_CPP11 flatbuffers::span<const U, N> make_span(
    const Array<U, N> &arr) FLATBUFFERS_NOEXCEPT {
  static_assert(
      Array<U, N>::is_span_observable,
      "wrong type U, only plain struct, LE-scalar, or byte types are allowed");
  return span<const U, N>(arr.data(), N);
}

// Observe an Array's raw little-endian storage as a span of bytes.
template<class U, uint16_t N>
FLATBUFFERS_CONSTEXPR_CPP11 flatbuffers::span<uint8_t, sizeof(U) * N>
make_bytes_span(Array<U, N> &arr) FLATBUFFERS_NOEXCEPT {
  static_assert(Array<U, N>::is_span_observable,
                "internal error, Array<T> might hold only scalars or structs");
  return span<uint8_t, sizeof(U) * N>(arr.Data(), sizeof(U) * N);
}

// Const overload of the above.
template<class U, uint16_t N>
FLATBUFFERS_CONSTEXPR_CPP11 flatbuffers::span<const uint8_t, sizeof(U) * N>
make_bytes_span(const Array<U, N> &arr) FLATBUFFERS_NOEXCEPT {
  static_assert(Array<U, N>::is_span_observable,
                "internal error, Array<T> might hold only scalars or structs");
  return span<const uint8_t, sizeof(U) * N>(arr.Data(), sizeof(U) * N);
}
// Cast a raw T[length] to a raw flatbuffers::Array<T, length>
// without endian conversion. Use with care.
// TODO: move these Cast-methods to `internal` namespace.
template<typename T, uint16_t length>
Array<T, length> &CastToArray(T (&arr)[length]) {
  return *reinterpret_cast<Array<T, length> *>(arr);
}

// Const overload of the above.
template<typename T, uint16_t length>
const Array<T, length> &CastToArray(const T (&arr)[length]) {
  return *reinterpret_cast<const Array<T, length> *>(arr);
}

// Same, but view the elements as enum type E of identical size to T.
template<typename E, typename T, uint16_t length>
Array<E, length> &CastToArrayOfEnum(T (&arr)[length]) {
  static_assert(sizeof(E) == sizeof(T), "invalid enum type E");
  return *reinterpret_cast<Array<E, length> *>(arr);
}

// Const overload of the above.
template<typename E, typename T, uint16_t length>
const Array<E, length> &CastToArrayOfEnum(const T (&arr)[length]) {
  static_assert(sizeof(E) == sizeof(T), "invalid enum type E");
  return *reinterpret_cast<const Array<E, length> *>(arr);
}
} // namespace flatbuffers
#endif // FLATBUFFERS_ARRAY_H_

View File

@ -46,14 +46,13 @@
#include <iterator>
#include <memory>
#ifdef _STLPORT_VERSION
#define FLATBUFFERS_CPP98_STL
#endif
#ifndef FLATBUFFERS_CPP98_STL
#include <functional>
#if defined(__unix__) && !defined(FLATBUFFERS_LOCALE_INDEPENDENT)
#include <unistd.h>
#endif
#include "flatbuffers/stl_emulation.h"
#ifdef __ANDROID__
#include <android/api-level.h>
#endif
#if defined(__ICCARM__)
#include <intrinsics.h>
@ -139,9 +138,9 @@
#endif
#endif // !defined(FLATBUFFERS_LITTLEENDIAN)
#define FLATBUFFERS_VERSION_MAJOR 1
#define FLATBUFFERS_VERSION_MINOR 12
#define FLATBUFFERS_VERSION_REVISION 0
#define FLATBUFFERS_VERSION_MAJOR 22
#define FLATBUFFERS_VERSION_MINOR 9
#define FLATBUFFERS_VERSION_REVISION 24
#define FLATBUFFERS_STRING_EXPAND(X) #X
#define FLATBUFFERS_STRING(X) FLATBUFFERS_STRING_EXPAND(X)
namespace flatbuffers {
@ -154,10 +153,12 @@ namespace flatbuffers {
defined(__clang__)
#define FLATBUFFERS_FINAL_CLASS final
#define FLATBUFFERS_OVERRIDE override
#define FLATBUFFERS_EXPLICIT_CPP11 explicit
#define FLATBUFFERS_VTABLE_UNDERLYING_TYPE : flatbuffers::voffset_t
#else
#define FLATBUFFERS_FINAL_CLASS
#define FLATBUFFERS_OVERRIDE
#define FLATBUFFERS_EXPLICIT_CPP11
#define FLATBUFFERS_VTABLE_UNDERLYING_TYPE
#endif
@ -165,13 +166,16 @@ namespace flatbuffers {
(!defined(__GNUC__) || (__GNUC__ * 100 + __GNUC_MINOR__ >= 406)) || \
(defined(__cpp_constexpr) && __cpp_constexpr >= 200704)
#define FLATBUFFERS_CONSTEXPR constexpr
#define FLATBUFFERS_CONSTEXPR_CPP11 constexpr
#define FLATBUFFERS_CONSTEXPR_DEFINED
#else
#define FLATBUFFERS_CONSTEXPR const
#define FLATBUFFERS_CONSTEXPR_CPP11
#endif
#if (defined(__cplusplus) && __cplusplus >= 201402L) || \
(defined(__cpp_constexpr) && __cpp_constexpr >= 201304)
#define FLATBUFFERS_CONSTEXPR_CPP14 FLATBUFFERS_CONSTEXPR
#define FLATBUFFERS_CONSTEXPR_CPP14 FLATBUFFERS_CONSTEXPR_CPP11
#else
#define FLATBUFFERS_CONSTEXPR_CPP14
#endif
@ -189,9 +193,25 @@ namespace flatbuffers {
#if (!defined(_MSC_VER) || _MSC_FULL_VER >= 180020827) && \
(!defined(__GNUC__) || (__GNUC__ * 100 + __GNUC_MINOR__ >= 404)) || \
defined(__clang__)
#define FLATBUFFERS_DELETE_FUNC(func) func = delete;
#define FLATBUFFERS_DELETE_FUNC(func) func = delete
#else
#define FLATBUFFERS_DELETE_FUNC(func) private: func;
#define FLATBUFFERS_DELETE_FUNC(func) private: func
#endif
#if (!defined(_MSC_VER) || _MSC_VER >= 1900) && \
(!defined(__GNUC__) || (__GNUC__ * 100 + __GNUC_MINOR__ >= 409)) || \
defined(__clang__)
#define FLATBUFFERS_DEFAULT_DECLARATION
#endif
// Check if we can use template aliases
// Not possible if Microsoft Compiler before 2012
// Possible is the language feature __cpp_alias_templates is defined well
// Or possible if the C++ std is C+11 or newer
#if (defined(_MSC_VER) && _MSC_VER > 1700 /* MSVC2012 */) \
|| (defined(__cpp_alias_templates) && __cpp_alias_templates >= 200704) \
|| (defined(__cplusplus) && __cplusplus >= 201103L)
#define FLATBUFFERS_TEMPLATES_ALIASES
#endif
#ifndef FLATBUFFERS_HAS_STRING_VIEW
@ -223,6 +243,11 @@ namespace flatbuffers {
#endif // __has_include
#endif // !FLATBUFFERS_HAS_STRING_VIEW
#ifndef FLATBUFFERS_GENERAL_HEAP_ALLOC_OK
// Allow heap allocations to be used
#define FLATBUFFERS_GENERAL_HEAP_ALLOC_OK 1
#endif // !FLATBUFFERS_GENERAL_HEAP_ALLOC_OK
#ifndef FLATBUFFERS_HAS_NEW_STRTOD
// Modern (C++11) strtod and strtof functions are available for use.
// 1) nan/inf strings as argument of strtod;
@ -235,11 +260,12 @@ namespace flatbuffers {
#endif // !FLATBUFFERS_HAS_NEW_STRTOD
#ifndef FLATBUFFERS_LOCALE_INDEPENDENT
// Enable locale independent functions {strtof_l, strtod_l,strtoll_l, strtoull_l}.
// They are part of the POSIX-2008 but not part of the C/C++ standard.
// GCC/Clang have definition (_XOPEN_SOURCE>=700) if POSIX-2008.
#if ((defined(_MSC_VER) && _MSC_VER >= 1800) || \
(defined(_XOPEN_SOURCE) && (_XOPEN_SOURCE>=700)))
// Enable locale independent functions {strtof_l, strtod_l,strtoll_l,
// strtoull_l}.
#if (defined(_MSC_VER) && _MSC_VER >= 1800) || \
(defined(__ANDROID_API__) && __ANDROID_API__>= 21) || \
(defined(_XOPEN_VERSION) && (_XOPEN_VERSION >= 700)) && \
(!defined(__Fuchsia__) && !defined(__ANDROID_API__))
#define FLATBUFFERS_LOCALE_INDEPENDENT 1
#else
#define FLATBUFFERS_LOCALE_INDEPENDENT 0
@ -247,14 +273,14 @@ namespace flatbuffers {
#endif // !FLATBUFFERS_LOCALE_INDEPENDENT
// Suppress Undefined Behavior Sanitizer (recoverable only). Usage:
// - __supress_ubsan__("undefined")
// - __supress_ubsan__("signed-integer-overflow")
// - __suppress_ubsan__("undefined")
// - __suppress_ubsan__("signed-integer-overflow")
#if defined(__clang__) && (__clang_major__ > 3 || (__clang_major__ == 3 && __clang_minor__ >=7))
#define __supress_ubsan__(type) __attribute__((no_sanitize(type)))
#define __suppress_ubsan__(type) __attribute__((no_sanitize(type)))
#elif defined(__GNUC__) && (__GNUC__ * 100 + __GNUC_MINOR__ >= 409)
#define __supress_ubsan__(type) __attribute__((no_sanitize_undefined))
#define __suppress_ubsan__(type) __attribute__((no_sanitize_undefined))
#else
#define __supress_ubsan__(type)
#define __suppress_ubsan__(type)
#endif
// This is constexpr function used for checking compile-time constants.
@ -267,7 +293,7 @@ template<typename T> FLATBUFFERS_CONSTEXPR inline bool IsConstTrue(T t) {
#if ((__cplusplus >= 201703L) \
|| (defined(_MSVC_LANG) && (_MSVC_LANG >= 201703L)))
// All attributes unknown to an implementation are ignored without causing an error.
#define FLATBUFFERS_ATTRIBUTE(attr) [[attr]]
#define FLATBUFFERS_ATTRIBUTE(attr) attr
#define FLATBUFFERS_FALLTHROUGH() [[fallthrough]]
#else
@ -305,10 +331,28 @@ typedef uintmax_t largest_scalar_t;
// In 32bits, this evaluates to 2GB - 1
#define FLATBUFFERS_MAX_BUFFER_SIZE ((1ULL << (sizeof(::flatbuffers::soffset_t) * 8 - 1)) - 1)
// The minimum size buffer that can be a valid flatbuffer.
// Includes the offset to the root table (uoffset_t), the offset to the vtable
// of the root table (soffset_t), the size of the vtable (uint16_t), and the
// size of the referring table (uint16_t).
#define FLATBUFFERS_MIN_BUFFER_SIZE sizeof(uoffset_t) + sizeof(soffset_t) + \
sizeof(uint16_t) + sizeof(uint16_t)
// We support aligning the contents of buffers up to this size.
#define FLATBUFFERS_MAX_ALIGNMENT 16
#ifndef FLATBUFFERS_MAX_ALIGNMENT
#define FLATBUFFERS_MAX_ALIGNMENT 32
#endif
/// @brief The length of a FlatBuffer file header.
static const size_t kFileIdentifierLength = 4;
// True iff `align` is a usable alignment: at least `min_align`, at most
// FLATBUFFERS_MAX_ALIGNMENT, and a power of two.
inline bool VerifyAlignmentRequirements(size_t align, size_t min_align = 1) {
  return (min_align <= align) && (align <= (FLATBUFFERS_MAX_ALIGNMENT)) &&
         (align & (align - 1)) == 0;  // must be power of 2
}
#if defined(_MSC_VER)
#pragma warning(disable: 4351) // C4351: new behavior: elements of array ... will be default initialized
#pragma warning(push)
#pragma warning(disable: 4127) // C4127: conditional expression is constant
#endif
@ -369,30 +413,74 @@ template<typename T> T EndianScalar(T t) {
template<typename T>
// UBSAN: C++ aliasing type rules, see std::bit_cast<> for details.
__supress_ubsan__("alignment")
__suppress_ubsan__("alignment")
T ReadScalar(const void *p) {
return EndianScalar(*reinterpret_cast<const T *>(p));
}
// See https://github.com/google/flatbuffers/issues/5950
#if (FLATBUFFERS_GCC >= 100000) && (FLATBUFFERS_GCC < 110000)
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wstringop-overflow"
#endif
template<typename T>
// UBSAN: C++ aliasing type rules, see std::bit_cast<> for details.
__supress_ubsan__("alignment")
__suppress_ubsan__("alignment")
void WriteScalar(void *p, T t) {
*reinterpret_cast<T *>(p) = EndianScalar(t);
}
template<typename T> struct Offset;
template<typename T> __supress_ubsan__("alignment") void WriteScalar(void *p, Offset<T> t) {
template<typename T> __suppress_ubsan__("alignment") void WriteScalar(void *p, Offset<T> t) {
*reinterpret_cast<uoffset_t *>(p) = EndianScalar(t.o);
}
#if (FLATBUFFERS_GCC >= 100000) && (FLATBUFFERS_GCC < 110000)
#pragma GCC diagnostic pop
#endif
// Computes how many bytes you'd have to pad to be able to write an
// "scalar_size" scalar if the buffer had grown to "buf_size" (downwards in
// memory).
__supress_ubsan__("unsigned-integer-overflow")
__suppress_ubsan__("unsigned-integer-overflow")
// Computes how many bytes you'd have to pad to be able to write an
// "scalar_size" scalar if the buffer had grown to "buf_size" (downwards in
// memory).
inline size_t PaddingBytes(size_t buf_size, size_t scalar_size) {
  // scalar_size is a power of two, so (-buf_size) & (scalar_size - 1) is
  // the distance from buf_size up to the next multiple of scalar_size.
  // Unsigned arithmetic wraps modulo 2^N, making the negation well-defined.
  const size_t mask = scalar_size - 1;
  return (0 - buf_size) & mask;
}
// Generic 'operator==' with conditional specialisations.
// T e - new value of a scalar field.
// T def - default of scalar (is known at compile-time).
template<typename T> inline bool IsTheSameAs(T e, T def) { return e == def; }

#if defined(FLATBUFFERS_NAN_DEFAULTS) && \
    defined(FLATBUFFERS_HAS_NEW_STRTOD) && (FLATBUFFERS_HAS_NEW_STRTOD > 0)
// Like `operator==(e, def)` with weak NaN if T=(float|double).
// Two NaNs compare as "same" here (def != def detects NaN), so a field whose
// default is NaN is still treated as unset when it holds NaN.
template<typename T> inline bool IsFloatTheSameAs(T e, T def) {
  return (e == def) || ((def != def) && (e != e));
}
template<> inline bool IsTheSameAs<float>(float e, float def) {
  return IsFloatTheSameAs(e, def);
}
template<> inline bool IsTheSameAs<double>(double e, double def) {
  return IsFloatTheSameAs(e, def);
}
#endif
// True when 'v' falls outside the closed interval [low; high].
// Written with operator< only, so GCC does not emit [-Werror=type-limits]
// ("comparison is always true due to limited range of data type").
template<typename T>
inline bool IsOutRange(const T &v, const T &low, const T &high) {
  if (v < low) return true;
  return high < v;
}

// True when 'v' falls inside the closed interval [low; high].
template<typename T>
inline bool IsInRange(const T &v, const T &low, const T &high) {
  return !(v < low) && !(high < v);
}
} // namespace flatbuffers
#endif // FLATBUFFERS_BASE_H_

View File

@ -0,0 +1,43 @@
/*
* Copyright 2021 Google Inc. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef FLATBUFFERS_BFBS_GENERATOR_H_
#define FLATBUFFERS_BFBS_GENERATOR_H_
#include <cstdint>
namespace flatbuffers {
// Result codes a BfbsGenerator reports back to its driver.
enum GeneratorStatus {
  OK,
  FAILED,
  FAILED_VERIFICATION,  // the input bfbs buffer did not pass verification
};

// A Flatbuffer Code Generator that receives a binary serialized reflection.fbs
// and generates code from it.
class BfbsGenerator {
 public:
  virtual ~BfbsGenerator() {}

  // Generate code from the provided `buffer` of given `length`. The buffer is
  // a serialized reflection.fbs.
  virtual GeneratorStatus Generate(const uint8_t *buffer, int64_t length) = 0;
};
} // namespace flatbuffers
#endif // FLATBUFFERS_BFBS_GENERATOR_H_

View File

@ -0,0 +1,142 @@
/*
* Copyright 2021 Google Inc. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef FLATBUFFERS_BUFFER_H_
#define FLATBUFFERS_BUFFER_H_
#include "flatbuffers/base.h"
namespace flatbuffers {
// Wrapper for uoffset_t to allow safe template specialization.
// Value is allowed to be 0 to indicate a null object (see e.g. AddOffset).
template<typename T> struct Offset {
  uoffset_t o;
  // Default-constructed Offset is null (o == 0).
  Offset() : o(0) {}
  // Non-explicit: permits implicit conversion from a raw uoffset_t.
  Offset(uoffset_t _o) : o(_o) {}
  // Type-erase to Offset<void>, e.g. for storing a union member.
  Offset<void> Union() const { return Offset<void>(o); }
  bool IsNull() const { return !o; }
};
// Runtime sanity check that the compile-time FLATBUFFERS_LITTLEENDIAN value
// matches the actual host byte order (reads the low byte of the int 1).
inline void EndianCheck() {
  int endiantest = 1;
  // If this fails, see FLATBUFFERS_LITTLEENDIAN above.
  FLATBUFFERS_ASSERT(*reinterpret_cast<char *>(&endiantest) ==
                     FLATBUFFERS_LITTLEENDIAN);
  (void)endiantest;
}
// Portable alignof(T): MSVC's __alignof, GCC/Clang's __alignof__, or the
// standard alignof keyword where available.
template<typename T> FLATBUFFERS_CONSTEXPR size_t AlignOf() {
  // clang-format off
  #ifdef _MSC_VER
    return __alignof(T);
  #else
    #ifndef alignof
      return __alignof__(T);
    #else
      return alignof(T);
    #endif
  #endif
  // clang-format on
}
// Lexicographically compare two strings (possibly containing nulls), and
// return true if the first is less than the second.
static inline bool StringLessThan(const char *a_data, uoffset_t a_size,
                                  const char *b_data, uoffset_t b_size) {
  const auto common_len = (std::min)(a_size, b_size);
  const int cmp = memcmp(a_data, b_data, common_len);
  // Identical common prefix: the shorter string orders first.
  if (cmp == 0) return a_size < b_size;
  return cmp < 0;
}
// When we read serialized data from memory, in the case of most scalars,
// we want to just read T, but in the case of Offset, we want to actually
// perform the indirection and return a pointer.
// The template specialization below does just that.
// It is wrapped in a struct since function templates can't overload on the
// return type like this.
// The typedef is for the convenience of callers of this function
// (avoiding the need for a trailing return decltype)
template<typename T> struct IndirectHelper {
  typedef T return_type;
  typedef T mutable_return_type;
  static const size_t element_stride = sizeof(T);
  // Scalars are stored little-endian; EndianScalar converts on BE hosts.
  static return_type Read(const uint8_t *p, uoffset_t i) {
    return EndianScalar((reinterpret_cast<const T *>(p))[i]);
  }
};
// Offset elements: follow the stored uoffset_t to the referenced object.
template<typename T> struct IndirectHelper<Offset<T>> {
  typedef const T *return_type;
  typedef T *mutable_return_type;
  static const size_t element_stride = sizeof(uoffset_t);
  static return_type Read(const uint8_t *p, uoffset_t i) {
    p += i * sizeof(uoffset_t);
    return reinterpret_cast<return_type>(p + ReadScalar<uoffset_t>(p));
  }
};
// Inline structs: return a pointer directly into the buffer.
template<typename T> struct IndirectHelper<const T *> {
  typedef const T *return_type;
  typedef T *mutable_return_type;
  static const size_t element_stride = sizeof(T);
  static return_type Read(const uint8_t *p, uoffset_t i) {
    return reinterpret_cast<const T *>(p + i * sizeof(T));
  }
};
/// @brief Get a pointer to the file_identifier section of the buffer.
/// @return Returns a const char pointer to the start of the file_identifier
/// characters in the buffer. The returned char * has length
/// 'flatbuffers::FlatBufferBuilder::kFileIdentifierLength'.
/// This function is UNDEFINED for FlatBuffers whose schema does not include
/// a file_identifier (likely points at padding or the start of a the root
/// vtable).
inline const char *GetBufferIdentifier(const void *buf,
                                       bool size_prefixed = false) {
  // The identifier follows the root uoffset (and the extra size field when
  // the buffer is size-prefixed).
  return reinterpret_cast<const char *>(buf) +
         ((size_prefixed) ? 2 * sizeof(uoffset_t) : sizeof(uoffset_t));
}

// Helper to see if the identifier in a buffer has the expected value.
inline bool BufferHasIdentifier(const void *buf, const char *identifier,
                                bool size_prefixed = false) {
  return strncmp(GetBufferIdentifier(buf, size_prefixed), identifier,
                 flatbuffers::kFileIdentifierLength) == 0;
}
/// @cond FLATBUFFERS_INTERNAL
// Helpers to get a typed pointer to the root object contained in the buffer.
template<typename T> T *GetMutableRoot(void *buf) {
  EndianCheck();
  // The uoffset at the start of the buffer is the offset to the root table.
  return reinterpret_cast<T *>(
      reinterpret_cast<uint8_t *>(buf) +
      EndianScalar(*reinterpret_cast<uoffset_t *>(buf)));
}

template<typename T> T *GetMutableSizePrefixedRoot(void *buf) {
  // Skip the leading size field, then resolve the root offset as usual.
  return GetMutableRoot<T>(reinterpret_cast<uint8_t *>(buf) +
                           sizeof(uoffset_t));
}

template<typename T> const T *GetRoot(const void *buf) {
  return GetMutableRoot<T>(const_cast<void *>(buf));
}

template<typename T> const T *GetSizePrefixedRoot(const void *buf) {
  return GetRoot<T>(reinterpret_cast<const uint8_t *>(buf) + sizeof(uoffset_t));
}
} // namespace flatbuffers
#endif // FLATBUFFERS_BUFFER_H_

View File

@ -0,0 +1,53 @@
/*
* Copyright 2021 Google Inc. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef FLATBUFFERS_BUFFER_REF_H_
#define FLATBUFFERS_BUFFER_REF_H_
#include "flatbuffers/base.h"
#include "flatbuffers/verifier.h"
namespace flatbuffers {
// Convenient way to bundle a buffer and its length, to pass it around
// typed by its root.
// A BufferRef does not own its buffer.
struct BufferRefBase {};  // for std::is_base_of
template<typename T> struct BufferRef : BufferRefBase {
  BufferRef() : buf(nullptr), len(0), must_free(false) {}
  // Wrap an existing buffer; ownership is NOT taken (must_free stays false).
  BufferRef(uint8_t *_buf, uoffset_t _len)
      : buf(_buf), len(_len), must_free(false) {}

  // NOTE(review): no copy control is declared, so copying a BufferRef whose
  // must_free is set would double-free `buf` — callers must avoid copying
  // owning refs; confirm against upstream usage.
  ~BufferRef() {
    if (must_free) free(buf);
  }

  const T *GetRoot() const { return flatbuffers::GetRoot<T>(buf); }

  // Verify `buf` as a flatbuffer with root table type T.
  bool Verify() {
    Verifier verifier(buf, len);
    return verifier.VerifyBuffer<T>(nullptr);
  }

  uint8_t *buf;
  uoffset_t len;
  bool must_free;  // when true, the destructor releases `buf` with free()
};
} // namespace flatbuffers
#endif // FLATBUFFERS_BUFFER_REF_H_

View File

@ -61,7 +61,7 @@ class CodeWriter {
}
// Appends the given text to the generated code as well as a newline
// character. Any text within {{ and }} delimeters is replaced by values
// character. Any text within {{ and }} delimiters is replaced by values
// previously stored in the CodeWriter by calling SetValue above. The newline
// will be suppressed if the text ends with the \\ character.
void operator+=(std::string text);
@ -76,6 +76,8 @@ class CodeWriter {
if (cur_ident_lvl_) cur_ident_lvl_--;
}
void SetPadding(const std::string &padding) { pad_ = padding; }
private:
std::map<std::string, std::string> value_map_;
std::stringstream stream_;
@ -92,7 +94,8 @@ class BaseGenerator {
virtual bool generate() = 0;
static std::string NamespaceDir(const Parser &parser, const std::string &path,
const Namespace &ns);
const Namespace &ns,
const bool dasherize = false);
std::string GeneratedFileName(const std::string &path,
const std::string &file_name,
@ -114,7 +117,8 @@ class BaseGenerator {
BaseGenerator &operator=(const BaseGenerator &);
BaseGenerator(const BaseGenerator &);
std::string NamespaceDir(const Namespace &ns) const;
std::string NamespaceDir(const Namespace &ns,
const bool dasherize = false) const;
static const char *FlatBuffersGeneratedWarning();
@ -134,7 +138,8 @@ class BaseGenerator {
std::string WrapInNameSpace(const Namespace *ns,
const std::string &name) const;
std::string WrapInNameSpace(const Definition &def) const;
std::string WrapInNameSpace(const Definition &def,
const std::string &suffix = "") const;
std::string GetNameSpace(const Definition &def) const;

View File

@ -0,0 +1,64 @@
/*
* Copyright 2021 Google Inc. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef FLATBUFFERS_DEFAULT_ALLOCATOR_H_
#define FLATBUFFERS_DEFAULT_ALLOCATOR_H_
#include "flatbuffers/allocator.h"
#include "flatbuffers/base.h"
namespace flatbuffers {
// Default Allocator implementation backed by plain new[]/delete[].
class DefaultAllocator : public Allocator {
 public:
  /// Hands out a freshly new[]-allocated region of `size` bytes.
  uint8_t *allocate(size_t size) FLATBUFFERS_OVERRIDE {
    uint8_t *const region = new uint8_t[size];
    return region;
  }
  /// Releases a region previously obtained from allocate().
  void deallocate(uint8_t *p, size_t) FLATBUFFERS_OVERRIDE { delete[] p; }
  /// Static variant with a void* signature, usable as a C-style deleter
  /// callback (e.g. handed to DetachedBuffer consumers).
  static void dealloc(void *p, size_t) { delete[] static_cast<uint8_t *>(p); }
};
// These functions allow for a null allocator to mean use the default allocator,
// as used by DetachedBuffer and vector_downward below.
// This is to avoid having a statically or dynamically allocated default
// allocator, or having to move it between the classes that may own it.
// Allocate `size` bytes via `allocator`, or via a temporary DefaultAllocator
// when `allocator` is null (the "null means default" convention).
inline uint8_t *Allocate(Allocator *allocator, size_t size) {
  if (allocator) return allocator->allocate(size);
  return DefaultAllocator().allocate(size);
}
// Free `p` (of `size` bytes) through `allocator`, falling back to a
// temporary DefaultAllocator when none was supplied.
inline void Deallocate(Allocator *allocator, uint8_t *p, size_t size) {
  allocator ? allocator->deallocate(p, size)
            : DefaultAllocator().deallocate(p, size);
}
// Grow a downward-filling buffer, preserving the in-use front/back spans.
// Routes to `allocator` when given, otherwise to a temporary
// DefaultAllocator (which uses the base-class reallocate_downward).
inline uint8_t *ReallocateDownward(Allocator *allocator, uint8_t *old_p,
                                   size_t old_size, size_t new_size,
                                   size_t in_use_back, size_t in_use_front) {
  if (allocator) {
    return allocator->reallocate_downward(old_p, old_size, new_size,
                                          in_use_back, in_use_front);
  }
  return DefaultAllocator().reallocate_downward(old_p, old_size, new_size,
                                                in_use_back, in_use_front);
}
} // namespace flatbuffers
#endif // FLATBUFFERS_DEFAULT_ALLOCATOR_H_

View File

@ -0,0 +1,114 @@
/*
* Copyright 2021 Google Inc. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef FLATBUFFERS_DETACHED_BUFFER_H_
#define FLATBUFFERS_DETACHED_BUFFER_H_
#include "flatbuffers/allocator.h"
#include "flatbuffers/base.h"
#include "flatbuffers/default_allocator.h"
namespace flatbuffers {
// DetachedBuffer is a finished flatbuffer memory region, detached from its
// builder. The original memory region and allocator are also stored so that
// the DetachedBuffer can manage the memory lifetime.
// Move-only: copying is deleted below; moving transfers ownership of the
// region (and of the allocator itself when own_allocator_ is set).
class DetachedBuffer {
 public:
  // Empty buffer: nothing owned, all pointers null.
  DetachedBuffer()
      : allocator_(nullptr),
        own_allocator_(false),
        buf_(nullptr),
        reserved_(0),
        cur_(nullptr),
        size_(0) {}
  // Takes over a region: `buf`/`reserved` is the whole allocation, while
  // `cur`/`sz` is the finished-flatbuffer sub-span within it (builders fill
  // downward, so cur_ may point inside buf_).
  DetachedBuffer(Allocator *allocator, bool own_allocator, uint8_t *buf,
                 size_t reserved, uint8_t *cur, size_t sz)
      : allocator_(allocator),
        own_allocator_(own_allocator),
        buf_(buf),
        reserved_(reserved),
        cur_(cur),
        size_(sz) {}
  // Move ctor: steal all fields, then reset() the source so its destructor
  // becomes a no-op.
  DetachedBuffer(DetachedBuffer &&other)
      : allocator_(other.allocator_),
        own_allocator_(other.own_allocator_),
        buf_(other.buf_),
        reserved_(other.reserved_),
        cur_(other.cur_),
        size_(other.size_) {
    other.reset();
  }
  // Move assign: release any currently-owned region first (destroy()), then
  // steal from `other`. Self-assignment guarded explicitly.
  DetachedBuffer &operator=(DetachedBuffer &&other) {
    if (this == &other) return *this;
    destroy();
    allocator_ = other.allocator_;
    own_allocator_ = other.own_allocator_;
    buf_ = other.buf_;
    reserved_ = other.reserved_;
    cur_ = other.cur_;
    size_ = other.size_;
    other.reset();
    return *this;
  }
  ~DetachedBuffer() { destroy(); }
  // Accessors return the finished-buffer span (cur_/size_), not the raw
  // allocation (buf_/reserved_).
  const uint8_t *data() const { return cur_; }
  uint8_t *data() { return cur_; }
  size_t size() const { return size_; }
  // These may change access mode, leave these at end of public section
  FLATBUFFERS_DELETE_FUNC(DetachedBuffer(const DetachedBuffer &other));
  FLATBUFFERS_DELETE_FUNC(
      DetachedBuffer &operator=(const DetachedBuffer &other));
 protected:
  Allocator *allocator_;  // used to free buf_; null means default allocator
  bool own_allocator_;    // when true, destroy() also deletes allocator_
  uint8_t *buf_;          // start of the whole allocation
  size_t reserved_;       // size of the whole allocation
  uint8_t *cur_;          // start of the finished flatbuffer within buf_
  size_t size_;           // size of the finished flatbuffer
  // Frees the region (and the allocator if owned), then clears all fields.
  // Order matters: the buffer must be deallocated before the allocator that
  // owns the deallocation logic is deleted.
  inline void destroy() {
    if (buf_) Deallocate(allocator_, buf_, reserved_);
    if (own_allocator_ && allocator_) { delete allocator_; }
    reset();
  }
  // Returns the object to the empty state without freeing anything; used
  // after a move-from and at the end of destroy().
  inline void reset() {
    allocator_ = nullptr;
    own_allocator_ = false;
    buf_ = nullptr;
    reserved_ = 0;
    cur_ = nullptr;
    size_ = 0;
  }
};
} // namespace flatbuffers
#endif // FLATBUFFERS_DETACHED_BUFFER_H_

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

View File

@ -21,6 +21,7 @@
#include <limits>
#include <string>
#include "flatbuffers/bfbs_generator.h"
#include "flatbuffers/flatbuffers.h"
#include "flatbuffers/idl.h"
#include "flatbuffers/util.h"
@ -30,6 +31,13 @@ namespace flatbuffers {
extern void LogCompilerWarn(const std::string &warn);
extern void LogCompilerError(const std::string &err);
struct FlatCOption {
std::string short_opt;
std::string long_opt;
std::string parameter;
std::string description;
};
class FlatCompiler {
public:
// Output generator for the various programming languages and formats we
@ -41,16 +49,18 @@ class FlatCompiler {
typedef std::string (*MakeRuleFn)(const flatbuffers::Parser &parser,
const std::string &path,
const std::string &file_name);
typedef bool (*ParsingCompletedFn)(const flatbuffers::Parser &parser,
const std::string &output_path);
GenerateFn generate;
const char *generator_opt_short;
const char *generator_opt_long;
const char *lang_name;
bool schema_only;
GenerateFn generateGRPC;
flatbuffers::IDLOptions::Language lang;
const char *generator_help;
FlatCOption option;
MakeRuleFn make_rule;
BfbsGenerator *bfbs_generator;
ParsingCompletedFn parsing_completed;
};
typedef void (*WarnFn)(const FlatCompiler *flatc, const std::string &warn,
@ -77,6 +87,7 @@ class FlatCompiler {
int Compile(int argc, const char **argv);
std::string GetShortUsageString(const char *program_name) const;
std::string GetUsageString(const char *program_name) const;
private:
@ -92,6 +103,11 @@ class FlatCompiler {
void Error(const std::string &err, bool usage = true,
bool show_exe_name = true) const;
void AnnotateBinaries(const uint8_t *binary_schema,
uint64_t binary_schema_size,
const std::string & schema_filename,
const std::vector<std::string> &binary_files);
InitParams params_;
};

View File

@ -0,0 +1,36 @@
/*
* Copyright 2022 Google Inc. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef FLATBUFFERS_FLEX_FLAT_UTIL_H_
#define FLATBUFFERS_FLEX_FLAT_UTIL_H_
#include "flatbuffers/flatbuffers.h"
#include "flatbuffers/flexbuffers.h"
namespace flexbuffers {
// Verifies the `nested` flexbuffer within a flatbuffer vector is valid.
// A missing (null) nested field is trivially valid; otherwise the raw bytes
// are run through the FlexBuffers verifier and the outcome is folded into
// the enclosing FlatBuffers verifier via Check().
inline bool VerifyNestedFlexBuffer(
    const flatbuffers::Vector<uint8_t> *const nested,
    flatbuffers::Verifier &verifier) {
  return nested == nullptr ||
         verifier.Check(flexbuffers::VerifyBuffer(
             nested->data(), nested->size(), verifier.GetFlexReuseTracker()));
}
} // namespace flexbuffers
#endif // FLATBUFFERS_FLEX_FLAT_UTIL_H_

View File

@ -53,7 +53,7 @@ enum Type {
FBT_INT = 1,
FBT_UINT = 2,
FBT_FLOAT = 3,
// Types above stored inline, types below store an offset.
// Types above stored inline, types below (except FBT_BOOL) store an offset.
FBT_KEY = 4,
FBT_STRING = 5,
FBT_INDIRECT_INT = 6,
@ -81,6 +81,8 @@ enum Type {
FBT_BOOL = 26,
FBT_VECTOR_BOOL =
36, // To Allow the same type of conversion of type to vector type
FBT_MAX_TYPE = 37
};
inline bool IsInline(Type t) { return t <= FBT_FLOAT || t == FBT_BOOL; }
@ -154,8 +156,10 @@ inline uint64_t ReadUInt64(const uint8_t *data, uint8_t byte_width) {
// TODO: GCC apparently replaces memcpy by a rep movsb, but only if count is a
// constant, which here it isn't. Test if memcpy is still faster than
// the conditionals in ReadSizedScalar. Can also use inline asm.
// clang-format off
#if defined(_MSC_VER) && (defined(_M_X64) || defined _M_IX86)
#if defined(_MSC_VER) && defined(_M_X64) && !defined(_M_ARM64EC)
// This is 64-bit Windows only, __movsb does not work on 32-bit Windows.
uint64_t u = 0;
__movsb(reinterpret_cast<uint8_t *>(&u),
reinterpret_cast<const uint8_t *>(data), byte_width);
@ -319,8 +323,8 @@ class FixedTypedVector : public Object {
return data_ == FixedTypedVector::EmptyFixedTypedVector().data_;
}
Type ElementType() { return type_; }
uint8_t size() { return len_; }
Type ElementType() const { return type_; }
uint8_t size() const { return len_; }
private:
Type type_;
@ -368,10 +372,7 @@ void AppendToString(std::string &s, T &&v, bool keys_quoted) {
class Reference {
public:
Reference()
: data_(nullptr),
parent_width_(0),
byte_width_(BIT_WIDTH_8),
type_(FBT_NULL) {}
: data_(nullptr), parent_width_(0), byte_width_(0), type_(FBT_NULL) {}
Reference(const uint8_t *data, uint8_t parent_width, uint8_t byte_width,
Type type)
@ -572,7 +573,23 @@ class Reference {
auto keys = m.Keys();
auto vals = m.Values();
for (size_t i = 0; i < keys.size(); i++) {
keys[i].ToString(true, keys_quoted, s);
bool kq = keys_quoted;
if (!kq) {
// FlexBuffers keys may contain arbitrary characters, only allow
// unquoted if it looks like an "identifier":
const char *p = keys[i].AsKey();
if (!flatbuffers::is_alpha(*p) && *p != '_') {
kq = true;
} else {
while (*++p) {
if (!flatbuffers::is_alnum(*p) && *p != '_') {
kq = true;
break;
}
}
}
}
keys[i].ToString(true, kq, s);
s += ": ";
vals[i].ToString(true, keys_quoted, s);
if (i < keys.size() - 1) s += ", ";
@ -756,6 +773,8 @@ class Reference {
return false;
}
friend class Verifier;
const uint8_t *data_;
uint8_t parent_width_;
uint8_t byte_width_;
@ -850,6 +869,7 @@ inline Reference Map::operator[](const char *key) const {
case 2: comp = KeyCompare<uint16_t>; break;
case 4: comp = KeyCompare<uint32_t>; break;
case 8: comp = KeyCompare<uint64_t>; break;
default: FLATBUFFERS_ASSERT(false); return Reference();
}
auto res = std::bsearch(key, keys.data_, keys.size(), keys.byte_width_, comp);
if (!res) return Reference(nullptr, 1, NullPackedType());
@ -872,7 +892,7 @@ inline Reference GetRoot(const uint8_t *buffer, size_t size) {
}
inline Reference GetRoot(const std::vector<uint8_t> &buffer) {
return GetRoot(flatbuffers::vector_data(buffer), buffer.size());
return GetRoot(buffer.data(), buffer.size());
}
// Flags that configure how the Builder behaves.
@ -900,6 +920,7 @@ class Builder FLATBUFFERS_FINAL_CLASS {
BuilderFlag flags = BUILDER_FLAG_SHARE_KEYS)
: buf_(initial_size),
finished_(false),
has_duplicate_keys_(false),
flags_(flags),
force_min_bit_width_(BIT_WIDTH_8),
key_pool(KeyOffsetCompare(buf_)),
@ -907,6 +928,11 @@ class Builder FLATBUFFERS_FINAL_CLASS {
buf_.clear();
}
#ifdef FLATBUFFERS_DEFAULT_DECLARATION
Builder(Builder &&) = default;
Builder &operator=(Builder &&) = default;
#endif
/// @brief Get the serialized buffer (after you call `Finish()`).
/// @return Returns a vector owned by this class.
const std::vector<uint8_t> &GetBuffer() const {
@ -1062,7 +1088,16 @@ class Builder FLATBUFFERS_FINAL_CLASS {
return CreateBlob(data, len, 0, FBT_BLOB);
}
size_t Blob(const std::vector<uint8_t> &v) {
return CreateBlob(flatbuffers::vector_data(v), v.size(), 0, FBT_BLOB);
return CreateBlob(v.data(), v.size(), 0, FBT_BLOB);
}
void Blob(const char *key, const void *data, size_t len) {
Key(key);
Blob(data, len);
}
void Blob(const char *key, const std::vector<uint8_t> &v) {
Key(key);
Blob(v);
}
// TODO(wvo): support all the FlexBuffer types (like flexbuffers::String),
@ -1080,7 +1115,7 @@ class Builder FLATBUFFERS_FINAL_CLASS {
return stack_.size();
}
// TODO(wvo): allow this to specify an aligment greater than the natural
// TODO(wvo): allow this to specify an alignment greater than the natural
// alignment.
size_t EndVector(size_t start, bool typed, bool fixed) {
auto vec = CreateVector(start, stack_.size() - start, 1, typed, fixed);
@ -1115,23 +1150,24 @@ class Builder FLATBUFFERS_FINAL_CLASS {
// step automatically when applicable, and encourage people to write in
// sorted fashion.
// std::sort is typically already a lot faster on sorted data though.
auto dict =
reinterpret_cast<TwoValue *>(flatbuffers::vector_data(stack_) + start);
std::sort(dict, dict + len,
[&](const TwoValue &a, const TwoValue &b) -> bool {
auto as = reinterpret_cast<const char *>(
flatbuffers::vector_data(buf_) + a.key.u_);
auto bs = reinterpret_cast<const char *>(
flatbuffers::vector_data(buf_) + b.key.u_);
auto comp = strcmp(as, bs);
// If this assertion hits, you've added two keys with the same
// value to this map.
// TODO: Have to check for pointer equality, as some sort
// implementation apparently call this function with the same
// element?? Why?
FLATBUFFERS_ASSERT(comp || &a == &b);
return comp < 0;
});
auto dict = reinterpret_cast<TwoValue *>(stack_.data() + start);
std::sort(
dict, dict + len, [&](const TwoValue &a, const TwoValue &b) -> bool {
auto as = reinterpret_cast<const char *>(buf_.data() + a.key.u_);
auto bs = reinterpret_cast<const char *>(buf_.data() + b.key.u_);
auto comp = strcmp(as, bs);
// We want to disallow duplicate keys, since this results in a
// map where values cannot be found.
// But we can't assert here (since we don't want to fail on
// random JSON input) or have an error mechanism.
// Instead, we set has_duplicate_keys_ in the builder to
// signal this.
// TODO: Have to check for pointer equality, as some sort
// implementations apparently call this function with the same
// element?? Why?
if (!comp && &a != &b) has_duplicate_keys_ = true;
return comp < 0;
});
// First create a vector out of all keys.
// TODO(wvo): if kBuilderFlagShareKeyVectors is true, see if we can share
// the first vector.
@ -1143,6 +1179,10 @@ class Builder FLATBUFFERS_FINAL_CLASS {
return static_cast<size_t>(vec.u_);
}
// Call this after EndMap to see if the map had any duplicate keys.
// Any map with such keys won't be able to retrieve all values.
bool HasDuplicateKeys() const { return has_duplicate_keys_; }
template<typename F> size_t Vector(F f) {
auto start = StartVector();
f();
@ -1181,7 +1221,7 @@ class Builder FLATBUFFERS_FINAL_CLASS {
Vector(elems, len);
}
template<typename T> void Vector(const std::vector<T> &vec) {
Vector(flatbuffers::vector_data(vec), vec.size());
Vector(vec.data(), vec.size());
}
template<typename F> size_t TypedVector(F f) {
@ -1256,7 +1296,7 @@ class Builder FLATBUFFERS_FINAL_CLASS {
// auto id = builder.LastValue(); // Remember where we stored it.
// .. more code goes here ..
// builder.ReuseValue(id); // Refers to same double by offset.
// LastValue works regardless of wether the value has a key or not.
// LastValue works regardless of whether the value has a key or not.
// Works on any data type.
struct Value;
Value LastValue() { return stack_.back(); }
@ -1417,7 +1457,10 @@ class Builder FLATBUFFERS_FINAL_CLASS {
Value(uint64_t u, Type t, BitWidth bw)
: u_(u), type_(t), min_bit_width_(bw) {}
Value(float f) : f_(f), type_(FBT_FLOAT), min_bit_width_(BIT_WIDTH_32) {}
Value(float f)
: f_(static_cast<double>(f)),
type_(FBT_FLOAT),
min_bit_width_(BIT_WIDTH_32) {}
Value(double f) : f_(f), type_(FBT_FLOAT), min_bit_width_(WidthF(f)) {}
uint8_t StoredPackedType(BitWidth parent_bit_width_ = BIT_WIDTH_8) const {
@ -1522,7 +1565,8 @@ class Builder FLATBUFFERS_FINAL_CLASS {
Type vector_type = FBT_KEY;
// Check bit widths and types for all elements.
for (size_t i = start; i < stack_.size(); i += step) {
auto elem_width = stack_[i].ElemWidth(buf_.size(), i + prefix_elems);
auto elem_width =
stack_[i].ElemWidth(buf_.size(), i - start + prefix_elems);
bit_width = (std::max)(bit_width, elem_width);
if (typed) {
if (i == start) {
@ -1534,9 +1578,9 @@ class Builder FLATBUFFERS_FINAL_CLASS {
}
}
}
// If you get this assert, your fixed types are not one of:
// If you get this assert, your typed types are not one of:
// Int / UInt / Float / Key.
FLATBUFFERS_ASSERT(!fixed || IsTypedVectorElementType(vector_type));
FLATBUFFERS_ASSERT(!typed || IsTypedVectorElementType(vector_type));
auto byte_width = Align(bit_width);
// Write vector. First the keys width/offset if available, and size.
if (keys) {
@ -1570,6 +1614,7 @@ class Builder FLATBUFFERS_FINAL_CLASS {
std::vector<Value> stack_;
bool finished_;
bool has_duplicate_keys_;
BuilderFlag flags_;
@ -1578,10 +1623,8 @@ class Builder FLATBUFFERS_FINAL_CLASS {
struct KeyOffsetCompare {
explicit KeyOffsetCompare(const std::vector<uint8_t> &buf) : buf_(&buf) {}
bool operator()(size_t a, size_t b) const {
auto stra =
reinterpret_cast<const char *>(flatbuffers::vector_data(*buf_) + a);
auto strb =
reinterpret_cast<const char *>(flatbuffers::vector_data(*buf_) + b);
auto stra = reinterpret_cast<const char *>(buf_->data() + a);
auto strb = reinterpret_cast<const char *>(buf_->data() + b);
return strcmp(stra, strb) < 0;
}
const std::vector<uint8_t> *buf_;
@ -1592,11 +1635,10 @@ class Builder FLATBUFFERS_FINAL_CLASS {
explicit StringOffsetCompare(const std::vector<uint8_t> &buf)
: buf_(&buf) {}
bool operator()(const StringOffset &a, const StringOffset &b) const {
auto stra = reinterpret_cast<const char *>(
flatbuffers::vector_data(*buf_) + a.first);
auto strb = reinterpret_cast<const char *>(
flatbuffers::vector_data(*buf_) + b.first);
return strncmp(stra, strb, (std::min)(a.second, b.second) + 1) < 0;
auto stra = buf_->data() + a.first;
auto strb = buf_->data() + b.first;
auto cr = memcmp(stra, strb, (std::min)(a.second, b.second) + 1);
return cr < 0 || (cr == 0 && a.second < b.second);
}
const std::vector<uint8_t> *buf_;
};
@ -1606,8 +1648,237 @@ class Builder FLATBUFFERS_FINAL_CLASS {
KeyOffsetMap key_pool;
StringOffsetMap string_pool;
friend class Verifier;
};
// Helper class to verify the integrity of a FlexBuffer
// Walks the buffer from the root reference downward, bounds-checking every
// offset, width and type it encounters before any Reader would touch it.
class Verifier FLATBUFFERS_FINAL_CLASS {
 public:
  Verifier(const uint8_t *buf, size_t buf_len,
           // Supplying this vector likely results in faster verification
           // of larger buffers with many shared keys/strings, but
           // comes at the cost of using additional memory the same size of
           // the buffer being verified, so it is by default off.
           std::vector<uint8_t> *reuse_tracker = nullptr,
           bool _check_alignment = true, size_t max_depth = 64)
      : buf_(buf),
        size_(buf_len),
        depth_(0),
        max_depth_(max_depth),
        num_vectors_(0),
        // Cap total vectors at the buffer length: a valid buffer cannot
        // contain more vectors than bytes.
        max_vectors_(buf_len),
        check_alignment_(_check_alignment),
        reuse_tracker_(reuse_tracker) {
    FLATBUFFERS_ASSERT(size_ < FLATBUFFERS_MAX_BUFFER_SIZE);
    if (reuse_tracker_) {
      // One packed-type byte per buffer byte; 0/FBT_NULL means "not yet
      // verified".
      reuse_tracker_->clear();
      reuse_tracker_->resize(size_, PackedType(BIT_WIDTH_8, FBT_NULL));
    }
  }
 private:
  // Central location where any verification failures register.
  bool Check(bool ok) const {
    // clang-format off
    #ifdef FLATBUFFERS_DEBUG_VERIFICATION_FAILURE
      FLATBUFFERS_ASSERT(ok);
    #endif
    // clang-format on
    return ok;
  }
  // Verify any range within the buffer.
  bool VerifyFrom(size_t elem, size_t elem_len) const {
    return Check(elem_len < size_ && elem <= size_ - elem_len);
  }
  // Verify that `elem_len` bytes exist *before* offset `elem` (FlexBuffers
  // stores size/type prefixes before the data they describe).
  bool VerifyBefore(size_t elem, size_t elem_len) const {
    return Check(elem_len <= elem);
  }
  bool VerifyFromPointer(const uint8_t *p, size_t len) {
    auto o = static_cast<size_t>(p - buf_);
    return VerifyFrom(o, len);
  }
  bool VerifyBeforePointer(const uint8_t *p, size_t len) {
    auto o = static_cast<size_t>(p - buf_);
    return VerifyBefore(o, len);
  }
  // Only power-of-two widths up to 8 bytes are legal.
  bool VerifyByteWidth(size_t width) {
    return Check(width == 1 || width == 2 || width == 4 || width == 8);
  }
  bool VerifyType(int type) { return Check(type >= 0 && type < FBT_MAX_TYPE); }
  // An offset is valid if it stays inside the buffer and does not point
  // past its own location (offsets point backwards).
  bool VerifyOffset(uint64_t off, const uint8_t *p) {
    return Check(off <= static_cast<uint64_t>(size_)) &&
           off <= static_cast<uint64_t>(p - buf_);
  }
  bool VerifyAlignment(const uint8_t *p, size_t size) const {
    auto o = static_cast<size_t>(p - buf_);
    return Check((o & (size - 1)) == 0 || !check_alignment_);
  }
// Macro, since we want to escape from parent function & use lazy args.
#define FLEX_CHECK_VERIFIED(P, PACKED_TYPE) \
  if (reuse_tracker_) { \
    auto packed_type = PACKED_TYPE; \
    auto existing = (*reuse_tracker_)[P - buf_]; \
    if (existing == packed_type) return true; \
    /* Fail verification if already set with different type! */ \
    if (!Check(existing == 0)) return false; \
    (*reuse_tracker_)[P - buf_] = packed_type; \
  }
  // Verifies a vector-shaped region (vectors, maps, strings, blobs all share
  // this layout: a size prefix before `p`, elements after).
  bool VerifyVector(Reference r, const uint8_t *p, Type elem_type) {
    // Any kind of nesting goes thru this function, so guard against that
    // here, both with simple nesting checks, and the reuse tracker if on.
    depth_++;
    num_vectors_++;
    if (!Check(depth_ <= max_depth_ && num_vectors_ <= max_vectors_))
      return false;
    auto size_byte_width = r.byte_width_;
    if (!VerifyBeforePointer(p, size_byte_width)) return false;
    FLEX_CHECK_VERIFIED(p - size_byte_width,
                        PackedType(Builder::WidthB(size_byte_width), r.type_));
    auto sized = Sized(p, size_byte_width);
    auto num_elems = sized.size();
    // Strings and blobs are byte vectors regardless of the parent width.
    auto elem_byte_width = r.type_ == FBT_STRING || r.type_ == FBT_BLOB
                               ? uint8_t(1)
                               : r.byte_width_;
    auto max_elems = SIZE_MAX / elem_byte_width;
    if (!Check(num_elems < max_elems))
      return false;  // Protect against byte_size overflowing.
    auto byte_size = num_elems * elem_byte_width;
    if (!VerifyFromPointer(p, byte_size)) return false;
    if (elem_type == FBT_NULL) {
      // Verify type bytes after the vector.
      if (!VerifyFromPointer(p + byte_size, num_elems)) return false;
      auto v = Vector(p, size_byte_width);
      for (size_t i = 0; i < num_elems; i++)
        if (!VerifyRef(v[i])) return false;
    } else if (elem_type == FBT_KEY) {
      auto v = TypedVector(p, elem_byte_width, FBT_KEY);
      for (size_t i = 0; i < num_elems; i++)
        if (!VerifyRef(v[i])) return false;
    } else {
      // Inline element types need no per-element verification beyond the
      // byte-range check above.
      FLATBUFFERS_ASSERT(IsInline(elem_type));
    }
    depth_--;
    return true;
  }
  // Verifies the keys vector referenced by a map's 3-field prefix
  // (keys offset, key byte width, then the values size).
  bool VerifyKeys(const uint8_t *p, uint8_t byte_width) {
    // The vector part of the map has already been verified.
    const size_t num_prefixed_fields = 3;
    if (!VerifyBeforePointer(p, byte_width * num_prefixed_fields)) return false;
    p -= byte_width * num_prefixed_fields;
    auto off = ReadUInt64(p, byte_width);
    if (!VerifyOffset(off, p)) return false;
    auto key_byte_with =
        static_cast<uint8_t>(ReadUInt64(p + byte_width, byte_width));
    if (!VerifyByteWidth(key_byte_with)) return false;
    return VerifyVector(Reference(p, byte_width, key_byte_with, FBT_VECTOR_KEY),
                        p - off, FBT_KEY);
  }
  // A key is valid if a null terminator is found before the end of buffer.
  bool VerifyKey(const uint8_t *p) {
    FLEX_CHECK_VERIFIED(p, PackedType(BIT_WIDTH_8, FBT_KEY));
    while (p < buf_ + size_)
      if (*p++) return true;
    return false;
  }
#undef FLEX_CHECK_VERIFIED
  // Strings must carry a null terminator one byte past their stated size.
  bool VerifyTerminator(const String &s) {
    return VerifyFromPointer(reinterpret_cast<const uint8_t *>(s.c_str()),
                             s.size() + 1);
  }
  // Core recursive step: verifies one Reference and, for offset types,
  // everything it points at.
  bool VerifyRef(Reference r) {
    // r.parent_width_ and r.data_ already verified.
    if (!VerifyByteWidth(r.byte_width_) || !VerifyType(r.type_)) {
      return false;
    }
    if (IsInline(r.type_)) {
      // Inline scalars, don't require further verification.
      return true;
    }
    // All remaining types are an offset.
    auto off = ReadUInt64(r.data_, r.parent_width_);
    if (!VerifyOffset(off, r.data_)) return false;
    auto p = r.Indirect();
    if (!VerifyAlignment(p, r.byte_width_)) return false;
    switch (r.type_) {
      case FBT_INDIRECT_INT:
      case FBT_INDIRECT_UINT:
      case FBT_INDIRECT_FLOAT: return VerifyFromPointer(p, r.byte_width_);
      case FBT_KEY: return VerifyKey(p);
      case FBT_MAP:
        return VerifyVector(r, p, FBT_NULL) && VerifyKeys(p, r.byte_width_);
      case FBT_VECTOR: return VerifyVector(r, p, FBT_NULL);
      case FBT_VECTOR_INT: return VerifyVector(r, p, FBT_INT);
      case FBT_VECTOR_BOOL:
      case FBT_VECTOR_UINT: return VerifyVector(r, p, FBT_UINT);
      case FBT_VECTOR_FLOAT: return VerifyVector(r, p, FBT_FLOAT);
      case FBT_VECTOR_KEY: return VerifyVector(r, p, FBT_KEY);
      case FBT_VECTOR_STRING_DEPRECATED:
        // Use of FBT_KEY here intentional, see elsewhere.
        return VerifyVector(r, p, FBT_KEY);
      case FBT_BLOB: return VerifyVector(r, p, FBT_UINT);
      case FBT_STRING:
        return VerifyVector(r, p, FBT_UINT) &&
               VerifyTerminator(String(p, r.byte_width_));
      case FBT_VECTOR_INT2:
      case FBT_VECTOR_UINT2:
      case FBT_VECTOR_FLOAT2:
      case FBT_VECTOR_INT3:
      case FBT_VECTOR_UINT3:
      case FBT_VECTOR_FLOAT3:
      case FBT_VECTOR_INT4:
      case FBT_VECTOR_UINT4:
      case FBT_VECTOR_FLOAT4: {
        // Fixed typed vectors: element count is encoded in the type itself.
        uint8_t len = 0;
        auto vtype = ToFixedTypedVectorElementType(r.type_, &len);
        if (!VerifyType(vtype)) return false;
        return VerifyFromPointer(p, r.byte_width_ * len);
      }
      default: return false;
    }
  }
 public:
  // Entry point: reads the root (last two bytes are byte width and packed
  // type, preceded by the root value) and recursively verifies from there.
  bool VerifyBuffer() {
    if (!Check(size_ >= 3)) return false;
    auto end = buf_ + size_;
    auto byte_width = *--end;
    auto packed_type = *--end;
    return VerifyByteWidth(byte_width) && Check(end - buf_ >= byte_width) &&
           VerifyRef(Reference(end - byte_width, byte_width, packed_type));
  }
 private:
  const uint8_t *buf_;                   // buffer under verification
  size_t size_;                          // buffer length in bytes
  size_t depth_;                         // current nesting depth
  const size_t max_depth_;               // nesting limit (ctor parameter)
  size_t num_vectors_;                   // vectors visited so far
  const size_t max_vectors_;             // vector-count limit (== size_)
  bool check_alignment_;                 // false disables alignment checks
  std::vector<uint8_t> *reuse_tracker_;  // optional per-byte verified-type map
};
// Utility function that constructs the Verifier for you, see above for
// parameters.
inline bool VerifyBuffer(const uint8_t *buf, size_t buf_len,
                         std::vector<uint8_t> *reuse_tracker = nullptr) {
  // Build a temporary Verifier and run the top-level check in one step.
  return Verifier(buf, buf_len, reuse_tracker).VerifyBuffer();
}
} // namespace flexbuffers
#if defined(_MSC_VER)

View File

@ -20,8 +20,9 @@
// Helper functionality to glue FlatBuffers and GRPC.
#include "flatbuffers/flatbuffers.h"
#include "grpc++/support/byte_buffer.h"
#include "grpc/byte_buffer_reader.h"
#include "grpcpp/support/byte_buffer.h"
#include "grpcpp/support/slice.h"
namespace flatbuffers {
namespace grpc {
@ -32,33 +33,23 @@ namespace grpc {
// is refcounted and ownership is be managed automatically.
template<class T> class Message {
public:
Message() : slice_(grpc_empty_slice()) {}
Message() {}
Message(grpc_slice slice, bool add_ref)
: slice_(add_ref ? grpc_slice_ref(slice) : slice) {}
Message(::grpc::Slice slice) : slice_(slice) {}
Message &operator=(const Message &other) = delete;
Message(Message &&other) : slice_(other.slice_) {
other.slice_ = grpc_empty_slice();
}
Message(Message &&other) = default;
Message(const Message &other) = delete;
Message &operator=(Message &&other) {
grpc_slice_unref(slice_);
slice_ = other.slice_;
other.slice_ = grpc_empty_slice();
return *this;
}
Message &operator=(Message &&other) = default;
~Message() { grpc_slice_unref(slice_); }
const uint8_t *mutable_data() const { return slice_.begin(); }
const uint8_t *mutable_data() const { return GRPC_SLICE_START_PTR(slice_); }
const uint8_t *data() const { return slice_.begin(); }
const uint8_t *data() const { return GRPC_SLICE_START_PTR(slice_); }
size_t size() const { return GRPC_SLICE_LENGTH(slice_); }
size_t size() const { return slice_.size(); }
bool Verify() const {
Verifier verifier(data(), size());
@ -70,10 +61,10 @@ template<class T> class Message {
const T *GetRoot() const { return flatbuffers::GetRoot<T>(data()); }
// This is only intended for serializer use, or if you know what you're doing
const grpc_slice &BorrowSlice() const { return slice_; }
const ::grpc::Slice &BorrowSlice() const { return slice_; }
private:
grpc_slice slice_;
::grpc::Slice slice_;
};
class MessageBuilder;
@ -83,12 +74,12 @@ class MessageBuilder;
// efficient to transfer buffers to gRPC.
class SliceAllocator : public Allocator {
public:
SliceAllocator() : slice_(grpc_empty_slice()) {}
SliceAllocator() {}
SliceAllocator(const SliceAllocator &other) = delete;
SliceAllocator &operator=(const SliceAllocator &other) = delete;
SliceAllocator(SliceAllocator &&other) : slice_(grpc_empty_slice()) {
SliceAllocator(SliceAllocator &&other) {
// default-construct and swap idiom
swap(other);
}
@ -105,45 +96,43 @@ class SliceAllocator : public Allocator {
swap(slice_, other.slice_);
}
virtual ~SliceAllocator() { grpc_slice_unref(slice_); }
virtual ~SliceAllocator() {}
virtual uint8_t *allocate(size_t size) override {
FLATBUFFERS_ASSERT(GRPC_SLICE_IS_EMPTY(slice_));
slice_ = grpc_slice_malloc(size);
return GRPC_SLICE_START_PTR(slice_);
FLATBUFFERS_ASSERT(slice_.size() == 0);
slice_ = ::grpc::Slice(size);
return const_cast<uint8_t *>(slice_.begin());
}
virtual void deallocate(uint8_t *p, size_t size) override {
FLATBUFFERS_ASSERT(p == GRPC_SLICE_START_PTR(slice_));
FLATBUFFERS_ASSERT(size == GRPC_SLICE_LENGTH(slice_));
grpc_slice_unref(slice_);
slice_ = grpc_empty_slice();
FLATBUFFERS_ASSERT(p == slice_.begin());
FLATBUFFERS_ASSERT(size == slice_.size());
slice_ = ::grpc::Slice();
}
virtual uint8_t *reallocate_downward(uint8_t *old_p, size_t old_size,
size_t new_size, size_t in_use_back,
size_t in_use_front) override {
FLATBUFFERS_ASSERT(old_p == GRPC_SLICE_START_PTR(slice_));
FLATBUFFERS_ASSERT(old_size == GRPC_SLICE_LENGTH(slice_));
FLATBUFFERS_ASSERT(old_p == slice_.begin());
FLATBUFFERS_ASSERT(old_size == slice_.size());
FLATBUFFERS_ASSERT(new_size > old_size);
grpc_slice old_slice = slice_;
grpc_slice new_slice = grpc_slice_malloc(new_size);
uint8_t *new_p = GRPC_SLICE_START_PTR(new_slice);
::grpc::Slice old_slice = slice_;
::grpc::Slice new_slice = ::grpc::Slice(new_size);
uint8_t *new_p = const_cast<uint8_t *>(new_slice.begin());
memcpy_downward(old_p, old_size, new_p, new_size, in_use_back,
in_use_front);
slice_ = new_slice;
grpc_slice_unref(old_slice);
return new_p;
}
private:
grpc_slice &get_slice(uint8_t *p, size_t size) {
FLATBUFFERS_ASSERT(p == GRPC_SLICE_START_PTR(slice_));
FLATBUFFERS_ASSERT(size == GRPC_SLICE_LENGTH(slice_));
::grpc::Slice &get_slice(uint8_t *p, size_t size) {
FLATBUFFERS_ASSERT(p == slice_.begin());
FLATBUFFERS_ASSERT(size == slice_.size());
return slice_;
}
grpc_slice slice_;
::grpc::Slice slice_;
friend class MessageBuilder;
};
@ -184,9 +173,9 @@ class MessageBuilder : private detail::SliceAllocatorMember,
if (buf_.capacity()) {
uint8_t *buf = buf_.scratch_data(); // pointer to memory
size_t capacity = buf_.capacity(); // size of memory
slice_allocator_.slice_ = grpc_slice_new_with_len(buf, capacity, dealloc);
slice_allocator_.slice_ = ::grpc::Slice(buf, capacity, dealloc);
} else {
slice_allocator_.slice_ = grpc_empty_slice();
slice_allocator_.slice_ = ::grpc::Slice();
}
}
@ -221,10 +210,10 @@ class MessageBuilder : private detail::SliceAllocatorMember,
// Releases the ownership of the buffer pointer.
// Returns the size, offset, and the original grpc_slice that
// allocated the buffer. Also see grpc_slice_unref().
uint8_t *ReleaseRaw(size_t &size, size_t &offset, grpc_slice &slice) {
uint8_t *ReleaseRaw(size_t &size, size_t &offset, ::grpc::Slice &slice) {
uint8_t *buf = FlatBufferBuilder::ReleaseRaw(size, offset);
slice = slice_allocator_.slice_;
slice_allocator_.slice_ = grpc_empty_slice();
slice_allocator_.slice_ = ::grpc::Slice();
return buf;
}
@ -247,11 +236,11 @@ class MessageBuilder : private detail::SliceAllocatorMember,
auto begin = msg_data - buf_data;
auto end = begin + msg_size;
// Get the slice we are working with (no refcount change)
grpc_slice slice = slice_allocator_.get_slice(buf_data, buf_size);
::grpc::Slice slice = slice_allocator_.get_slice(buf_data, buf_size);
// Extract a subslice of the existing slice (increment refcount)
grpc_slice subslice = grpc_slice_sub(slice, begin, end);
::grpc::Slice subslice = slice.sub(begin, end);
// Wrap the subslice in a `Message<T>`, but don't increment refcount
Message<T> msg(subslice, false);
Message<T> msg(subslice);
return msg;
}
@ -273,45 +262,26 @@ namespace grpc {
template<class T> class SerializationTraits<flatbuffers::grpc::Message<T>> {
public:
static grpc::Status Serialize(const flatbuffers::grpc::Message<T> &msg,
grpc_byte_buffer **buffer, bool *own_buffer) {
// We are passed in a `Message<T>`, which is a wrapper around a
// `grpc_slice`. We extract it here using `BorrowSlice()`. The const cast
// is necessary because the `grpc_raw_byte_buffer_create` func expects
// non-const slices in order to increment their refcounts.
grpc_slice *slice = const_cast<grpc_slice *>(&msg.BorrowSlice());
// Now use `grpc_raw_byte_buffer_create` to package the single slice into a
// `grpc_byte_buffer`, incrementing the refcount in the process.
*buffer = grpc_raw_byte_buffer_create(slice, 1);
ByteBuffer *buffer, bool *own_buffer) {
// Package the single slice into a `ByteBuffer`,
// incrementing the refcount in the process.
*buffer = ByteBuffer(&msg.BorrowSlice(), 1);
*own_buffer = true;
return grpc::Status::OK;
}
// Deserialize by pulling the
static grpc::Status Deserialize(grpc_byte_buffer *buffer,
static grpc::Status Deserialize(ByteBuffer *buf,
flatbuffers::grpc::Message<T> *msg) {
if (!buffer) {
return ::grpc::Status(::grpc::StatusCode::INTERNAL, "No payload");
Slice slice;
if (!buf->TrySingleSlice(&slice).ok()) {
if (!buf->DumpToSingleSlice(&slice).ok()) {
buf->Clear();
return ::grpc::Status(::grpc::StatusCode::INTERNAL, "No payload");
}
}
// Check if this is a single uncompressed slice.
if ((buffer->type == GRPC_BB_RAW) &&
(buffer->data.raw.compression == GRPC_COMPRESS_NONE) &&
(buffer->data.raw.slice_buffer.count == 1)) {
// If it is, then we can reference the `grpc_slice` directly.
grpc_slice slice = buffer->data.raw.slice_buffer.slices[0];
// We wrap a `Message<T>` around the slice, incrementing the refcount.
*msg = flatbuffers::grpc::Message<T>(slice, true);
} else {
// Otherwise, we need to use `grpc_byte_buffer_reader_readall` to read
// `buffer` into a single contiguous `grpc_slice`. The gRPC reader gives
// us back a new slice with the refcount already incremented.
grpc_byte_buffer_reader reader;
grpc_byte_buffer_reader_init(&reader, buffer);
grpc_slice slice = grpc_byte_buffer_reader_readall(&reader);
grpc_byte_buffer_reader_destroy(&reader);
// We wrap a `Message<T>` around the slice, but don't increment refcount
*msg = flatbuffers::grpc::Message<T>(slice, false);
}
grpc_byte_buffer_destroy(buffer);
*msg = flatbuffers::grpc::Message<T>(slice);
buf->Clear();
#if FLATBUFFERS_GRPC_DISABLE_AUTO_VERIFICATION
return ::grpc::Status::OK;
#else

View File

@ -17,6 +17,7 @@
#ifndef FLATBUFFERS_IDL_H_
#define FLATBUFFERS_IDL_H_
#include <functional>
#include <map>
#include <memory>
#include <stack>
@ -27,15 +28,11 @@
#include "flatbuffers/hash.h"
#include "flatbuffers/reflection.h"
#if !defined(FLATBUFFERS_CPP98_STL)
# include <functional>
#endif // !defined(FLATBUFFERS_CPP98_STL)
// This file defines the data types representing a parsed IDL (Interface
// Definition Language) / schema file.
// Limits maximum depth of nested objects.
// Prevents stack overflow while parse flatbuffers or json.
// Prevents stack overflow while parse scheme, or json, or flexbuffer.
#if !defined(FLATBUFFERS_MAX_PARSING_DEPTH)
# define FLATBUFFERS_MAX_PARSING_DEPTH 64
#endif
@ -75,8 +72,8 @@ namespace flatbuffers {
// - Go type.
// - C# / .Net type.
// - Python type.
// - Rust type.
// - Kotlin type.
// - Rust type.
// using these macros, we can now write code dealing with types just once, e.g.
@ -167,7 +164,7 @@ struct Type {
enum_def(_ed),
fixed_length(_fixed_length) {}
bool operator==(const Type &o) {
bool operator==(const Type &o) const {
return base_type == o.base_type && element == o.element &&
struct_def == o.struct_def && enum_def == o.enum_def;
}
@ -207,7 +204,7 @@ template<typename T> class SymbolTable {
}
bool Add(const std::string &name, T *e) {
vector_emplace_back(&vec, e);
vec.emplace_back(e);
auto it = dict.find(name);
if (it != dict.end()) return true;
dict[name] = e;
@ -239,7 +236,7 @@ template<typename T> class SymbolTable {
struct Namespace {
Namespace() : from_table(0) {}
// Given a (potentally unqualified) name, return the "fully qualified" name
// Given a (potentially unqualified) name, return the "fully qualified" name
// which has a full namespaced descriptor.
// With max_components you can request less than the number of components
// the current namespace has.
@ -266,7 +263,8 @@ struct Definition {
defined_namespace(nullptr),
serialized_location(0),
index(-1),
refcount(1) {}
refcount(1),
declaration_file(nullptr) {}
flatbuffers::Offset<
flatbuffers::Vector<flatbuffers::Offset<reflection::KeyValue>>>
@ -286,17 +284,18 @@ struct Definition {
uoffset_t serialized_location;
int index; // Inside the vector it is stored.
int refcount;
const std::string *declaration_file;
};
struct FieldDef : public Definition {
FieldDef()
: deprecated(false),
required(false),
key(false),
shared(false),
native_inline(false),
flexbuffer(false),
nested_flatbuffer(NULL),
presence(kDefault),
nested_flatbuffer(nullptr),
padding(0) {}
Offset<reflection::Field> Serialize(FlatBufferBuilder *builder, uint16_t id,
@ -304,16 +303,42 @@ struct FieldDef : public Definition {
bool Deserialize(Parser &parser, const reflection::Field *field);
bool IsScalarOptional() const {
return IsScalar(value.type.base_type) && IsOptional();
}
bool IsOptional() const { return presence == kOptional; }
bool IsRequired() const { return presence == kRequired; }
bool IsDefault() const { return presence == kDefault; }
Value value;
bool deprecated; // Field is allowed to be present in old data, but can't be.
// written in new data nor accessed in new code.
bool required; // Field must always be present.
bool key; // Field functions as a key for creating sorted vectors.
bool shared; // Field will be using string pooling (i.e. CreateSharedString)
// as default serialization behavior if field is a string.
bool native_inline; // Field will be defined inline (instead of as a pointer)
// for native tables if field is a struct.
bool flexbuffer; // This field contains FlexBuffer data.
enum Presence {
// Field must always be present.
kRequired,
// Non-presence should be signalled to and controlled by users.
kOptional,
// Non-presence is hidden from users.
// Implementations may omit writing default values.
kDefault,
};
Presence static MakeFieldPresence(bool optional, bool required) {
FLATBUFFERS_ASSERT(!(required && optional));
// clang-format off
return required ? FieldDef::kRequired
: optional ? FieldDef::kOptional
: FieldDef::kDefault;
// clang-format on
}
Presence presence;
StructDef *nested_flatbuffer; // This field contains nested FlatBuffer data.
size_t padding; // Bytes to always pad after this field.
};
@ -410,9 +435,7 @@ struct EnumDef : public Definition {
size_t size() const { return vals.vec.size(); }
const std::vector<EnumVal *> &Vals() const {
return vals.vec;
}
const std::vector<EnumVal *> &Vals() const { return vals.vec; }
const EnumVal *Lookup(const std::string &enum_name) const {
return vals.Lookup(enum_name);
@ -433,18 +456,38 @@ struct EnumDef : public Definition {
SymbolTable<EnumVal> vals;
};
inline bool IsString(const Type &type) {
return type.base_type == BASE_TYPE_STRING;
}
inline bool IsStruct(const Type &type) {
return type.base_type == BASE_TYPE_STRUCT && type.struct_def->fixed;
}
inline bool IsTable(const Type &type) {
return type.base_type == BASE_TYPE_STRUCT && !type.struct_def->fixed;
}
inline bool IsUnion(const Type &type) {
return type.enum_def != nullptr && type.enum_def->is_union;
}
inline bool IsUnionType(const Type &type) {
return IsUnion(type) && IsInteger(type.base_type);
}
inline bool IsVector(const Type &type) {
return type.base_type == BASE_TYPE_VECTOR;
}
inline bool IsVectorOfStruct(const Type& type) {
return IsVector(type) && IsStruct(type.VectorType());
}
inline bool IsVectorOfTable(const Type& type) {
return IsVector(type) && IsTable(type.VectorType());
}
inline bool IsArray(const Type &type) {
return type.base_type == BASE_TYPE_ARRAY;
}
@ -506,19 +549,39 @@ struct ServiceDef : public Definition {
SymbolTable<RPCCall> calls;
};
struct IncludedFile {
// The name of the schema file being included, as defined in the .fbs file.
// This includes the prefix (e.g., include "foo/bar/baz.fbs" would mean this
// value is "foo/bar/baz.fbs").
std::string schema_name;
// The filename of where the included file was found, after searching the
// relative paths plus any other paths included with `flatc -I ...`. Note,
// while this is sometimes the same as schema_name, it is not always, since it
// can be defined relative to where flatc was invoked.
std::string filename;
};
// Since IncludedFile is contained within a std::set, need to provide ordering.
inline bool operator<(const IncludedFile &a, const IncludedFile &b) {
return a.filename < b.filename;
}
// Container of options that may apply to any of the source/text generators.
struct IDLOptions {
// field case style options for C++
enum CaseStyle { CaseStyle_Unchanged = 0, CaseStyle_Upper, CaseStyle_Lower };
bool gen_jvmstatic;
// Use flexbuffers instead for binary and text generation
bool use_flexbuffers;
bool strict_json;
bool skip_js_exports;
bool use_goog_js_export_format;
bool use_ES6_js_export_format;
bool output_default_scalars_in_json;
int indent_step;
bool output_enum_identifiers;
bool prefixed_enums;
bool scoped_enums;
bool swift_implementation_only;
bool include_dependence_headers;
bool mutable_buffer;
bool one_file;
@ -532,24 +595,24 @@ struct IDLOptions {
std::string cpp_object_api_pointer_type;
std::string cpp_object_api_string_type;
bool cpp_object_api_string_flexible_constructor;
CaseStyle cpp_object_api_field_case_style;
bool cpp_direct_copy;
bool gen_nullable;
bool java_checkerframework;
bool gen_generated;
bool gen_json_coders;
std::string object_prefix;
std::string object_suffix;
bool union_value_namespacing;
bool allow_non_utf8;
bool natural_utf8;
std::string include_prefix;
bool keep_include_path;
bool keep_prefix;
bool binary_schema_comments;
bool binary_schema_builtins;
bool binary_schema_gen_embed;
bool skip_flatbuffers_import;
std::string go_import;
std::string go_namespace;
bool reexport_ts_modules;
bool js_ts_short_names;
bool protobuf_ascii_alike;
bool size_prefixed;
std::string root_type;
@ -558,9 +621,19 @@ struct IDLOptions {
bool cs_gen_json_serializer;
std::vector<std::string> cpp_includes;
std::string cpp_std;
bool cpp_static_reflection;
std::string proto_namespace_suffix;
std::string filename_suffix;
std::string filename_extension;
bool no_warnings;
bool warnings_as_errors;
std::string project_root;
bool cs_global_alias;
bool json_nested_flatbuffers;
bool json_nested_flexbuffers;
bool json_nested_legacy_flatbuffers;
bool ts_flat_file;
bool no_leak_private_annotations;
// Possible options for the more general generator below.
enum Language {
@ -568,7 +641,6 @@ struct IDLOptions {
kCSharp = 1 << 1,
kGo = 1 << 2,
kCpp = 1 << 3,
kJs = 1 << 4,
kPython = 1 << 5,
kPhp = 1 << 6,
kJson = 1 << 7,
@ -584,12 +656,19 @@ struct IDLOptions {
kMAX
};
Language lang;
enum MiniReflect { kNone, kTypes, kTypesAndNames };
MiniReflect mini_reflect;
// If set, require all fields in a table to be explicitly numbered.
bool require_explicit_ids;
// If set, implement serde::Serialize for generated Rust types
bool rust_serialize;
// If set, generate rust types in individual files with a root module file.
bool rust_module_root_file;
// The corresponding language bit will be set if a language is included
// for code generation.
unsigned long lang_to_generate;
@ -603,16 +682,15 @@ struct IDLOptions {
bool set_empty_vectors_to_null;
IDLOptions()
: use_flexbuffers(false),
: gen_jvmstatic(false),
use_flexbuffers(false),
strict_json(false),
skip_js_exports(false),
use_goog_js_export_format(false),
use_ES6_js_export_format(false),
output_default_scalars_in_json(false),
indent_step(2),
output_enum_identifiers(true),
prefixed_enums(true),
scoped_enums(false),
swift_implementation_only(false),
include_dependence_headers(true),
mutable_buffer(false),
one_file(false),
@ -625,29 +703,41 @@ struct IDLOptions {
gen_compare(false),
cpp_object_api_pointer_type("std::unique_ptr"),
cpp_object_api_string_flexible_constructor(false),
cpp_object_api_field_case_style(CaseStyle_Unchanged),
cpp_direct_copy(true),
gen_nullable(false),
java_checkerframework(false),
gen_generated(false),
gen_json_coders(false),
object_suffix("T"),
union_value_namespacing(true),
allow_non_utf8(false),
natural_utf8(false),
keep_include_path(false),
keep_prefix(false),
binary_schema_comments(false),
binary_schema_builtins(false),
binary_schema_gen_embed(false),
skip_flatbuffers_import(false),
reexport_ts_modules(true),
js_ts_short_names(false),
protobuf_ascii_alike(false),
size_prefixed(false),
force_defaults(false),
java_primitive_has_method(false),
cs_gen_json_serializer(false),
cpp_static_reflection(false),
filename_suffix("_generated"),
filename_extension(),
lang(IDLOptions::kJava),
no_warnings(false),
warnings_as_errors(false),
project_root(""),
cs_global_alias(false),
json_nested_flatbuffers(true),
json_nested_flexbuffers(true),
json_nested_legacy_flatbuffers(false),
ts_flat_file(false),
no_leak_private_annotations(false),
mini_reflect(IDLOptions::kNone),
require_explicit_ids(false),
rust_serialize(false),
rust_module_root_file(false),
lang_to_generate(0),
set_empty_strings_to_null(true),
set_empty_vectors_to_null(true) {}
@ -747,9 +837,11 @@ class Parser : public ParserState {
root_struct_def_(nullptr),
opts(options),
uses_flexbuffers_(false),
has_warning_(false),
advanced_features_(0),
source_(nullptr),
anonymous_counter(0),
recurse_protection_counter(0) {
anonymous_counter_(0),
parse_depth_counter_(0) {
if (opts.force_defaults) { builder_.ForceDefaults(true); }
// Start out with the empty namespace being current.
empty_namespace_ = new Namespace();
@ -776,6 +868,7 @@ class Parser : public ParserState {
known_attributes_["native_inline"] = true;
known_attributes_["native_custom_alloc"] = true;
known_attributes_["native_type"] = true;
known_attributes_["native_type_pack_name"] = true;
known_attributes_["native_default"] = true;
known_attributes_["flexbuffer"] = true;
known_attributes_["private"] = true;
@ -801,6 +894,8 @@ class Parser : public ParserState {
bool Parse(const char *_source, const char **include_paths = nullptr,
const char *source_filename = nullptr);
bool ParseJson(const char *json, const char *json_filename = nullptr);
// Set the root type. May override the one set in the schema.
bool SetRootType(const char *name);
@ -835,12 +930,25 @@ class Parser : public ParserState {
flexbuffers::Builder *builder);
StructDef *LookupStruct(const std::string &id) const;
StructDef *LookupStructThruParentNamespaces(const std::string &id) const;
std::string UnqualifiedName(const std::string &fullQualifiedName);
FLATBUFFERS_CHECKED_ERROR Error(const std::string &msg);
// @brief Verify that any of 'opts.lang_to_generate' supports Optional scalars
// in a schema.
// @param opts Options used to parce a schema and generate code.
static bool SupportsOptionalScalars(const flatbuffers::IDLOptions &opts);
// Get the set of included files that are directly referenced by the file
// being parsed. This does not include files that are transitively included by
// others includes.
std::vector<IncludedFile> GetIncludedFiles() const;
private:
class ParseDepthGuard;
void Message(const std::string &msg);
void Warning(const std::string &msg);
FLATBUFFERS_CHECKED_ERROR ParseHexNum(int nibbles, uint64_t *val);
@ -859,7 +967,7 @@ class Parser : public ParserState {
const std::string &name, const Type &type,
FieldDef **dest);
FLATBUFFERS_CHECKED_ERROR ParseField(StructDef &struct_def);
FLATBUFFERS_CHECKED_ERROR ParseString(Value &val);
FLATBUFFERS_CHECKED_ERROR ParseString(Value &val, bool use_string_pooling);
FLATBUFFERS_CHECKED_ERROR ParseComma();
FLATBUFFERS_CHECKED_ERROR ParseAnyValue(Value &val, FieldDef *field,
size_t parent_fieldn,
@ -891,19 +999,21 @@ class Parser : public ParserState {
FLATBUFFERS_CHECKED_ERROR TokenError();
FLATBUFFERS_CHECKED_ERROR ParseSingleValue(const std::string *name, Value &e,
bool check_now);
FLATBUFFERS_CHECKED_ERROR ParseFunction(const std::string *name, Value &e);
FLATBUFFERS_CHECKED_ERROR ParseEnumFromString(const Type &type,
std::string *result);
StructDef *LookupCreateStruct(const std::string &name,
bool create_if_new = true,
bool definition = false);
FLATBUFFERS_CHECKED_ERROR ParseEnum(bool is_union, EnumDef **dest);
FLATBUFFERS_CHECKED_ERROR ParseEnum(bool is_union, EnumDef **dest,
const char *filename);
FLATBUFFERS_CHECKED_ERROR ParseNamespace();
FLATBUFFERS_CHECKED_ERROR StartStruct(const std::string &name,
StructDef **dest);
FLATBUFFERS_CHECKED_ERROR StartEnum(const std::string &name, bool is_union,
EnumDef **dest);
FLATBUFFERS_CHECKED_ERROR ParseDecl();
FLATBUFFERS_CHECKED_ERROR ParseService();
FLATBUFFERS_CHECKED_ERROR ParseDecl(const char *filename);
FLATBUFFERS_CHECKED_ERROR ParseService(const char *filename);
FLATBUFFERS_CHECKED_ERROR ParseProtoFields(StructDef *struct_def,
bool isextend, bool inside_oneof);
FLATBUFFERS_CHECKED_ERROR ParseProtoOption();
@ -912,27 +1022,39 @@ class Parser : public ParserState {
FLATBUFFERS_CHECKED_ERROR ParseProtoCurliesOrIdent();
FLATBUFFERS_CHECKED_ERROR ParseTypeFromProtoType(Type *type);
FLATBUFFERS_CHECKED_ERROR SkipAnyJsonValue();
FLATBUFFERS_CHECKED_ERROR ParseFlexBufferNumericConstant(
flexbuffers::Builder *builder);
FLATBUFFERS_CHECKED_ERROR ParseFlexBufferValue(flexbuffers::Builder *builder);
FLATBUFFERS_CHECKED_ERROR StartParseFile(const char *source,
const char *source_filename);
FLATBUFFERS_CHECKED_ERROR ParseRoot(const char *_source,
const char **include_paths,
const char *source_filename);
FLATBUFFERS_CHECKED_ERROR CheckPrivateLeak();
FLATBUFFERS_CHECKED_ERROR CheckPrivatelyLeakedFields(
const Definition &def, const Definition &value_type);
FLATBUFFERS_CHECKED_ERROR DoParse(const char *_source,
const char **include_paths,
const char *source_filename,
const char *include_filename);
FLATBUFFERS_CHECKED_ERROR DoParseJson();
FLATBUFFERS_CHECKED_ERROR CheckClash(std::vector<FieldDef *> &fields,
StructDef *struct_def,
const char *suffix, BaseType baseType);
FLATBUFFERS_CHECKED_ERROR ParseAlignAttribute(
const std::string &align_constant, size_t min_align, size_t *align);
bool SupportsAdvancedUnionFeatures() const;
bool SupportsAdvancedArrayFeatures() const;
bool SupportsOptionalScalars() const;
bool SupportsDefaultVectorsAndStrings() const;
Namespace *UniqueNamespace(Namespace *ns);
FLATBUFFERS_CHECKED_ERROR RecurseError();
template<typename F> CheckedError Recurse(F f);
const std::string &GetPooledString(const std::string &s) const;
public:
SymbolTable<Type> types_;
SymbolTable<StructDef> structs_;
@ -950,32 +1072,35 @@ class Parser : public ParserState {
std::string file_identifier_;
std::string file_extension_;
std::map<std::string, std::string> included_files_;
std::map<std::string, std::set<std::string>> files_included_per_file_;
std::map<uint64_t, std::string> included_files_;
std::map<std::string, std::set<IncludedFile>> files_included_per_file_;
std::vector<std::string> native_included_files_;
std::map<std::string, bool> known_attributes_;
IDLOptions opts;
bool uses_flexbuffers_;
bool has_warning_;
uint64_t advanced_features_;
std::string file_being_parsed_;
private:
const char *source_;
std::string file_being_parsed_;
std::vector<std::pair<Value, FieldDef *>> field_stack_;
int anonymous_counter;
int recurse_protection_counter;
// TODO(cneo): Refactor parser to use string_cache more often to save
// on memory usage.
mutable std::set<std::string> string_cache_;
int anonymous_counter_;
int parse_depth_counter_; // stack-overflow guard
};
// Utility functions for multiple generators:
extern std::string MakeCamel(const std::string &in, bool first = true);
extern std::string MakeScreamingCamel(const std::string &in);
// Generate text (JSON) from a given FlatBuffer, and a given Parser
// object that has been populated with the corresponding schema.
// If ident_step is 0, no indentation will be generated. Additionally,
@ -992,6 +1117,10 @@ extern bool GenerateText(const Parser &parser, const void *flatbuffer,
extern bool GenerateTextFile(const Parser &parser, const std::string &path,
const std::string &file_name);
// Generate Json schema to string
// See idl_gen_json_schema.cpp.
extern bool GenerateJsonSchema(const Parser &parser, std::string *json);
// Generate binary files from a given FlatBuffer, and a given Parser
// object that has been populated with the corresponding schema.
// See code_generators.cpp.
@ -1018,8 +1147,8 @@ extern bool GenerateJava(const Parser &parser, const std::string &path,
// Generate JavaScript or TypeScript code from the definitions in the Parser
// object. See idl_gen_js.
extern bool GenerateJSTS(const Parser &parser, const std::string &path,
const std::string &file_name);
extern bool GenerateTS(const Parser &parser, const std::string &path,
const std::string &file_name);
// Generate Go files from the definitions in the Parser object.
// See idl_gen_go.cpp.
@ -1071,10 +1200,10 @@ extern std::string GenerateFBS(const Parser &parser,
extern bool GenerateFBS(const Parser &parser, const std::string &path,
const std::string &file_name);
// Generate a make rule for the generated JavaScript or TypeScript code.
// See idl_gen_js.cpp.
extern std::string JSTSMakeRule(const Parser &parser, const std::string &path,
const std::string &file_name);
// Generate a make rule for the generated TypeScript code.
// See idl_gen_ts.cpp.
extern std::string TSMakeRule(const Parser &parser, const std::string &path,
const std::string &file_name);
// Generate a make rule for the generated C++ header.
// See idl_gen_cpp.cpp.
@ -1093,9 +1222,10 @@ extern std::string RustMakeRule(const Parser &parser, const std::string &path,
// Generate a make rule for generated Java or C# files.
// See code_generators.cpp.
extern std::string JavaCSharpMakeRule(const Parser &parser,
const std::string &path,
const std::string &file_name);
extern std::string CSharpMakeRule(const Parser &parser, const std::string &path,
const std::string &file_name);
extern std::string JavaMakeRule(const Parser &parser, const std::string &path,
const std::string &file_name);
// Generate a make rule for the generated text (JSON) files.
// See idl_gen_text.cpp.
@ -1132,6 +1262,11 @@ bool GeneratePythonGRPC(const Parser &parser, const std::string &path,
extern bool GenerateSwiftGRPC(const Parser &parser, const std::string &path,
const std::string &file_name);
extern bool GenerateTSGRPC(const Parser &parser, const std::string &path,
const std::string &file_name);
extern bool GenerateRustModuleRootFile(const Parser &parser,
const std::string &path);
} // namespace flatbuffers
#endif // FLATBUFFERS_IDL_H_

View File

@ -38,7 +38,7 @@ struct IterationVisitor {
// These mark the scope of a table or struct.
virtual void StartSequence() {}
virtual void EndSequence() {}
// Called for each field regardless of wether it is present or not.
// Called for each field regardless of whether it is present or not.
// If not present, val == nullptr. set_idx is the index of all set fields.
virtual void Field(size_t /*field_idx*/, size_t /*set_idx*/,
ElementaryType /*type*/, bool /*is_vector*/,
@ -234,10 +234,11 @@ inline void IterateObject(const uint8_t *obj, const TypeTable *type_table,
visitor->StartSequence();
const uint8_t *prev_val = nullptr;
size_t set_idx = 0;
size_t array_idx = 0;
for (size_t i = 0; i < type_table->num_elems; i++) {
auto type_code = type_table->type_codes[i];
auto type = static_cast<ElementaryType>(type_code.base_type);
auto is_vector = type_code.is_vector != 0;
auto is_repeating = type_code.is_repeating != 0;
auto ref_idx = type_code.sequence_ref;
const TypeTable *ref = nullptr;
if (ref_idx >= 0) { ref = type_table->type_refs[ref_idx](); }
@ -249,15 +250,25 @@ inline void IterateObject(const uint8_t *obj, const TypeTable *type_table,
} else {
val = obj + type_table->values[i];
}
visitor->Field(i, set_idx, type, is_vector, ref, name, val);
visitor->Field(i, set_idx, type, is_repeating, ref, name, val);
if (val) {
set_idx++;
if (is_vector) {
val += ReadScalar<uoffset_t>(val);
auto vec = reinterpret_cast<const Vector<uint8_t> *>(val);
if (is_repeating) {
auto elem_ptr = val;
size_t size = 0;
if (type_table->st == ST_TABLE) {
// variable length vector
val += ReadScalar<uoffset_t>(val);
auto vec = reinterpret_cast<const Vector<uint8_t> *>(val);
elem_ptr = vec->Data();
size = vec->size();
} else {
// otherwise fixed size array
size = type_table->array_sizes[array_idx];
++array_idx;
}
visitor->StartVector();
auto elem_ptr = vec->Data();
for (size_t j = 0; j < vec->size(); j++) {
for (size_t j = 0; j < size; j++) {
visitor->Element(j, type, ref, elem_ptr);
IterateValue(type, elem_ptr, ref, prev_val, static_cast<soffset_t>(j),
visitor);

View File

@ -0,0 +1,39 @@
/*
* Copyright 2017 Google Inc. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef FLATBUFFERS_FLATC_PCH_H_
#define FLATBUFFERS_FLATC_PCH_H_
// stl
#include <cmath>
#include <sstream>
#include <cassert>
#include <unordered_set>
#include <unordered_map>
#include <iostream>
#include <functional>
#include <set>
#include <iterator>
#include <tuple>
// flatbuffers
#include "flatbuffers/pch/pch.h"
#include "flatbuffers/code_generators.h"
#include "flatbuffers/flatbuffers.h"
#include "flatbuffers/flexbuffers.h"
#include "flatbuffers/idl.h"
#endif // FLATBUFFERS_FLATC_PCH_H_

View File

@ -0,0 +1,38 @@
/*
* Copyright 2017 Google Inc. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef FLATBUFFERS_PCH_H_
#define FLATBUFFERS_PCH_H_
// stl
#include <cstdint>
#include <cstring>
#include <algorithm>
#include <list>
#include <string>
#include <utility>
#include <iomanip>
#include <map>
#include <memory>
#include <limits>
#include <stack>
#include <vector>
#include <type_traits>
// flatbuffers
#include "flatbuffers/util.h"
#endif // FLATBUFFERS_PCH_H_

View File

@ -21,7 +21,7 @@
// file) is needed to generate this header in the first place.
// Should normally not be a problem since it can be generated by the
// previous version of flatc whenever this code needs to change.
// See reflection/generate_code.sh
// See scripts/generate_code.py for generation.
#include "flatbuffers/reflection_generated.h"
// Helper functionality for reflection.
@ -46,7 +46,32 @@ inline bool IsLong(reflection::BaseType t) {
// Size of a basic type, don't use with structs.
inline size_t GetTypeSize(reflection::BaseType base_type) {
// This needs to correspond to the BaseType enum.
static size_t sizes[] = { 0, 1, 1, 1, 1, 2, 2, 4, 4, 8, 8, 4, 8, 4, 4, 4, 4 };
static size_t sizes[] = {
0, // None
1, // UType
1, // Bool
1, // Byte
1, // UByte
2, // Short
2, // UShort
4, // Int
4, // UInt
8, // Long
8, // ULong
4, // Float
8, // Double
4, // String
4, // Vector
4, // Obj
4, // Union
0, // Array. Only used in structs. 0 was chosen to prevent out-of-bounds
// errors.
0 // MaxBaseType. This must be kept the last entry in this array.
};
static_assert(sizeof(sizes) / sizeof(size_t) == reflection::MaxBaseType + 1,
"Size of sizes[] array does not match the count of BaseType "
"enum values.");
return sizes[base_type];
}
@ -63,13 +88,22 @@ inline size_t GetTypeSizeInline(reflection::BaseType base_type, int type_index,
}
// Get the root, regardless of what type it is.
inline Table *GetAnyRoot(uint8_t *flatbuf) {
inline Table *GetAnyRoot(uint8_t *const flatbuf) {
return GetMutableRoot<Table>(flatbuf);
}
inline const Table *GetAnyRoot(const uint8_t *flatbuf) {
inline const Table *GetAnyRoot(const uint8_t *const flatbuf) {
return GetRoot<Table>(flatbuf);
}
inline Table *GetAnySizePrefixedRoot(uint8_t *const flatbuf) {
return GetMutableSizePrefixedRoot<Table>(flatbuf);
}
inline const Table *GetAnySizePrefixedRoot(const uint8_t *const flatbuf) {
return GetSizePrefixedRoot<Table>(flatbuf);
}
// Get a field's default, if you know it's an integer, and its exact type.
template<typename T> T GetFieldDefaultI(const reflection::Field &field) {
FLATBUFFERS_ASSERT(sizeof(T) == GetTypeSize(field.type()->base_type()));
@ -254,6 +288,12 @@ T *GetAnyFieldAddressOf(const Struct &st, const reflection::Field &field) {
return reinterpret_cast<T *>(st.GetAddressOf(field.offset()));
}
// Loop over all the fields of the provided `object` and call `func` on each one
// in increasing order by their field->id(). If `reverse` is true, `func` is
// called in descending order
void ForAllFields(const reflection::Object *object, bool reverse,
std::function<void(const reflection::Field *)> func);
// ------------------------- SETTERS -------------------------
// Set any scalar field, if you know its exact type.
@ -354,15 +394,14 @@ template<typename T, typename U> class pointer_inside_vector {
public:
pointer_inside_vector(T *ptr, std::vector<U> &vec)
: offset_(reinterpret_cast<uint8_t *>(ptr) -
reinterpret_cast<uint8_t *>(flatbuffers::vector_data(vec))),
reinterpret_cast<uint8_t *>(vec.data())),
vec_(vec) {}
T *operator*() const {
return reinterpret_cast<T *>(
reinterpret_cast<uint8_t *>(flatbuffers::vector_data(vec_)) + offset_);
return reinterpret_cast<T *>(reinterpret_cast<uint8_t *>(vec_.data()) +
offset_);
}
T *operator->() const { return operator*(); }
void operator=(const pointer_inside_vector &piv);
private:
size_t offset_;
@ -388,7 +427,7 @@ inline const reflection::Object &GetUnionType(
FLATBUFFERS_ASSERT(type_field);
auto union_type = GetFieldI<uint8_t>(table, *type_field);
auto enumval = enumdef->values()->LookupByKey(union_type);
return *enumval->object();
return *schema.objects()->Get(enumval->union_type()->index());
}
// Changes the contents of a string inside a FlatBuffer. FlatBuffer must
@ -470,7 +509,13 @@ Offset<const Table *> CopyTable(FlatBufferBuilder &fbb,
// buf should point to the start of flatbuffer data.
// length specifies the size of the flatbuffer data.
bool Verify(const reflection::Schema &schema, const reflection::Object &root,
const uint8_t *buf, size_t length);
const uint8_t *buf, size_t length, uoffset_t max_depth = 64,
uoffset_t max_tables = 1000000);
bool VerifySizePrefixed(const reflection::Schema &schema,
const reflection::Object &root, const uint8_t *buf,
size_t length, uoffset_t max_depth = 64,
uoffset_t max_tables = 1000000);
} // namespace flatbuffers

View File

@ -6,6 +6,13 @@
#include "flatbuffers/flatbuffers.h"
// Ensure the included flatbuffers.h is the same version as when this file was
// generated, otherwise it may not be compatible.
static_assert(FLATBUFFERS_VERSION_MAJOR == 22 &&
FLATBUFFERS_VERSION_MINOR == 9 &&
FLATBUFFERS_VERSION_REVISION == 24,
"Non-compatible flatbuffers version included");
namespace reflection {
struct Type;
@ -32,6 +39,9 @@ struct RPCCallBuilder;
struct Service;
struct ServiceBuilder;
struct SchemaFile;
struct SchemaFileBuilder;
struct Schema;
struct SchemaBuilder;
@ -53,10 +63,11 @@ enum BaseType {
Vector = 14,
Obj = 15,
Union = 16,
Array = 17
Array = 17,
MaxBaseType = 18
};
inline const BaseType (&EnumValuesBaseType())[18] {
inline const BaseType (&EnumValuesBaseType())[19] {
static const BaseType values[] = {
None,
UType,
@ -75,13 +86,14 @@ inline const BaseType (&EnumValuesBaseType())[18] {
Vector,
Obj,
Union,
Array
Array,
MaxBaseType
};
return values;
}
inline const char * const *EnumNamesBaseType() {
static const char * const names[19] = {
static const char * const names[20] = {
"None",
"UType",
"Bool",
@ -100,24 +112,66 @@ inline const char * const *EnumNamesBaseType() {
"Obj",
"Union",
"Array",
"MaxBaseType",
nullptr
};
return names;
}
inline const char *EnumNameBaseType(BaseType e) {
if (flatbuffers::IsOutRange(e, None, Array)) return "";
if (flatbuffers::IsOutRange(e, None, MaxBaseType)) return "";
const size_t index = static_cast<size_t>(e);
return EnumNamesBaseType()[index];
}
/// New schema language features that are not supported by old code generators.
enum AdvancedFeatures {
AdvancedArrayFeatures = 1ULL,
AdvancedUnionFeatures = 2ULL,
OptionalScalars = 4ULL,
DefaultVectorsAndStrings = 8ULL
};
inline const AdvancedFeatures (&EnumValuesAdvancedFeatures())[4] {
static const AdvancedFeatures values[] = {
AdvancedArrayFeatures,
AdvancedUnionFeatures,
OptionalScalars,
DefaultVectorsAndStrings
};
return values;
}
inline const char * const *EnumNamesAdvancedFeatures() {
static const char * const names[9] = {
"AdvancedArrayFeatures",
"AdvancedUnionFeatures",
"",
"OptionalScalars",
"",
"",
"",
"DefaultVectorsAndStrings",
nullptr
};
return names;
}
inline const char *EnumNameAdvancedFeatures(AdvancedFeatures e) {
if (flatbuffers::IsOutRange(e, AdvancedArrayFeatures, DefaultVectorsAndStrings)) return "";
const size_t index = static_cast<size_t>(e) - static_cast<size_t>(AdvancedArrayFeatures);
return EnumNamesAdvancedFeatures()[index];
}
struct Type FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
typedef TypeBuilder Builder;
enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE {
VT_BASE_TYPE = 4,
VT_ELEMENT = 6,
VT_INDEX = 8,
VT_FIXED_LENGTH = 10
VT_FIXED_LENGTH = 10,
VT_BASE_SIZE = 12,
VT_ELEMENT_SIZE = 14
};
reflection::BaseType base_type() const {
return static_cast<reflection::BaseType>(GetField<int8_t>(VT_BASE_TYPE, 0));
@ -131,12 +185,22 @@ struct Type FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
uint16_t fixed_length() const {
return GetField<uint16_t>(VT_FIXED_LENGTH, 0);
}
/// The size (octets) of the `base_type` field.
uint32_t base_size() const {
return GetField<uint32_t>(VT_BASE_SIZE, 4);
}
/// The size (octets) of the `element` field, if present.
uint32_t element_size() const {
return GetField<uint32_t>(VT_ELEMENT_SIZE, 0);
}
bool Verify(flatbuffers::Verifier &verifier) const {
return VerifyTableStart(verifier) &&
VerifyField<int8_t>(verifier, VT_BASE_TYPE) &&
VerifyField<int8_t>(verifier, VT_ELEMENT) &&
VerifyField<int32_t>(verifier, VT_INDEX) &&
VerifyField<uint16_t>(verifier, VT_FIXED_LENGTH) &&
VerifyField<int8_t>(verifier, VT_BASE_TYPE, 1) &&
VerifyField<int8_t>(verifier, VT_ELEMENT, 1) &&
VerifyField<int32_t>(verifier, VT_INDEX, 4) &&
VerifyField<uint16_t>(verifier, VT_FIXED_LENGTH, 2) &&
VerifyField<uint32_t>(verifier, VT_BASE_SIZE, 4) &&
VerifyField<uint32_t>(verifier, VT_ELEMENT_SIZE, 4) &&
verifier.EndTable();
}
};
@ -157,11 +221,16 @@ struct TypeBuilder {
void add_fixed_length(uint16_t fixed_length) {
fbb_.AddElement<uint16_t>(Type::VT_FIXED_LENGTH, fixed_length, 0);
}
void add_base_size(uint32_t base_size) {
fbb_.AddElement<uint32_t>(Type::VT_BASE_SIZE, base_size, 4);
}
void add_element_size(uint32_t element_size) {
fbb_.AddElement<uint32_t>(Type::VT_ELEMENT_SIZE, element_size, 0);
}
explicit TypeBuilder(flatbuffers::FlatBufferBuilder &_fbb)
: fbb_(_fbb) {
start_ = fbb_.StartTable();
}
TypeBuilder &operator=(const TypeBuilder &);
flatbuffers::Offset<Type> Finish() {
const auto end = fbb_.EndTable(start_);
auto o = flatbuffers::Offset<Type>(end);
@ -174,8 +243,12 @@ inline flatbuffers::Offset<Type> CreateType(
reflection::BaseType base_type = reflection::None,
reflection::BaseType element = reflection::None,
int32_t index = -1,
uint16_t fixed_length = 0) {
uint16_t fixed_length = 0,
uint32_t base_size = 4,
uint32_t element_size = 0) {
TypeBuilder builder_(_fbb);
builder_.add_element_size(element_size);
builder_.add_base_size(base_size);
builder_.add_index(index);
builder_.add_fixed_length(fixed_length);
builder_.add_element(element);
@ -195,8 +268,8 @@ struct KeyValue FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
bool KeyCompareLessThan(const KeyValue *o) const {
return *key() < *o->key();
}
int KeyCompareWithValue(const char *val) const {
return strcmp(key()->c_str(), val);
int KeyCompareWithValue(const char *_key) const {
return strcmp(key()->c_str(), _key);
}
const flatbuffers::String *value() const {
return GetPointer<const flatbuffers::String *>(VT_VALUE);
@ -225,7 +298,6 @@ struct KeyValueBuilder {
: fbb_(_fbb) {
start_ = fbb_.StartTable();
}
KeyValueBuilder &operator=(const KeyValueBuilder &);
flatbuffers::Offset<KeyValue> Finish() {
const auto end = fbb_.EndTable(start_);
auto o = flatbuffers::Offset<KeyValue>(end);
@ -261,7 +333,6 @@ struct EnumVal FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE {
VT_NAME = 4,
VT_VALUE = 6,
VT_OBJECT = 8,
VT_UNION_TYPE = 10,
VT_DOCUMENTATION = 12
};
@ -274,11 +345,8 @@ struct EnumVal FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
bool KeyCompareLessThan(const EnumVal *o) const {
return value() < o->value();
}
int KeyCompareWithValue(int64_t val) const {
return static_cast<int>(value() > val) - static_cast<int>(value() < val);
}
const reflection::Object *object() const {
return GetPointer<const reflection::Object *>(VT_OBJECT);
int KeyCompareWithValue(int64_t _value) const {
return static_cast<int>(value() > _value) - static_cast<int>(value() < _value);
}
const reflection::Type *union_type() const {
return GetPointer<const reflection::Type *>(VT_UNION_TYPE);
@ -290,9 +358,7 @@ struct EnumVal FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
return VerifyTableStart(verifier) &&
VerifyOffsetRequired(verifier, VT_NAME) &&
verifier.VerifyString(name()) &&
VerifyField<int64_t>(verifier, VT_VALUE) &&
VerifyOffset(verifier, VT_OBJECT) &&
verifier.VerifyTable(object()) &&
VerifyField<int64_t>(verifier, VT_VALUE, 8) &&
VerifyOffset(verifier, VT_UNION_TYPE) &&
verifier.VerifyTable(union_type()) &&
VerifyOffset(verifier, VT_DOCUMENTATION) &&
@ -312,9 +378,6 @@ struct EnumValBuilder {
void add_value(int64_t value) {
fbb_.AddElement<int64_t>(EnumVal::VT_VALUE, value, 0);
}
void add_object(flatbuffers::Offset<reflection::Object> object) {
fbb_.AddOffset(EnumVal::VT_OBJECT, object);
}
void add_union_type(flatbuffers::Offset<reflection::Type> union_type) {
fbb_.AddOffset(EnumVal::VT_UNION_TYPE, union_type);
}
@ -325,7 +388,6 @@ struct EnumValBuilder {
: fbb_(_fbb) {
start_ = fbb_.StartTable();
}
EnumValBuilder &operator=(const EnumValBuilder &);
flatbuffers::Offset<EnumVal> Finish() {
const auto end = fbb_.EndTable(start_);
auto o = flatbuffers::Offset<EnumVal>(end);
@ -338,14 +400,12 @@ inline flatbuffers::Offset<EnumVal> CreateEnumVal(
flatbuffers::FlatBufferBuilder &_fbb,
flatbuffers::Offset<flatbuffers::String> name = 0,
int64_t value = 0,
flatbuffers::Offset<reflection::Object> object = 0,
flatbuffers::Offset<reflection::Type> union_type = 0,
flatbuffers::Offset<flatbuffers::Vector<flatbuffers::Offset<flatbuffers::String>>> documentation = 0) {
EnumValBuilder builder_(_fbb);
builder_.add_value(value);
builder_.add_documentation(documentation);
builder_.add_union_type(union_type);
builder_.add_object(object);
builder_.add_name(name);
return builder_.Finish();
}
@ -354,7 +414,6 @@ inline flatbuffers::Offset<EnumVal> CreateEnumValDirect(
flatbuffers::FlatBufferBuilder &_fbb,
const char *name = nullptr,
int64_t value = 0,
flatbuffers::Offset<reflection::Object> object = 0,
flatbuffers::Offset<reflection::Type> union_type = 0,
const std::vector<flatbuffers::Offset<flatbuffers::String>> *documentation = nullptr) {
auto name__ = name ? _fbb.CreateString(name) : 0;
@ -363,7 +422,6 @@ inline flatbuffers::Offset<EnumVal> CreateEnumValDirect(
_fbb,
name__,
value,
object,
union_type,
documentation__);
}
@ -376,7 +434,8 @@ struct Enum FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
VT_IS_UNION = 8,
VT_UNDERLYING_TYPE = 10,
VT_ATTRIBUTES = 12,
VT_DOCUMENTATION = 14
VT_DOCUMENTATION = 14,
VT_DECLARATION_FILE = 16
};
const flatbuffers::String *name() const {
return GetPointer<const flatbuffers::String *>(VT_NAME);
@ -384,8 +443,8 @@ struct Enum FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
bool KeyCompareLessThan(const Enum *o) const {
return *name() < *o->name();
}
int KeyCompareWithValue(const char *val) const {
return strcmp(name()->c_str(), val);
int KeyCompareWithValue(const char *_name) const {
return strcmp(name()->c_str(), _name);
}
const flatbuffers::Vector<flatbuffers::Offset<reflection::EnumVal>> *values() const {
return GetPointer<const flatbuffers::Vector<flatbuffers::Offset<reflection::EnumVal>> *>(VT_VALUES);
@ -402,6 +461,10 @@ struct Enum FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
const flatbuffers::Vector<flatbuffers::Offset<flatbuffers::String>> *documentation() const {
return GetPointer<const flatbuffers::Vector<flatbuffers::Offset<flatbuffers::String>> *>(VT_DOCUMENTATION);
}
/// File that this Enum is declared in.
const flatbuffers::String *declaration_file() const {
return GetPointer<const flatbuffers::String *>(VT_DECLARATION_FILE);
}
bool Verify(flatbuffers::Verifier &verifier) const {
return VerifyTableStart(verifier) &&
VerifyOffsetRequired(verifier, VT_NAME) &&
@ -409,7 +472,7 @@ struct Enum FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
VerifyOffsetRequired(verifier, VT_VALUES) &&
verifier.VerifyVector(values()) &&
verifier.VerifyVectorOfTables(values()) &&
VerifyField<uint8_t>(verifier, VT_IS_UNION) &&
VerifyField<uint8_t>(verifier, VT_IS_UNION, 1) &&
VerifyOffsetRequired(verifier, VT_UNDERLYING_TYPE) &&
verifier.VerifyTable(underlying_type()) &&
VerifyOffset(verifier, VT_ATTRIBUTES) &&
@ -418,6 +481,8 @@ struct Enum FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
VerifyOffset(verifier, VT_DOCUMENTATION) &&
verifier.VerifyVector(documentation()) &&
verifier.VerifyVectorOfStrings(documentation()) &&
VerifyOffset(verifier, VT_DECLARATION_FILE) &&
verifier.VerifyString(declaration_file()) &&
verifier.EndTable();
}
};
@ -444,11 +509,13 @@ struct EnumBuilder {
void add_documentation(flatbuffers::Offset<flatbuffers::Vector<flatbuffers::Offset<flatbuffers::String>>> documentation) {
fbb_.AddOffset(Enum::VT_DOCUMENTATION, documentation);
}
void add_declaration_file(flatbuffers::Offset<flatbuffers::String> declaration_file) {
fbb_.AddOffset(Enum::VT_DECLARATION_FILE, declaration_file);
}
explicit EnumBuilder(flatbuffers::FlatBufferBuilder &_fbb)
: fbb_(_fbb) {
start_ = fbb_.StartTable();
}
EnumBuilder &operator=(const EnumBuilder &);
flatbuffers::Offset<Enum> Finish() {
const auto end = fbb_.EndTable(start_);
auto o = flatbuffers::Offset<Enum>(end);
@ -466,8 +533,10 @@ inline flatbuffers::Offset<Enum> CreateEnum(
bool is_union = false,
flatbuffers::Offset<reflection::Type> underlying_type = 0,
flatbuffers::Offset<flatbuffers::Vector<flatbuffers::Offset<reflection::KeyValue>>> attributes = 0,
flatbuffers::Offset<flatbuffers::Vector<flatbuffers::Offset<flatbuffers::String>>> documentation = 0) {
flatbuffers::Offset<flatbuffers::Vector<flatbuffers::Offset<flatbuffers::String>>> documentation = 0,
flatbuffers::Offset<flatbuffers::String> declaration_file = 0) {
EnumBuilder builder_(_fbb);
builder_.add_declaration_file(declaration_file);
builder_.add_documentation(documentation);
builder_.add_attributes(attributes);
builder_.add_underlying_type(underlying_type);
@ -484,11 +553,13 @@ inline flatbuffers::Offset<Enum> CreateEnumDirect(
bool is_union = false,
flatbuffers::Offset<reflection::Type> underlying_type = 0,
std::vector<flatbuffers::Offset<reflection::KeyValue>> *attributes = nullptr,
const std::vector<flatbuffers::Offset<flatbuffers::String>> *documentation = nullptr) {
const std::vector<flatbuffers::Offset<flatbuffers::String>> *documentation = nullptr,
const char *declaration_file = nullptr) {
auto name__ = name ? _fbb.CreateString(name) : 0;
auto values__ = values ? _fbb.CreateVectorOfSortedTables<reflection::EnumVal>(values) : 0;
auto attributes__ = attributes ? _fbb.CreateVectorOfSortedTables<reflection::KeyValue>(attributes) : 0;
auto documentation__ = documentation ? _fbb.CreateVector<flatbuffers::Offset<flatbuffers::String>>(*documentation) : 0;
auto declaration_file__ = declaration_file ? _fbb.CreateString(declaration_file) : 0;
return reflection::CreateEnum(
_fbb,
name__,
@ -496,7 +567,8 @@ inline flatbuffers::Offset<Enum> CreateEnumDirect(
is_union,
underlying_type,
attributes__,
documentation__);
documentation__,
declaration_file__);
}
struct Field FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
@ -512,7 +584,9 @@ struct Field FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
VT_REQUIRED = 18,
VT_KEY = 20,
VT_ATTRIBUTES = 22,
VT_DOCUMENTATION = 24
VT_DOCUMENTATION = 24,
VT_OPTIONAL = 26,
VT_PADDING = 28
};
const flatbuffers::String *name() const {
return GetPointer<const flatbuffers::String *>(VT_NAME);
@ -520,8 +594,8 @@ struct Field FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
bool KeyCompareLessThan(const Field *o) const {
return *name() < *o->name();
}
int KeyCompareWithValue(const char *val) const {
return strcmp(name()->c_str(), val);
int KeyCompareWithValue(const char *_name) const {
return strcmp(name()->c_str(), _name);
}
const reflection::Type *type() const {
return GetPointer<const reflection::Type *>(VT_TYPE);
@ -553,25 +627,34 @@ struct Field FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
const flatbuffers::Vector<flatbuffers::Offset<flatbuffers::String>> *documentation() const {
return GetPointer<const flatbuffers::Vector<flatbuffers::Offset<flatbuffers::String>> *>(VT_DOCUMENTATION);
}
bool optional() const {
return GetField<uint8_t>(VT_OPTIONAL, 0) != 0;
}
/// Number of padding octets to always add after this field. Structs only.
uint16_t padding() const {
return GetField<uint16_t>(VT_PADDING, 0);
}
bool Verify(flatbuffers::Verifier &verifier) const {
return VerifyTableStart(verifier) &&
VerifyOffsetRequired(verifier, VT_NAME) &&
verifier.VerifyString(name()) &&
VerifyOffsetRequired(verifier, VT_TYPE) &&
verifier.VerifyTable(type()) &&
VerifyField<uint16_t>(verifier, VT_ID) &&
VerifyField<uint16_t>(verifier, VT_OFFSET) &&
VerifyField<int64_t>(verifier, VT_DEFAULT_INTEGER) &&
VerifyField<double>(verifier, VT_DEFAULT_REAL) &&
VerifyField<uint8_t>(verifier, VT_DEPRECATED) &&
VerifyField<uint8_t>(verifier, VT_REQUIRED) &&
VerifyField<uint8_t>(verifier, VT_KEY) &&
VerifyField<uint16_t>(verifier, VT_ID, 2) &&
VerifyField<uint16_t>(verifier, VT_OFFSET, 2) &&
VerifyField<int64_t>(verifier, VT_DEFAULT_INTEGER, 8) &&
VerifyField<double>(verifier, VT_DEFAULT_REAL, 8) &&
VerifyField<uint8_t>(verifier, VT_DEPRECATED, 1) &&
VerifyField<uint8_t>(verifier, VT_REQUIRED, 1) &&
VerifyField<uint8_t>(verifier, VT_KEY, 1) &&
VerifyOffset(verifier, VT_ATTRIBUTES) &&
verifier.VerifyVector(attributes()) &&
verifier.VerifyVectorOfTables(attributes()) &&
VerifyOffset(verifier, VT_DOCUMENTATION) &&
verifier.VerifyVector(documentation()) &&
verifier.VerifyVectorOfStrings(documentation()) &&
VerifyField<uint8_t>(verifier, VT_OPTIONAL, 1) &&
VerifyField<uint16_t>(verifier, VT_PADDING, 2) &&
verifier.EndTable();
}
};
@ -613,11 +696,16 @@ struct FieldBuilder {
void add_documentation(flatbuffers::Offset<flatbuffers::Vector<flatbuffers::Offset<flatbuffers::String>>> documentation) {
fbb_.AddOffset(Field::VT_DOCUMENTATION, documentation);
}
void add_optional(bool optional) {
fbb_.AddElement<uint8_t>(Field::VT_OPTIONAL, static_cast<uint8_t>(optional), 0);
}
void add_padding(uint16_t padding) {
fbb_.AddElement<uint16_t>(Field::VT_PADDING, padding, 0);
}
explicit FieldBuilder(flatbuffers::FlatBufferBuilder &_fbb)
: fbb_(_fbb) {
start_ = fbb_.StartTable();
}
FieldBuilder &operator=(const FieldBuilder &);
flatbuffers::Offset<Field> Finish() {
const auto end = fbb_.EndTable(start_);
auto o = flatbuffers::Offset<Field>(end);
@ -639,7 +727,9 @@ inline flatbuffers::Offset<Field> CreateField(
bool required = false,
bool key = false,
flatbuffers::Offset<flatbuffers::Vector<flatbuffers::Offset<reflection::KeyValue>>> attributes = 0,
flatbuffers::Offset<flatbuffers::Vector<flatbuffers::Offset<flatbuffers::String>>> documentation = 0) {
flatbuffers::Offset<flatbuffers::Vector<flatbuffers::Offset<flatbuffers::String>>> documentation = 0,
bool optional = false,
uint16_t padding = 0) {
FieldBuilder builder_(_fbb);
builder_.add_default_real(default_real);
builder_.add_default_integer(default_integer);
@ -647,8 +737,10 @@ inline flatbuffers::Offset<Field> CreateField(
builder_.add_attributes(attributes);
builder_.add_type(type);
builder_.add_name(name);
builder_.add_padding(padding);
builder_.add_offset(offset);
builder_.add_id(id);
builder_.add_optional(optional);
builder_.add_key(key);
builder_.add_required(required);
builder_.add_deprecated(deprecated);
@ -667,7 +759,9 @@ inline flatbuffers::Offset<Field> CreateFieldDirect(
bool required = false,
bool key = false,
std::vector<flatbuffers::Offset<reflection::KeyValue>> *attributes = nullptr,
const std::vector<flatbuffers::Offset<flatbuffers::String>> *documentation = nullptr) {
const std::vector<flatbuffers::Offset<flatbuffers::String>> *documentation = nullptr,
bool optional = false,
uint16_t padding = 0) {
auto name__ = name ? _fbb.CreateString(name) : 0;
auto attributes__ = attributes ? _fbb.CreateVectorOfSortedTables<reflection::KeyValue>(attributes) : 0;
auto documentation__ = documentation ? _fbb.CreateVector<flatbuffers::Offset<flatbuffers::String>>(*documentation) : 0;
@ -683,7 +777,9 @@ inline flatbuffers::Offset<Field> CreateFieldDirect(
required,
key,
attributes__,
documentation__);
documentation__,
optional,
padding);
}
struct Object FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
@ -695,7 +791,8 @@ struct Object FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
VT_MINALIGN = 10,
VT_BYTESIZE = 12,
VT_ATTRIBUTES = 14,
VT_DOCUMENTATION = 16
VT_DOCUMENTATION = 16,
VT_DECLARATION_FILE = 18
};
const flatbuffers::String *name() const {
return GetPointer<const flatbuffers::String *>(VT_NAME);
@ -703,8 +800,8 @@ struct Object FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
bool KeyCompareLessThan(const Object *o) const {
return *name() < *o->name();
}
int KeyCompareWithValue(const char *val) const {
return strcmp(name()->c_str(), val);
int KeyCompareWithValue(const char *_name) const {
return strcmp(name()->c_str(), _name);
}
const flatbuffers::Vector<flatbuffers::Offset<reflection::Field>> *fields() const {
return GetPointer<const flatbuffers::Vector<flatbuffers::Offset<reflection::Field>> *>(VT_FIELDS);
@ -724,6 +821,10 @@ struct Object FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
const flatbuffers::Vector<flatbuffers::Offset<flatbuffers::String>> *documentation() const {
return GetPointer<const flatbuffers::Vector<flatbuffers::Offset<flatbuffers::String>> *>(VT_DOCUMENTATION);
}
/// File that this Object is declared in.
const flatbuffers::String *declaration_file() const {
return GetPointer<const flatbuffers::String *>(VT_DECLARATION_FILE);
}
bool Verify(flatbuffers::Verifier &verifier) const {
return VerifyTableStart(verifier) &&
VerifyOffsetRequired(verifier, VT_NAME) &&
@ -731,15 +832,17 @@ struct Object FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
VerifyOffsetRequired(verifier, VT_FIELDS) &&
verifier.VerifyVector(fields()) &&
verifier.VerifyVectorOfTables(fields()) &&
VerifyField<uint8_t>(verifier, VT_IS_STRUCT) &&
VerifyField<int32_t>(verifier, VT_MINALIGN) &&
VerifyField<int32_t>(verifier, VT_BYTESIZE) &&
VerifyField<uint8_t>(verifier, VT_IS_STRUCT, 1) &&
VerifyField<int32_t>(verifier, VT_MINALIGN, 4) &&
VerifyField<int32_t>(verifier, VT_BYTESIZE, 4) &&
VerifyOffset(verifier, VT_ATTRIBUTES) &&
verifier.VerifyVector(attributes()) &&
verifier.VerifyVectorOfTables(attributes()) &&
VerifyOffset(verifier, VT_DOCUMENTATION) &&
verifier.VerifyVector(documentation()) &&
verifier.VerifyVectorOfStrings(documentation()) &&
VerifyOffset(verifier, VT_DECLARATION_FILE) &&
verifier.VerifyString(declaration_file()) &&
verifier.EndTable();
}
};
@ -769,11 +872,13 @@ struct ObjectBuilder {
void add_documentation(flatbuffers::Offset<flatbuffers::Vector<flatbuffers::Offset<flatbuffers::String>>> documentation) {
fbb_.AddOffset(Object::VT_DOCUMENTATION, documentation);
}
void add_declaration_file(flatbuffers::Offset<flatbuffers::String> declaration_file) {
fbb_.AddOffset(Object::VT_DECLARATION_FILE, declaration_file);
}
explicit ObjectBuilder(flatbuffers::FlatBufferBuilder &_fbb)
: fbb_(_fbb) {
start_ = fbb_.StartTable();
}
ObjectBuilder &operator=(const ObjectBuilder &);
flatbuffers::Offset<Object> Finish() {
const auto end = fbb_.EndTable(start_);
auto o = flatbuffers::Offset<Object>(end);
@ -791,8 +896,10 @@ inline flatbuffers::Offset<Object> CreateObject(
int32_t minalign = 0,
int32_t bytesize = 0,
flatbuffers::Offset<flatbuffers::Vector<flatbuffers::Offset<reflection::KeyValue>>> attributes = 0,
flatbuffers::Offset<flatbuffers::Vector<flatbuffers::Offset<flatbuffers::String>>> documentation = 0) {
flatbuffers::Offset<flatbuffers::Vector<flatbuffers::Offset<flatbuffers::String>>> documentation = 0,
flatbuffers::Offset<flatbuffers::String> declaration_file = 0) {
ObjectBuilder builder_(_fbb);
builder_.add_declaration_file(declaration_file);
builder_.add_documentation(documentation);
builder_.add_attributes(attributes);
builder_.add_bytesize(bytesize);
@ -811,11 +918,13 @@ inline flatbuffers::Offset<Object> CreateObjectDirect(
int32_t minalign = 0,
int32_t bytesize = 0,
std::vector<flatbuffers::Offset<reflection::KeyValue>> *attributes = nullptr,
const std::vector<flatbuffers::Offset<flatbuffers::String>> *documentation = nullptr) {
const std::vector<flatbuffers::Offset<flatbuffers::String>> *documentation = nullptr,
const char *declaration_file = nullptr) {
auto name__ = name ? _fbb.CreateString(name) : 0;
auto fields__ = fields ? _fbb.CreateVectorOfSortedTables<reflection::Field>(fields) : 0;
auto attributes__ = attributes ? _fbb.CreateVectorOfSortedTables<reflection::KeyValue>(attributes) : 0;
auto documentation__ = documentation ? _fbb.CreateVector<flatbuffers::Offset<flatbuffers::String>>(*documentation) : 0;
auto declaration_file__ = declaration_file ? _fbb.CreateString(declaration_file) : 0;
return reflection::CreateObject(
_fbb,
name__,
@ -824,7 +933,8 @@ inline flatbuffers::Offset<Object> CreateObjectDirect(
minalign,
bytesize,
attributes__,
documentation__);
documentation__,
declaration_file__);
}
struct RPCCall FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
@ -842,8 +952,8 @@ struct RPCCall FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
bool KeyCompareLessThan(const RPCCall *o) const {
return *name() < *o->name();
}
int KeyCompareWithValue(const char *val) const {
return strcmp(name()->c_str(), val);
int KeyCompareWithValue(const char *_name) const {
return strcmp(name()->c_str(), _name);
}
const reflection::Object *request() const {
return GetPointer<const reflection::Object *>(VT_REQUEST);
@ -898,7 +1008,6 @@ struct RPCCallBuilder {
: fbb_(_fbb) {
start_ = fbb_.StartTable();
}
RPCCallBuilder &operator=(const RPCCallBuilder &);
flatbuffers::Offset<RPCCall> Finish() {
const auto end = fbb_.EndTable(start_);
auto o = flatbuffers::Offset<RPCCall>(end);
@ -950,7 +1059,8 @@ struct Service FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
VT_NAME = 4,
VT_CALLS = 6,
VT_ATTRIBUTES = 8,
VT_DOCUMENTATION = 10
VT_DOCUMENTATION = 10,
VT_DECLARATION_FILE = 12
};
const flatbuffers::String *name() const {
return GetPointer<const flatbuffers::String *>(VT_NAME);
@ -958,8 +1068,8 @@ struct Service FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
bool KeyCompareLessThan(const Service *o) const {
return *name() < *o->name();
}
int KeyCompareWithValue(const char *val) const {
return strcmp(name()->c_str(), val);
int KeyCompareWithValue(const char *_name) const {
return strcmp(name()->c_str(), _name);
}
const flatbuffers::Vector<flatbuffers::Offset<reflection::RPCCall>> *calls() const {
return GetPointer<const flatbuffers::Vector<flatbuffers::Offset<reflection::RPCCall>> *>(VT_CALLS);
@ -970,6 +1080,10 @@ struct Service FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
const flatbuffers::Vector<flatbuffers::Offset<flatbuffers::String>> *documentation() const {
return GetPointer<const flatbuffers::Vector<flatbuffers::Offset<flatbuffers::String>> *>(VT_DOCUMENTATION);
}
/// File that this Service is declared in.
const flatbuffers::String *declaration_file() const {
return GetPointer<const flatbuffers::String *>(VT_DECLARATION_FILE);
}
bool Verify(flatbuffers::Verifier &verifier) const {
return VerifyTableStart(verifier) &&
VerifyOffsetRequired(verifier, VT_NAME) &&
@ -983,6 +1097,8 @@ struct Service FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
VerifyOffset(verifier, VT_DOCUMENTATION) &&
verifier.VerifyVector(documentation()) &&
verifier.VerifyVectorOfStrings(documentation()) &&
VerifyOffset(verifier, VT_DECLARATION_FILE) &&
verifier.VerifyString(declaration_file()) &&
verifier.EndTable();
}
};
@ -1003,11 +1119,13 @@ struct ServiceBuilder {
void add_documentation(flatbuffers::Offset<flatbuffers::Vector<flatbuffers::Offset<flatbuffers::String>>> documentation) {
fbb_.AddOffset(Service::VT_DOCUMENTATION, documentation);
}
void add_declaration_file(flatbuffers::Offset<flatbuffers::String> declaration_file) {
fbb_.AddOffset(Service::VT_DECLARATION_FILE, declaration_file);
}
explicit ServiceBuilder(flatbuffers::FlatBufferBuilder &_fbb)
: fbb_(_fbb) {
start_ = fbb_.StartTable();
}
ServiceBuilder &operator=(const ServiceBuilder &);
flatbuffers::Offset<Service> Finish() {
const auto end = fbb_.EndTable(start_);
auto o = flatbuffers::Offset<Service>(end);
@ -1021,8 +1139,10 @@ inline flatbuffers::Offset<Service> CreateService(
flatbuffers::Offset<flatbuffers::String> name = 0,
flatbuffers::Offset<flatbuffers::Vector<flatbuffers::Offset<reflection::RPCCall>>> calls = 0,
flatbuffers::Offset<flatbuffers::Vector<flatbuffers::Offset<reflection::KeyValue>>> attributes = 0,
flatbuffers::Offset<flatbuffers::Vector<flatbuffers::Offset<flatbuffers::String>>> documentation = 0) {
flatbuffers::Offset<flatbuffers::Vector<flatbuffers::Offset<flatbuffers::String>>> documentation = 0,
flatbuffers::Offset<flatbuffers::String> declaration_file = 0) {
ServiceBuilder builder_(_fbb);
builder_.add_declaration_file(declaration_file);
builder_.add_documentation(documentation);
builder_.add_attributes(attributes);
builder_.add_calls(calls);
@ -1035,17 +1155,98 @@ inline flatbuffers::Offset<Service> CreateServiceDirect(
const char *name = nullptr,
std::vector<flatbuffers::Offset<reflection::RPCCall>> *calls = nullptr,
std::vector<flatbuffers::Offset<reflection::KeyValue>> *attributes = nullptr,
const std::vector<flatbuffers::Offset<flatbuffers::String>> *documentation = nullptr) {
const std::vector<flatbuffers::Offset<flatbuffers::String>> *documentation = nullptr,
const char *declaration_file = nullptr) {
auto name__ = name ? _fbb.CreateString(name) : 0;
auto calls__ = calls ? _fbb.CreateVectorOfSortedTables<reflection::RPCCall>(calls) : 0;
auto attributes__ = attributes ? _fbb.CreateVectorOfSortedTables<reflection::KeyValue>(attributes) : 0;
auto documentation__ = documentation ? _fbb.CreateVector<flatbuffers::Offset<flatbuffers::String>>(*documentation) : 0;
auto declaration_file__ = declaration_file ? _fbb.CreateString(declaration_file) : 0;
return reflection::CreateService(
_fbb,
name__,
calls__,
attributes__,
documentation__);
documentation__,
declaration_file__);
}
/// File specific information.
/// Symbols declared within a file may be recovered by iterating over all
/// symbols and examining the `declaration_file` field.
struct SchemaFile FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
typedef SchemaFileBuilder Builder;
enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE {
VT_FILENAME = 4,
VT_INCLUDED_FILENAMES = 6
};
/// Filename, relative to project root.
const flatbuffers::String *filename() const {
return GetPointer<const flatbuffers::String *>(VT_FILENAME);
}
bool KeyCompareLessThan(const SchemaFile *o) const {
return *filename() < *o->filename();
}
int KeyCompareWithValue(const char *_filename) const {
return strcmp(filename()->c_str(), _filename);
}
/// Names of included files, relative to project root.
const flatbuffers::Vector<flatbuffers::Offset<flatbuffers::String>> *included_filenames() const {
return GetPointer<const flatbuffers::Vector<flatbuffers::Offset<flatbuffers::String>> *>(VT_INCLUDED_FILENAMES);
}
bool Verify(flatbuffers::Verifier &verifier) const {
return VerifyTableStart(verifier) &&
VerifyOffsetRequired(verifier, VT_FILENAME) &&
verifier.VerifyString(filename()) &&
VerifyOffset(verifier, VT_INCLUDED_FILENAMES) &&
verifier.VerifyVector(included_filenames()) &&
verifier.VerifyVectorOfStrings(included_filenames()) &&
verifier.EndTable();
}
};
struct SchemaFileBuilder {
typedef SchemaFile Table;
flatbuffers::FlatBufferBuilder &fbb_;
flatbuffers::uoffset_t start_;
void add_filename(flatbuffers::Offset<flatbuffers::String> filename) {
fbb_.AddOffset(SchemaFile::VT_FILENAME, filename);
}
void add_included_filenames(flatbuffers::Offset<flatbuffers::Vector<flatbuffers::Offset<flatbuffers::String>>> included_filenames) {
fbb_.AddOffset(SchemaFile::VT_INCLUDED_FILENAMES, included_filenames);
}
explicit SchemaFileBuilder(flatbuffers::FlatBufferBuilder &_fbb)
: fbb_(_fbb) {
start_ = fbb_.StartTable();
}
flatbuffers::Offset<SchemaFile> Finish() {
const auto end = fbb_.EndTable(start_);
auto o = flatbuffers::Offset<SchemaFile>(end);
fbb_.Required(o, SchemaFile::VT_FILENAME);
return o;
}
};
inline flatbuffers::Offset<SchemaFile> CreateSchemaFile(
flatbuffers::FlatBufferBuilder &_fbb,
flatbuffers::Offset<flatbuffers::String> filename = 0,
flatbuffers::Offset<flatbuffers::Vector<flatbuffers::Offset<flatbuffers::String>>> included_filenames = 0) {
SchemaFileBuilder builder_(_fbb);
builder_.add_included_filenames(included_filenames);
builder_.add_filename(filename);
return builder_.Finish();
}
inline flatbuffers::Offset<SchemaFile> CreateSchemaFileDirect(
flatbuffers::FlatBufferBuilder &_fbb,
const char *filename = nullptr,
const std::vector<flatbuffers::Offset<flatbuffers::String>> *included_filenames = nullptr) {
auto filename__ = filename ? _fbb.CreateString(filename) : 0;
auto included_filenames__ = included_filenames ? _fbb.CreateVector<flatbuffers::Offset<flatbuffers::String>>(*included_filenames) : 0;
return reflection::CreateSchemaFile(
_fbb,
filename__,
included_filenames__);
}
struct Schema FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
@ -1056,7 +1257,9 @@ struct Schema FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
VT_FILE_IDENT = 8,
VT_FILE_EXT = 10,
VT_ROOT_TABLE = 12,
VT_SERVICES = 14
VT_SERVICES = 14,
VT_ADVANCED_FEATURES = 16,
VT_FBS_FILES = 18
};
const flatbuffers::Vector<flatbuffers::Offset<reflection::Object>> *objects() const {
return GetPointer<const flatbuffers::Vector<flatbuffers::Offset<reflection::Object>> *>(VT_OBJECTS);
@ -1076,6 +1279,14 @@ struct Schema FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
const flatbuffers::Vector<flatbuffers::Offset<reflection::Service>> *services() const {
return GetPointer<const flatbuffers::Vector<flatbuffers::Offset<reflection::Service>> *>(VT_SERVICES);
}
reflection::AdvancedFeatures advanced_features() const {
return static_cast<reflection::AdvancedFeatures>(GetField<uint64_t>(VT_ADVANCED_FEATURES, 0));
}
/// All the files used in this compilation. Files are relative to where
/// flatc was invoked.
const flatbuffers::Vector<flatbuffers::Offset<reflection::SchemaFile>> *fbs_files() const {
return GetPointer<const flatbuffers::Vector<flatbuffers::Offset<reflection::SchemaFile>> *>(VT_FBS_FILES);
}
bool Verify(flatbuffers::Verifier &verifier) const {
return VerifyTableStart(verifier) &&
VerifyOffsetRequired(verifier, VT_OBJECTS) &&
@ -1093,6 +1304,10 @@ struct Schema FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
VerifyOffset(verifier, VT_SERVICES) &&
verifier.VerifyVector(services()) &&
verifier.VerifyVectorOfTables(services()) &&
VerifyField<uint64_t>(verifier, VT_ADVANCED_FEATURES, 8) &&
VerifyOffset(verifier, VT_FBS_FILES) &&
verifier.VerifyVector(fbs_files()) &&
verifier.VerifyVectorOfTables(fbs_files()) &&
verifier.EndTable();
}
};
@ -1119,11 +1334,16 @@ struct SchemaBuilder {
void add_services(flatbuffers::Offset<flatbuffers::Vector<flatbuffers::Offset<reflection::Service>>> services) {
fbb_.AddOffset(Schema::VT_SERVICES, services);
}
void add_advanced_features(reflection::AdvancedFeatures advanced_features) {
fbb_.AddElement<uint64_t>(Schema::VT_ADVANCED_FEATURES, static_cast<uint64_t>(advanced_features), 0);
}
void add_fbs_files(flatbuffers::Offset<flatbuffers::Vector<flatbuffers::Offset<reflection::SchemaFile>>> fbs_files) {
fbb_.AddOffset(Schema::VT_FBS_FILES, fbs_files);
}
explicit SchemaBuilder(flatbuffers::FlatBufferBuilder &_fbb)
: fbb_(_fbb) {
start_ = fbb_.StartTable();
}
SchemaBuilder &operator=(const SchemaBuilder &);
flatbuffers::Offset<Schema> Finish() {
const auto end = fbb_.EndTable(start_);
auto o = flatbuffers::Offset<Schema>(end);
@ -1140,8 +1360,12 @@ inline flatbuffers::Offset<Schema> CreateSchema(
flatbuffers::Offset<flatbuffers::String> file_ident = 0,
flatbuffers::Offset<flatbuffers::String> file_ext = 0,
flatbuffers::Offset<reflection::Object> root_table = 0,
flatbuffers::Offset<flatbuffers::Vector<flatbuffers::Offset<reflection::Service>>> services = 0) {
flatbuffers::Offset<flatbuffers::Vector<flatbuffers::Offset<reflection::Service>>> services = 0,
reflection::AdvancedFeatures advanced_features = static_cast<reflection::AdvancedFeatures>(0),
flatbuffers::Offset<flatbuffers::Vector<flatbuffers::Offset<reflection::SchemaFile>>> fbs_files = 0) {
SchemaBuilder builder_(_fbb);
builder_.add_advanced_features(advanced_features);
builder_.add_fbs_files(fbs_files);
builder_.add_services(services);
builder_.add_root_table(root_table);
builder_.add_file_ext(file_ext);
@ -1158,12 +1382,15 @@ inline flatbuffers::Offset<Schema> CreateSchemaDirect(
const char *file_ident = nullptr,
const char *file_ext = nullptr,
flatbuffers::Offset<reflection::Object> root_table = 0,
std::vector<flatbuffers::Offset<reflection::Service>> *services = nullptr) {
std::vector<flatbuffers::Offset<reflection::Service>> *services = nullptr,
reflection::AdvancedFeatures advanced_features = static_cast<reflection::AdvancedFeatures>(0),
std::vector<flatbuffers::Offset<reflection::SchemaFile>> *fbs_files = nullptr) {
auto objects__ = objects ? _fbb.CreateVectorOfSortedTables<reflection::Object>(objects) : 0;
auto enums__ = enums ? _fbb.CreateVectorOfSortedTables<reflection::Enum>(enums) : 0;
auto file_ident__ = file_ident ? _fbb.CreateString(file_ident) : 0;
auto file_ext__ = file_ext ? _fbb.CreateString(file_ext) : 0;
auto services__ = services ? _fbb.CreateVectorOfSortedTables<reflection::Service>(services) : 0;
auto fbs_files__ = fbs_files ? _fbb.CreateVectorOfSortedTables<reflection::SchemaFile>(fbs_files) : 0;
return reflection::CreateSchema(
_fbb,
objects__,
@ -1171,7 +1398,9 @@ inline flatbuffers::Offset<Schema> CreateSchemaDirect(
file_ident__,
file_ext__,
root_table,
services__);
services__,
advanced_features,
fbs_files__);
}
inline const reflection::Schema *GetSchema(const void *buf) {
@ -1191,6 +1420,11 @@ inline bool SchemaBufferHasIdentifier(const void *buf) {
buf, SchemaIdentifier());
}
// Like SchemaBufferHasIdentifier, but for buffers that carry a leading size
// prefix; the trailing argument tells BufferHasIdentifier to skip it.
inline bool SizePrefixedSchemaBufferHasIdentifier(const void *buf) {
  const bool size_prefixed = true;
  return flatbuffers::BufferHasIdentifier(buf, SchemaIdentifier(),
                                          size_prefixed);
}
inline bool VerifySchemaBuffer(
flatbuffers::Verifier &verifier) {
return verifier.VerifyBuffer<reflection::Schema>(SchemaIdentifier());

View File

@ -17,6 +17,7 @@
#ifndef FLATBUFFERS_REGISTRY_H_
#define FLATBUFFERS_REGISTRY_H_
#include "flatbuffers/base.h"
#include "flatbuffers/idl.h"
namespace flatbuffers {
@ -40,13 +41,13 @@ class Registry {
bool FlatBufferToText(const uint8_t *flatbuf, size_t len, std::string *dest) {
// Get the identifier out of the buffer.
// If the buffer is truncated, exit.
if (len < sizeof(uoffset_t) + FlatBufferBuilder::kFileIdentifierLength) {
if (len < sizeof(uoffset_t) + kFileIdentifierLength) {
lasterror_ = "buffer truncated";
return false;
}
std::string ident(
reinterpret_cast<const char *>(flatbuf) + sizeof(uoffset_t),
FlatBufferBuilder::kFileIdentifierLength);
kFileIdentifierLength);
// Load and parse the schema.
Parser parser;
if (!LoadSchema(ident, &parser)) return false;
@ -103,7 +104,7 @@ class Registry {
}
// Parse schema.
parser->opts = opts_;
if (!parser->Parse(schematext.c_str(), vector_data(include_paths_),
if (!parser->Parse(schematext.c_str(), include_paths_.data(),
schema.path_.c_str())) {
lasterror_ = parser->error_;
return false;

View File

@ -18,6 +18,7 @@
#define FLATBUFFERS_STL_EMULATION_H_
// clang-format off
#include "flatbuffers/base.h"
#include <string>
#include <type_traits>
@ -25,149 +26,66 @@
#include <memory>
#include <limits>
#if defined(_STLPORT_VERSION) && !defined(FLATBUFFERS_CPP98_STL)
#define FLATBUFFERS_CPP98_STL
#endif // defined(_STLPORT_VERSION) && !defined(FLATBUFFERS_CPP98_STL)
#ifndef FLATBUFFERS_USE_STD_OPTIONAL
// Detect C++17 compatible compiler.
// __cplusplus >= 201703L - a compiler has support of 'static inline' variables.
#if (defined(__cplusplus) && __cplusplus >= 201703L) \
|| (defined(_MSVC_LANG) && _MSVC_LANG >= 201703L)
#define FLATBUFFERS_USE_STD_OPTIONAL 1
#else
#define FLATBUFFERS_USE_STD_OPTIONAL 0
#endif // (defined(__cplusplus) && __cplusplus >= 201703L) ...
#endif // FLATBUFFERS_USE_STD_OPTIONAL
#if defined(FLATBUFFERS_CPP98_STL)
#include <cctype>
#endif // defined(FLATBUFFERS_CPP98_STL)
// Check if we can use template aliases
// Not possible if Microsoft Compiler before 2012
// Possible is the language feature __cpp_alias_templates is defined well
// Or possible if the C++ std is C+11 or newer
#if (defined(_MSC_VER) && _MSC_VER > 1700 /* MSVC2012 */) \
|| (defined(__cpp_alias_templates) && __cpp_alias_templates >= 200704) \
|| (defined(__cplusplus) && __cplusplus >= 201103L)
#define FLATBUFFERS_TEMPLATES_ALIASES
#if FLATBUFFERS_USE_STD_OPTIONAL
#include <optional>
#endif
// This header provides backwards compatibility for C++98 STLs like stlport.
// The __cpp_lib_span is the predefined feature macro.
#if defined(FLATBUFFERS_USE_STD_SPAN)
#include <span>
#elif defined(__cpp_lib_span) && defined(__has_include)
#if __has_include(<span>)
#include <array>
#include <span>
#define FLATBUFFERS_USE_STD_SPAN
#endif
#else
// Disable non-trivial ctors if FLATBUFFERS_SPAN_MINIMAL defined.
#if !defined(FLATBUFFERS_TEMPLATES_ALIASES)
#define FLATBUFFERS_SPAN_MINIMAL
#else
// Enable implicit construction of a span<T,N> from a std::array<T,N>.
#include <array>
#endif
#endif // defined(FLATBUFFERS_USE_STD_SPAN)
// This header provides backwards compatibility for older versions of the STL.
namespace flatbuffers {
// Return a mutable reference to the final character of `value`, without
// relying on std::string::back() (absent from pre-C++11 STLs such as
// stlport). Precondition: the string is non-empty.
inline char& string_back(std::string &value) {
  const std::string::size_type last_index = value.length() - 1;
  return value[last_index];
}
// Const overload: return (by value) the final character of a non-empty
// string, compatible with pre-C++11 STLs that lack back().
inline char string_back(const std::string &value) {
  return *value.rbegin();
}
// Portable stand-in for std::vector::data() that also works with pre-C++11
// STLs (e.g. stlport). Returns nullptr for an empty vector, because
// &vector[0] is invalid there (and some debug STLs bounds-check operator[]).
template <typename T> inline T *vector_data(std::vector<T> &vector) {
  if (vector.empty()) {
    return nullptr;
  }
  return &vector[0];
}
// Const overload of vector_data: nullptr for an empty vector, otherwise a
// pointer to the first element (see the mutable overload for rationale).
template <typename T> inline const T *vector_data(
    const std::vector<T> &vector) {
  if (vector.empty()) {
    return nullptr;
  }
  return &vector[0];
}
// Append an element to *vector, constructing it in place where the STL
// supports emplace_back; C++98 STLs (stlport) fall back to a push_back copy.
template <typename T, typename V>
inline void vector_emplace_back(std::vector<T> *vector, V &&data) {
#if !defined(FLATBUFFERS_CPP98_STL)
  vector->emplace_back(std::forward<V>(data));
#else
  vector->push_back(data);
#endif  // !defined(FLATBUFFERS_CPP98_STL)
}
#ifndef FLATBUFFERS_CPP98_STL
#if defined(FLATBUFFERS_TEMPLATES_ALIASES)
template <typename T>
using numeric_limits = std::numeric_limits<T>;
#else
template <typename T> class numeric_limits :
public std::numeric_limits<T> {};
#endif // defined(FLATBUFFERS_TEMPLATES_ALIASES)
#if defined(FLATBUFFERS_TEMPLATES_ALIASES)
template <typename T>
using numeric_limits = std::numeric_limits<T>;
#else
template <typename T> class numeric_limits :
public std::numeric_limits<T> {
public:
// Android NDK fix.
static T lowest() {
return std::numeric_limits<T>::min();
}
};
template <> class numeric_limits<float> :
public std::numeric_limits<float> {
public:
static float lowest() { return -FLT_MAX; }
};
template <> class numeric_limits<double> :
public std::numeric_limits<double> {
public:
static double lowest() { return -DBL_MAX; }
};
template <> class numeric_limits<unsigned long long> {
public:
static unsigned long long min() { return 0ULL; }
static unsigned long long max() { return ~0ULL; }
static unsigned long long lowest() {
return numeric_limits<unsigned long long>::min();
}
};
template <> class numeric_limits<long long> {
public:
static long long min() {
return static_cast<long long>(1ULL << ((sizeof(long long) << 3) - 1));
}
static long long max() {
return static_cast<long long>(
(1ULL << ((sizeof(long long) << 3) - 1)) - 1);
}
static long long lowest() {
return numeric_limits<long long>::min();
}
};
#endif // FLATBUFFERS_CPP98_STL
public std::numeric_limits<T> {};
#endif // defined(FLATBUFFERS_TEMPLATES_ALIASES)
#if defined(FLATBUFFERS_TEMPLATES_ALIASES)
#ifndef FLATBUFFERS_CPP98_STL
template <typename T> using is_scalar = std::is_scalar<T>;
template <typename T, typename U> using is_same = std::is_same<T,U>;
template <typename T> using is_floating_point = std::is_floating_point<T>;
template <typename T> using is_unsigned = std::is_unsigned<T>;
template <typename T> using is_enum = std::is_enum<T>;
template <typename T> using make_unsigned = std::make_unsigned<T>;
template<bool B, class T, class F>
using conditional = std::conditional<B, T, F>;
template<class T, T v>
using integral_constant = std::integral_constant<T, v>;
#else
// Map C++ TR1 templates defined by stlport.
template <typename T> using is_scalar = std::tr1::is_scalar<T>;
template <typename T, typename U> using is_same = std::tr1::is_same<T,U>;
template <typename T> using is_floating_point =
std::tr1::is_floating_point<T>;
template <typename T> using is_unsigned = std::tr1::is_unsigned<T>;
template <typename T> using is_enum = std::tr1::is_enum<T>;
// Android NDK doesn't have std::make_unsigned or std::tr1::make_unsigned.
template<typename T> struct make_unsigned {
static_assert(is_unsigned<T>::value, "Specialization not implemented!");
using type = T;
};
template<> struct make_unsigned<char> { using type = unsigned char; };
template<> struct make_unsigned<short> { using type = unsigned short; };
template<> struct make_unsigned<int> { using type = unsigned int; };
template<> struct make_unsigned<long> { using type = unsigned long; };
template<>
struct make_unsigned<long long> { using type = unsigned long long; };
template<bool B, class T, class F>
using conditional = std::tr1::conditional<B, T, F>;
template<class T, T v>
using integral_constant = std::tr1::integral_constant<T, v>;
#endif // !FLATBUFFERS_CPP98_STL
template <typename T> using is_scalar = std::is_scalar<T>;
template <typename T, typename U> using is_same = std::is_same<T,U>;
template <typename T> using is_floating_point = std::is_floating_point<T>;
template <typename T> using is_unsigned = std::is_unsigned<T>;
template <typename T> using is_enum = std::is_enum<T>;
template <typename T> using make_unsigned = std::make_unsigned<T>;
template<bool B, class T, class F>
using conditional = std::conditional<B, T, F>;
template<class T, T v>
using integral_constant = std::integral_constant<T, v>;
template <bool B>
using bool_constant = integral_constant<bool, B>;
using true_type = std::true_type;
using false_type = std::false_type;
#else
// MSVC 2010 doesn't support C++11 aliases.
template <typename T> struct is_scalar : public std::is_scalar<T> {};
@ -181,126 +99,411 @@ inline void vector_emplace_back(std::vector<T> *vector, V &&data) {
struct conditional : public std::conditional<B, T, F> {};
template<class T, T v>
struct integral_constant : public std::integral_constant<T, v> {};
template <bool B>
struct bool_constant : public integral_constant<bool, B> {};
typedef bool_constant<true> true_type;
typedef bool_constant<false> false_type;
#endif // defined(FLATBUFFERS_TEMPLATES_ALIASES)
#ifndef FLATBUFFERS_CPP98_STL
#if defined(FLATBUFFERS_TEMPLATES_ALIASES)
template <class T> using unique_ptr = std::unique_ptr<T>;
#else
// MSVC 2010 doesn't support C++11 aliases.
// We're manually "aliasing" the class here as we want to bring unique_ptr
// into the flatbuffers namespace. We have unique_ptr in the flatbuffers
// namespace we have a completely independent implemenation (see below)
// for C++98 STL implementations.
template <class T> class unique_ptr : public std::unique_ptr<T> {
public:
unique_ptr() {}
explicit unique_ptr(T* p) : std::unique_ptr<T>(p) {}
unique_ptr(std::unique_ptr<T>&& u) { *this = std::move(u); }
unique_ptr(unique_ptr&& u) { *this = std::move(u); }
unique_ptr& operator=(std::unique_ptr<T>&& u) {
std::unique_ptr<T>::reset(u.release());
return *this;
}
unique_ptr& operator=(unique_ptr&& u) {
std::unique_ptr<T>::reset(u.release());
return *this;
}
unique_ptr& operator=(T* p) {
return std::unique_ptr<T>::operator=(p);
}
};
#endif // defined(FLATBUFFERS_TEMPLATES_ALIASES)
#if defined(FLATBUFFERS_TEMPLATES_ALIASES)
template <class T> using unique_ptr = std::unique_ptr<T>;
#else
// Very limited implementation of unique_ptr.
// This is provided simply to allow the C++ code generated from the default
// settings to function in C++98 environments with no modifications.
template <class T> class unique_ptr {
public:
typedef T element_type;
unique_ptr() : ptr_(nullptr) {}
explicit unique_ptr(T* p) : ptr_(p) {}
unique_ptr(unique_ptr&& u) : ptr_(nullptr) { reset(u.release()); }
unique_ptr(const unique_ptr& u) : ptr_(nullptr) {
reset(const_cast<unique_ptr*>(&u)->release());
}
~unique_ptr() { reset(); }
unique_ptr& operator=(const unique_ptr& u) {
reset(const_cast<unique_ptr*>(&u)->release());
// MSVC 2010 doesn't support C++11 aliases.
// We're manually "aliasing" the class here as we want to bring unique_ptr
// into the flatbuffers namespace. We have unique_ptr in the flatbuffers
// namespace we have a completely independent implementation (see below)
// for C++98 STL implementations.
template <class T> class unique_ptr : public std::unique_ptr<T> {
public:
unique_ptr() {}
explicit unique_ptr(T* p) : std::unique_ptr<T>(p) {}
unique_ptr(std::unique_ptr<T>&& u) { *this = std::move(u); }
unique_ptr(unique_ptr&& u) { *this = std::move(u); }
unique_ptr& operator=(std::unique_ptr<T>&& u) {
std::unique_ptr<T>::reset(u.release());
return *this;
}
unique_ptr& operator=(unique_ptr&& u) {
reset(u.release());
std::unique_ptr<T>::reset(u.release());
return *this;
}
unique_ptr& operator=(T* p) {
reset(p);
return *this;
return std::unique_ptr<T>::operator=(p);
}
};
#endif // defined(FLATBUFFERS_TEMPLATES_ALIASES)
const T& operator*() const { return *ptr_; }
T* operator->() const { return ptr_; }
T* get() const noexcept { return ptr_; }
explicit operator bool() const { return ptr_ != nullptr; }
#if FLATBUFFERS_USE_STD_OPTIONAL
template<class T>
using Optional = std::optional<T>;
using nullopt_t = std::nullopt_t;
inline constexpr nullopt_t nullopt = std::nullopt;
// modifiers
T* release() {
T* value = ptr_;
ptr_ = nullptr;
return value;
}
#else
// Limited implementation of Optional<T> type for a scalar T.
// This implementation limited by trivial types compatible with
// std::is_arithmetic<T> or std::is_enum<T> type traits.
void reset(T* p = nullptr) {
T* value = ptr_;
ptr_ = p;
if (value) delete value;
}
// A tag to indicate an empty flatbuffers::optional<T>.
struct nullopt_t {
explicit FLATBUFFERS_CONSTEXPR_CPP11 nullopt_t(int) {}
};
void swap(unique_ptr& u) {
T* temp_ptr = ptr_;
ptr_ = u.ptr_;
u.ptr_ = temp_ptr;
}
#if defined(FLATBUFFERS_CONSTEXPR_DEFINED)
namespace internal {
template <class> struct nullopt_holder {
static constexpr nullopt_t instance_ = nullopt_t(0);
};
template<class Dummy>
constexpr nullopt_t nullopt_holder<Dummy>::instance_;
}
static constexpr const nullopt_t &nullopt = internal::nullopt_holder<void>::instance_;
private:
T* ptr_;
#else
namespace internal {
template <class> struct nullopt_holder {
static const nullopt_t instance_;
};
template<class Dummy>
const nullopt_t nullopt_holder<Dummy>::instance_ = nullopt_t(0);
}
static const nullopt_t &nullopt = internal::nullopt_holder<void>::instance_;
#endif
// Minimal stand-in for std::optional<T>, used when FLATBUFFERS_USE_STD_OPTIONAL
// is 0. Restricted to scalar T (arithmetic/enum/pointer): the stored value is
// simply default-constructed when empty, so no placement-new machinery needed.
template<class T>
class Optional FLATBUFFERS_FINAL_CLASS {
  // A non-scalar 'T' would make Optional<T> far more complicated.
  // is_scalar<T> is used for the check because flatbuffers::is_arithmetic<T>
  // isn't implemented.
  static_assert(flatbuffers::is_scalar<T>::value, "unexpected type T");
 public:
  ~Optional() {}

  // Default and nullopt construction both produce an empty Optional with a
  // value-initialized payload.
  FLATBUFFERS_CONSTEXPR_CPP11 Optional() FLATBUFFERS_NOEXCEPT
    : value_(), has_value_(false) {}

  FLATBUFFERS_CONSTEXPR_CPP11 Optional(nullopt_t) FLATBUFFERS_NOEXCEPT
    : value_(), has_value_(false) {}

  // Engaged construction from a value (taken by value; T is scalar).
  FLATBUFFERS_CONSTEXPR_CPP11 Optional(T val) FLATBUFFERS_NOEXCEPT
    : value_(val), has_value_(true) {}

  FLATBUFFERS_CONSTEXPR_CPP11 Optional(const Optional &other) FLATBUFFERS_NOEXCEPT
    : value_(other.value_), has_value_(other.has_value_) {}

  FLATBUFFERS_CONSTEXPR_CPP14 Optional &operator=(const Optional &other) FLATBUFFERS_NOEXCEPT {
    value_ = other.value_;
    has_value_ = other.has_value_;
    return *this;
  }

  // Assigning nullopt disengages and resets the payload to T().
  FLATBUFFERS_CONSTEXPR_CPP14 Optional &operator=(nullopt_t) FLATBUFFERS_NOEXCEPT {
    value_ = T();
    has_value_ = false;
    return *this;
  }

  FLATBUFFERS_CONSTEXPR_CPP14 Optional &operator=(T val) FLATBUFFERS_NOEXCEPT {
    value_ = val;
    has_value_ = true;
    return *this;
  }

  // Disengage, equivalent to assigning nullopt.
  void reset() FLATBUFFERS_NOEXCEPT {
    *this = nullopt;
  }

  void swap(Optional &other) FLATBUFFERS_NOEXCEPT {
    std::swap(value_, other.value_);
    std::swap(has_value_, other.has_value_);
  }

  // Explicit-in-C++11 conversion: true iff a value is present.
  FLATBUFFERS_CONSTEXPR_CPP11 FLATBUFFERS_EXPLICIT_CPP11 operator bool() const FLATBUFFERS_NOEXCEPT {
    return has_value_;
  }

  FLATBUFFERS_CONSTEXPR_CPP11 bool has_value() const FLATBUFFERS_NOEXCEPT {
    return has_value_;
  }

  // Unchecked access: unlike value(), does not assert on an empty Optional
  // (returns the value-initialized payload in that case).
  FLATBUFFERS_CONSTEXPR_CPP11 const T& operator*() const FLATBUFFERS_NOEXCEPT {
    return value_;
  }

  // Checked access: asserts (FLATBUFFERS_ASSERT) when no value is present.
  const T& value() const {
    FLATBUFFERS_ASSERT(has_value());
    return value_;
  }

  T value_or(T default_value) const FLATBUFFERS_NOEXCEPT {
    return has_value() ? value_ : default_value;
  }

 private:
  T value_;
  bool has_value_;
};
// Equality comparisons for Optional<T>, mirroring std::optional semantics:
// an Optional equals nullopt iff it is empty; an Optional equals a plain
// value iff it is engaged and the payloads compare equal.
template<class T>
FLATBUFFERS_CONSTEXPR_CPP11 bool operator==(const Optional<T>& opt, nullopt_t) FLATBUFFERS_NOEXCEPT {
  return !opt;
}
template<class T>
FLATBUFFERS_CONSTEXPR_CPP11 bool operator==(nullopt_t, const Optional<T>& opt) FLATBUFFERS_NOEXCEPT {
  return !opt;
}

// Optional vs raw value: empty Optional never equals a value.
template<class T, class U>
FLATBUFFERS_CONSTEXPR_CPP11 bool operator==(const Optional<T>& lhs, const U& rhs) FLATBUFFERS_NOEXCEPT {
  return static_cast<bool>(lhs) && (*lhs == rhs);
}
template<class T, class U>
FLATBUFFERS_CONSTEXPR_CPP11 bool operator==(const T& lhs, const Optional<U>& rhs) FLATBUFFERS_NOEXCEPT {
  return static_cast<bool>(rhs) && (lhs == *rhs);
}

// Optional vs Optional: equal when both are empty, or both are engaged with
// equal payloads. (Two empty Optionals fall through both branches to false?
// No — the first condition filters mismatched engagement; note the second
// ternary returns false for the both-empty case by design of this chain,
// matching *neither* payload being compared.)
template<class T, class U>
FLATBUFFERS_CONSTEXPR_CPP11 bool operator==(const Optional<T>& lhs, const Optional<U>& rhs) FLATBUFFERS_NOEXCEPT {
  return static_cast<bool>(lhs) != static_cast<bool>(rhs)
    ? false
    : !static_cast<bool>(lhs) ? false : (*lhs == *rhs);
}
#endif // FLATBUFFERS_USE_STD_OPTIONAL
// Very limited and naive partial implementation of C++20 std::span<T,Extent>.
#if defined(FLATBUFFERS_USE_STD_SPAN)
inline constexpr std::size_t dynamic_extent = std::dynamic_extent;
template<class T, std::size_t Extent = std::dynamic_extent>
using span = std::span<T, Extent>;
#else // !defined(FLATBUFFERS_USE_STD_SPAN)
FLATBUFFERS_CONSTEXPR std::size_t dynamic_extent = static_cast<std::size_t>(-1);
// Exclude this code if MSVC2010 or non-STL Android is active.
// The non-STL Android doesn't have `std::is_convertible` required for SFINAE.
#if !defined(FLATBUFFERS_SPAN_MINIMAL)
namespace internal {
// This is SFINAE helper class for checking of a common condition:
// > This overload only participates in overload resolution
// > Check whether a pointer to an array of From can be converted
// > to a pointer to an array of To.
// This helper is used for checking of 'From -> const From'.
template<class To, std::size_t Extent, class From, std::size_t N>
struct is_span_convertible {
using type =
typename std::conditional<std::is_convertible<From (*)[], To (*)[]>::value
&& (Extent == dynamic_extent || N == Extent),
int, void>::type;
};
template <class T> bool operator==(const unique_ptr<T>& x,
const unique_ptr<T>& y) {
return x.get() == y.get();
template<typename T>
struct SpanIterator {
// TODO: upgrade to std::random_access_iterator_tag.
using iterator_category = std::forward_iterator_tag;
using difference_type = std::ptrdiff_t;
using value_type = typename std::remove_cv<T>::type;
using reference = T&;
using pointer = T*;
// Convince MSVC compiler that this iterator is trusted (it is verified).
#ifdef _MSC_VER
using _Unchecked_type = pointer;
#endif // _MSC_VER
SpanIterator(pointer ptr) : ptr_(ptr) {}
reference operator*() const { return *ptr_; }
pointer operator->() { return ptr_; }
SpanIterator& operator++() { ptr_++; return *this; }
SpanIterator operator++(int) { auto tmp = *this; ++(*this); return tmp; }
friend bool operator== (const SpanIterator& lhs, const SpanIterator& rhs) { return lhs.ptr_ == rhs.ptr_; }
friend bool operator!= (const SpanIterator& lhs, const SpanIterator& rhs) { return lhs.ptr_ != rhs.ptr_; }
private:
pointer ptr_;
};
} // namespace internal
#endif // !defined(FLATBUFFERS_SPAN_MINIMAL)
// T - element type; must be a complete type that is not an abstract
// class type.
// Extent - the number of elements in the sequence, or dynamic.
template<class T, std::size_t Extent = dynamic_extent>
class span FLATBUFFERS_FINAL_CLASS {
public:
typedef T element_type;
typedef T& reference;
typedef const T& const_reference;
typedef T* pointer;
typedef const T* const_pointer;
typedef std::size_t size_type;
static FLATBUFFERS_CONSTEXPR size_type extent = Extent;
// Returns the number of elements in the span.
FLATBUFFERS_CONSTEXPR_CPP11 size_type size() const FLATBUFFERS_NOEXCEPT {
return count_;
}
template <class T, class D> bool operator==(const unique_ptr<T>& x,
const D* y) {
return static_cast<D*>(x.get()) == y;
// Returns the size of the sequence in bytes.
FLATBUFFERS_CONSTEXPR_CPP11
size_type size_bytes() const FLATBUFFERS_NOEXCEPT {
return size() * sizeof(element_type);
}
template <class T> bool operator==(const unique_ptr<T>& x, intptr_t y) {
return reinterpret_cast<intptr_t>(x.get()) == y;
// Checks if the span is empty.
FLATBUFFERS_CONSTEXPR_CPP11 bool empty() const FLATBUFFERS_NOEXCEPT {
return size() == 0;
}
template <class T> bool operator!=(const unique_ptr<T>& x, decltype(nullptr)) {
return !!x;
// Returns a pointer to the beginning of the sequence.
FLATBUFFERS_CONSTEXPR_CPP11 pointer data() const FLATBUFFERS_NOEXCEPT {
return data_;
}
template <class T> bool operator!=(decltype(nullptr), const unique_ptr<T>& x) {
return !!x;
#if !defined(FLATBUFFERS_SPAN_MINIMAL)
using Iterator = internal::SpanIterator<T>;
Iterator begin() const { return Iterator(data()); }
Iterator end() const { return Iterator(data() + size()); }
#endif
// Returns a reference to the idx-th element of the sequence.
// The behavior is undefined if the idx is greater than or equal to size().
FLATBUFFERS_CONSTEXPR_CPP11 reference operator[](size_type idx) const {
return data()[idx];
}
template <class T> bool operator==(const unique_ptr<T>& x, decltype(nullptr)) {
return !x;
FLATBUFFERS_CONSTEXPR_CPP11 span(const span &other) FLATBUFFERS_NOEXCEPT
: data_(other.data_), count_(other.count_) {}
FLATBUFFERS_CONSTEXPR_CPP14 span &operator=(const span &other)
FLATBUFFERS_NOEXCEPT {
data_ = other.data_;
count_ = other.count_;
}
template <class T> bool operator==(decltype(nullptr), const unique_ptr<T>& x) {
return !x;
// Limited implementation of
// `template <class It> constexpr std::span(It first, size_type count);`.
//
// Constructs a span that is a view over the range [first, first + count);
// the resulting span has: data() == first and size() == count.
// The behavior is undefined if [first, first + count) is not a valid range,
// or if (extent != flatbuffers::dynamic_extent && count != extent).
FLATBUFFERS_CONSTEXPR_CPP11
explicit span(pointer first, size_type count) FLATBUFFERS_NOEXCEPT
: data_ (Extent == dynamic_extent ? first : (Extent == count ? first : nullptr)),
count_(Extent == dynamic_extent ? count : (Extent == count ? Extent : 0)) {
// Make span empty if the count argument is incompatible with span<T,N>.
}
#endif // !FLATBUFFERS_CPP98_STL
// Exclude this code if MSVC2010 is active. The MSVC2010 isn't C++11
// compliant, it doesn't support default template arguments for functions.
#if defined(FLATBUFFERS_SPAN_MINIMAL)
FLATBUFFERS_CONSTEXPR_CPP11 span() FLATBUFFERS_NOEXCEPT : data_(nullptr),
count_(0) {
static_assert(extent == 0 || extent == dynamic_extent, "invalid span");
}
#else
// Constructs an empty span whose data() == nullptr and size() == 0.
// This overload only participates in overload resolution if
// extent == 0 || extent == flatbuffers::dynamic_extent.
// A dummy template argument N is need dependency for SFINAE.
template<std::size_t N = 0,
typename internal::is_span_convertible<element_type, Extent, element_type, (N - N)>::type = 0>
FLATBUFFERS_CONSTEXPR_CPP11 span() FLATBUFFERS_NOEXCEPT : data_(nullptr),
count_(0) {
static_assert(extent == 0 || extent == dynamic_extent, "invalid span");
}
// Constructs a span that is a view over the array arr; the resulting span
// has size() == N and data() == std::data(arr). These overloads only
// participate in overload resolution if
// extent == std::dynamic_extent || N == extent is true and
// std::remove_pointer_t<decltype(std::data(arr))>(*)[]
// is convertible to element_type (*)[].
template<std::size_t N,
typename internal::is_span_convertible<element_type, Extent, element_type, N>::type = 0>
FLATBUFFERS_CONSTEXPR_CPP11 span(element_type (&arr)[N]) FLATBUFFERS_NOEXCEPT
: data_(arr), count_(N) {}
template<class U, std::size_t N,
typename internal::is_span_convertible<element_type, Extent, U, N>::type = 0>
FLATBUFFERS_CONSTEXPR_CPP11 span(std::array<U, N> &arr) FLATBUFFERS_NOEXCEPT
: data_(arr.data()), count_(N) {}
//template<class U, std::size_t N,
// int = 0>
//FLATBUFFERS_CONSTEXPR_CPP11 span(std::array<U, N> &arr) FLATBUFFERS_NOEXCEPT
// : data_(arr.data()), count_(N) {}
template<class U, std::size_t N,
typename internal::is_span_convertible<element_type, Extent, U, N>::type = 0>
FLATBUFFERS_CONSTEXPR_CPP11 span(const std::array<U, N> &arr) FLATBUFFERS_NOEXCEPT
: data_(arr.data()), count_(N) {}
// Converting constructor from another span s;
// the resulting span has size() == s.size() and data() == s.data().
// This overload only participates in overload resolution
// if extent == std::dynamic_extent || N == extent is true and U (*)[]
// is convertible to element_type (*)[].
template<class U, std::size_t N,
typename internal::is_span_convertible<element_type, Extent, U, N>::type = 0>
FLATBUFFERS_CONSTEXPR_CPP11 span(const flatbuffers::span<U, N> &s) FLATBUFFERS_NOEXCEPT
: span(s.data(), s.size()) {
}
#endif // !defined(FLATBUFFERS_SPAN_MINIMAL)
private:
// This is a naive implementation with 'count_' member even if (Extent != dynamic_extent).
pointer const data_;
size_type count_;
};
#endif // defined(FLATBUFFERS_USE_STD_SPAN)
#if !defined(FLATBUFFERS_SPAN_MINIMAL)
// make_span helpers: construct a flatbuffers::span viewing a C array, a
// std::array, or a (pointer, count) pair. Array/std::array overloads deduce a
// fixed Extent; pointer overloads always yield a dynamic_extent span.
template<class ElementType, std::size_t Extent>
FLATBUFFERS_CONSTEXPR_CPP11
flatbuffers::span<ElementType, Extent> make_span(ElementType(&arr)[Extent]) FLATBUFFERS_NOEXCEPT {
  return span<ElementType, Extent>(arr);
}

template<class ElementType, std::size_t Extent>
FLATBUFFERS_CONSTEXPR_CPP11
flatbuffers::span<const ElementType, Extent> make_span(const ElementType(&arr)[Extent]) FLATBUFFERS_NOEXCEPT {
  return span<const ElementType, Extent>(arr);
}

template<class ElementType, std::size_t Extent>
FLATBUFFERS_CONSTEXPR_CPP11
flatbuffers::span<ElementType, Extent> make_span(std::array<ElementType, Extent> &arr) FLATBUFFERS_NOEXCEPT {
  return span<ElementType, Extent>(arr);
}

template<class ElementType, std::size_t Extent>
FLATBUFFERS_CONSTEXPR_CPP11
flatbuffers::span<const ElementType, Extent> make_span(const std::array<ElementType, Extent> &arr) FLATBUFFERS_NOEXCEPT {
  return span<const ElementType, Extent>(arr);
}

// Fix: the (pointer, count) overloads previously declared an extra
// 'std::size_t Extent' template parameter that is never used in the signature
// and therefore can never be deduced, making these overloads uncallable.
// Extent is dropped so make_span(ptr, n) deduces normally and returns a
// dynamic_extent span, as in upstream FlatBuffers.
template<class ElementType>
FLATBUFFERS_CONSTEXPR_CPP11
flatbuffers::span<ElementType, dynamic_extent> make_span(ElementType *first, std::size_t count) FLATBUFFERS_NOEXCEPT {
  return span<ElementType, dynamic_extent>(first, count);
}

template<class ElementType>
FLATBUFFERS_CONSTEXPR_CPP11
flatbuffers::span<const ElementType, dynamic_extent> make_span(const ElementType *first, std::size_t count) FLATBUFFERS_NOEXCEPT {
  return span<const ElementType, dynamic_extent>(first, count);
}
#endif // !defined(FLATBUFFERS_SPAN_MINIMAL)
} // namespace flatbuffers

View File

@ -0,0 +1,64 @@
/*
* Copyright 2021 Google Inc. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef FLATBUFFERS_STRING_H_
#define FLATBUFFERS_STRING_H_
#include "flatbuffers/base.h"
#include "flatbuffers/vector.h"
namespace flatbuffers {
// In-buffer string payload: a Vector<char> whose bytes are followed by a
// terminating NUL (not counted in size()), so c_str() is safe to hand to
// C APIs. Instances are only ever obtained by pointing into a buffer.
struct String : public Vector<char> {
  const char *c_str() const { return reinterpret_cast<const char *>(Data()); }
  // Copies the payload into an owning std::string.
  std::string str() const { return std::string(c_str(), size()); }

  // clang-format off
  #ifdef FLATBUFFERS_HAS_STRING_VIEW
  // Zero-copy view into the buffer; valid only while the buffer lives.
  flatbuffers::string_view string_view() const {
    return flatbuffers::string_view(c_str(), size());
  }
  #endif // FLATBUFFERS_HAS_STRING_VIEW
  // clang-format on

  // Lexicographic byte comparison via StringLessThan (used for sorted
  // vectors of strings in a buffer).
  bool operator<(const String &o) const {
    return StringLessThan(this->data(), this->size(), o.data(), o.size());
  }
};
// Convenience function: convert a (possibly null) String pointer into an
// owning std::string; a null pointer yields an empty string.
static inline std::string GetString(const String *str) {
  if (str == nullptr) return "";
  return str->str();
}
// Convenience function: get a NUL-terminated char* from a (possibly null)
// String pointer; a null pointer yields the empty string literal.
static inline const char *GetCstring(const String *str) {
  if (str == nullptr) return "";
  return str->c_str();
}
#ifdef FLATBUFFERS_HAS_STRING_VIEW
// Convenience function: get a string_view over a (possibly null) String
// pointer; a null pointer yields an empty (default) string_view.
static inline flatbuffers::string_view GetStringView(const String *str) {
  if (str == nullptr) return flatbuffers::string_view();
  return str->string_view();
}
#endif // FLATBUFFERS_HAS_STRING_VIEW
} // namespace flatbuffers
#endif // FLATBUFFERS_STRING_H_

View File

@ -0,0 +1,53 @@
/*
* Copyright 2021 Google Inc. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef FLATBUFFERS_STRUCT_H_
#define FLATBUFFERS_STRUCT_H_
#include "flatbuffers/base.h"
namespace flatbuffers {
// "structs" are flat structures that do not have an offset table, thus
// always have all members present and do not support forwards/backwards
// compatible extensions.
class Struct FLATBUFFERS_FINAL_CLASS {
 public:
  // Reads a scalar of type T located `o` bytes into the struct.
  template<typename T> T GetField(uoffset_t o) const {
    return ReadScalar<T>(&data_[o]);
  }

  // Returns a pointer (of pointer type T) to a nested struct stored inline
  // at byte offset `o`. No bounds or type checking is performed.
  template<typename T> T GetStruct(uoffset_t o) const {
    return reinterpret_cast<T>(&data_[o]);
  }

  // Raw address of the bytes at offset `o` within this struct.
  const uint8_t *GetAddressOf(uoffset_t o) const { return &data_[o]; }
  uint8_t *GetAddressOf(uoffset_t o) { return &data_[o]; }

 private:
  // private constructor & copy constructor: you obtain instances of this
  // class by pointing to existing data only
  Struct();
  Struct(const Struct &);
  Struct &operator=(const Struct &);

  // Anchor for the struct's bytes; the real data extends past this
  // one-byte array inside the enclosing buffer.
  uint8_t data_[1];
};
} // namespace flatbuffers
#endif // FLATBUFFERS_STRUCT_H_

View File

@ -0,0 +1,168 @@
/*
* Copyright 2021 Google Inc. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef FLATBUFFERS_TABLE_H_
#define FLATBUFFERS_TABLE_H_
#include "flatbuffers/base.h"
#include "flatbuffers/verifier.h"
namespace flatbuffers {
// "tables" use an offset table (possibly shared) that allows fields to be
// omitted and added at will, but uses an extra indirection to read.
class Table {
 public:
  // The vtable lives *before* the table, at the (signed) offset stored in
  // the table's first word.
  const uint8_t *GetVTable() const {
    return data_ - ReadScalar<soffset_t>(data_);
  }

  // This gets the field offset for any of the functions below it, or 0
  // if the field was not present.
  voffset_t GetOptionalFieldOffset(voffset_t field) const {
    // The vtable offset is always at the start.
    auto vtable = GetVTable();
    // The first element is the size of the vtable (fields + type id + itself).
    auto vtsize = ReadScalar<voffset_t>(vtable);
    // If the field we're accessing is outside the vtable, we're reading older
    // data, so it's the same as if the offset was 0 (not present).
    return field < vtsize ? ReadScalar<voffset_t>(vtable + field) : 0;
  }

  // Reads scalar field `field`, or `defaultval` when the field is absent.
  template<typename T> T GetField(voffset_t field, T defaultval) const {
    auto field_offset = GetOptionalFieldOffset(field);
    return field_offset ? ReadScalar<T>(data_ + field_offset) : defaultval;
  }

  // Resolves an offset field (table/string/vector) to a pointer of type P,
  // or nullptr when the field is absent.
  template<typename P> P GetPointer(voffset_t field) {
    auto field_offset = GetOptionalFieldOffset(field);
    auto p = data_ + field_offset;
    return field_offset ? reinterpret_cast<P>(p + ReadScalar<uoffset_t>(p))
                        : nullptr;
  }
  template<typename P> P GetPointer(voffset_t field) const {
    return const_cast<Table *>(this)->GetPointer<P>(field);
  }

  // Structs are stored inline, so only the field offset is applied
  // (no second indirection); returns nullptr when absent.
  template<typename P> P GetStruct(voffset_t field) const {
    auto field_offset = GetOptionalFieldOffset(field);
    auto p = const_cast<uint8_t *>(data_ + field_offset);
    return field_offset ? reinterpret_cast<P>(p) : nullptr;
  }

  // Reads an optional scalar stored as type Raw, returned as
  // Optional<Face>; an absent field yields an empty Optional.
  template<typename Raw, typename Face>
  flatbuffers::Optional<Face> GetOptional(voffset_t field) const {
    auto field_offset = GetOptionalFieldOffset(field);
    auto p = data_ + field_offset;
    return field_offset ? Optional<Face>(static_cast<Face>(ReadScalar<Raw>(p)))
                        : Optional<Face>();
  }

  // In-place mutation of a scalar field. If the field is absent, this only
  // "succeeds" when the new value equals the default (a no-op).
  template<typename T> bool SetField(voffset_t field, T val, T def) {
    auto field_offset = GetOptionalFieldOffset(field);
    if (!field_offset) return IsTheSameAs(val, def);
    WriteScalar(data_ + field_offset, val);
    return true;
  }
  // Same, for fields without a usable default: absent => failure.
  template<typename T> bool SetField(voffset_t field, T val) {
    auto field_offset = GetOptionalFieldOffset(field);
    if (!field_offset) return false;
    WriteScalar(data_ + field_offset, val);
    return true;
  }

  // Re-points an offset field at `val` (which must live inside the same
  // buffer, since a relative uoffset is stored); false when absent.
  bool SetPointer(voffset_t field, const uint8_t *val) {
    auto field_offset = GetOptionalFieldOffset(field);
    if (!field_offset) return false;
    WriteScalar(data_ + field_offset,
                static_cast<uoffset_t>(val - (data_ + field_offset)));
    return true;
  }

  // Raw address of a field's storage, or nullptr when absent.
  uint8_t *GetAddressOf(voffset_t field) {
    auto field_offset = GetOptionalFieldOffset(field);
    return field_offset ? data_ + field_offset : nullptr;
  }
  const uint8_t *GetAddressOf(voffset_t field) const {
    return const_cast<Table *>(this)->GetAddressOf(field);
  }

  // True when the field is present in this particular buffer.
  bool CheckField(voffset_t field) const {
    return GetOptionalFieldOffset(field) != 0;
  }

  // Verify the vtable of this table.
  // Call this once per table, followed by VerifyField once per field.
  bool VerifyTableStart(Verifier &verifier) const {
    return verifier.VerifyTableStart(data_);
  }

  // Verify a particular field.
  template<typename T>
  bool VerifyField(const Verifier &verifier, voffset_t field,
                   size_t align) const {
    // Calling GetOptionalFieldOffset should be safe now thanks to
    // VerifyTable().
    auto field_offset = GetOptionalFieldOffset(field);
    // Check the actual field. An absent optional field verifies trivially.
    return !field_offset || verifier.VerifyField<T>(data_, field_offset, align);
  }

  // VerifyField for required fields: absence is a verification failure.
  template<typename T>
  bool VerifyFieldRequired(const Verifier &verifier, voffset_t field,
                           size_t align) const {
    auto field_offset = GetOptionalFieldOffset(field);
    return verifier.Check(field_offset != 0) &&
           verifier.VerifyField<T>(data_, field_offset, align);
  }

  // Versions for offsets.
  bool VerifyOffset(const Verifier &verifier, voffset_t field) const {
    auto field_offset = GetOptionalFieldOffset(field);
    return !field_offset || verifier.VerifyOffset(data_, field_offset);
  }
  bool VerifyOffsetRequired(const Verifier &verifier, voffset_t field) const {
    auto field_offset = GetOptionalFieldOffset(field);
    return verifier.Check(field_offset != 0) &&
           verifier.VerifyOffset(data_, field_offset);
  }

 private:
  // private constructor & copy constructor: you obtain instances of this
  // class by pointing to existing data only
  Table();
  Table(const Table &other);
  Table &operator=(const Table &);

  // Anchor for the table's bytes; the real data extends past this one-byte
  // array inside the enclosing buffer.
  uint8_t data_[1];
};
// This specialization allows avoiding warnings like:
// MSVC C4800: type: forcing value to bool 'true' or 'false'.
template<>
inline flatbuffers::Optional<bool> Table::GetOptional<uint8_t, bool>(
    voffset_t field) const {
  const auto field_offset = GetOptionalFieldOffset(field);
  if (!field_offset) return Optional<bool>();
  return Optional<bool>(ReadScalar<uint8_t>(data_ + field_offset) != 0);
}
} // namespace flatbuffers
#endif // FLATBUFFERS_TABLE_H_

View File

@ -17,18 +17,20 @@
#ifndef FLATBUFFERS_UTIL_H_
#define FLATBUFFERS_UTIL_H_
#include <ctype.h>
#include <errno.h>
#include "flatbuffers/base.h"
#include "flatbuffers/stl_emulation.h"
#ifndef FLATBUFFERS_PREFER_PRINTF
# include <iomanip>
# include <sstream>
#else // FLATBUFFERS_PREFER_PRINTF
# include <float.h>
# include <stdio.h>
#endif // FLATBUFFERS_PREFER_PRINTF
#include <iomanip>
#include <string>
namespace flatbuffers {
@ -50,6 +52,9 @@ inline bool is_alpha(char c) {
return check_ascii_range(c & 0xDF, 'a' & 0xDF, 'z' & 0xDF);
}
// Check for uppercase alpha: true iff `c` is an ASCII character in
// 'A'..'Z' (locale-independent, unlike isupper()).
inline bool is_alpha_upper(char c) { return check_ascii_range(c, 'A', 'Z'); }
// Check (case-insensitive) that `c` is equal to alpha.
inline bool is_alpha_char(char c, char alpha) {
FLATBUFFERS_ASSERT(is_alpha(alpha));
@ -72,6 +77,14 @@ inline bool is_xdigit(char c) {
// Case-insensitive isalnum
inline bool is_alnum(char c) { return is_alpha(c) || is_digit(c); }
// Uppercases `c` via ::toupper, widening through unsigned char first to
// avoid undefined behavior for negative char values.
inline char CharToUpper(char c) {
  const auto uc = static_cast<unsigned char>(c);
  return static_cast<char>(::toupper(uc));
}
// Lowercases `c` via ::tolower, widening through unsigned char first to
// avoid undefined behavior for negative char values.
inline char CharToLower(char c) {
  const auto uc = static_cast<unsigned char>(c);
  return static_cast<char>(::tolower(uc));
}
// @end-locale-independent functions for ASCII character set
#ifdef FLATBUFFERS_PREFER_PRINTF
@ -82,7 +95,7 @@ template<typename T> size_t IntToDigitCount(T t) {
// Count a single 0 left of the dot for fractional numbers
if (-1 < t && t < 1) digit_count++;
// Count digits until fractional part
T eps = std::numeric_limits<float>::epsilon();
T eps = std::numeric_limits<T>::epsilon();
while (t <= (-1 + eps) || (1 - eps) <= t) {
t /= 10;
digit_count++;
@ -133,20 +146,6 @@ template<> inline std::string NumToString<unsigned char>(unsigned char t) {
template<> inline std::string NumToString<char>(char t) {
return NumToString(static_cast<int>(t));
}
#if defined(FLATBUFFERS_CPP98_STL)
template<> inline std::string NumToString<long long>(long long t) {
char buf[21]; // (log((1 << 63) - 1) / log(10)) + 2
snprintf(buf, sizeof(buf), "%lld", t);
return std::string(buf);
}
template<>
inline std::string NumToString<unsigned long long>(unsigned long long t) {
char buf[22]; // (log((1 << 63) - 1) / log(10)) + 1
snprintf(buf, sizeof(buf), "%llu", t);
return std::string(buf);
}
#endif // defined(FLATBUFFERS_CPP98_STL)
// Special versions for floats/doubles.
template<typename T> std::string FloatToString(T t, int precision) {
@ -256,7 +255,7 @@ inline void strtoval_impl(double *val, const char *str, char **endptr) {
}
// UBSAN: double to float is safe if numeric_limits<float>::is_iec559 is true.
__supress_ubsan__("float-cast-overflow")
__suppress_ubsan__("float-cast-overflow")
inline void strtoval_impl(float *val, const char *str, char **endptr) {
*val = __strtof_impl(str, endptr);
}
@ -323,6 +322,9 @@ inline bool StringToFloatImpl(T *val, const char *const str) {
// - If the converted value falls out of range of corresponding return type, a
// range error occurs. In this case value MAX(T)/MIN(T) is returned.
template<typename T> inline bool StringToNumber(const char *s, T *val) {
// Assert on `unsigned long` and `signed long` on LP64.
// If it is necessary, it could be solved with flatbuffers::enable_if<B,T>.
static_assert(sizeof(T) < sizeof(int64_t), "unexpected type T");
FLATBUFFERS_ASSERT(s && val);
int64_t i64;
// The errno check isn't needed, will return MAX/MIN on overflow.
@ -446,13 +448,17 @@ std::string StripPath(const std::string &filepath);
// Strip the last component of the path + separator.
std::string StripFileName(const std::string &filepath);
// Concatenates a path with a filename, regardless of wether the path
std::string StripPrefix(const std::string &filepath,
const std::string &prefix_to_remove);
// Concatenates a path with a filename, regardless of whether the path
// ends in a separator or not.
std::string ConCatPathFileName(const std::string &path,
const std::string &filename);
// Replaces any '\\' separators with '/'
std::string PosixPath(const char *path);
std::string PosixPath(const std::string &path);
// This function ensure a directory exists, by recursively
// creating dirs for any parts of the path that don't exist yet.
@ -462,6 +468,10 @@ void EnsureDirExists(const std::string &filepath);
// Returns the input path if the absolute path couldn't be resolved.
std::string AbsolutePath(const std::string &filepath);
// Returns files relative to the --project_root path, prefixed with `//`.
std::string RelativeToRootPath(const std::string &project,
const std::string &filepath);
// To and from UTF-8 unicode conversion functions
// Convert a unicode code point into a UTF-8 representation by appending it
@ -675,8 +685,31 @@ bool SetGlobalTestLocale(const char *locale_name,
bool ReadEnvironmentVariable(const char *var_name,
std::string *_value = nullptr);
// MSVC specific: Send all assert reports to STDOUT to prevent CI hangs.
void SetupDefaultCRTReportMode();
// Identifier casing conventions understood by ConvertCase() below.
enum class Case {
  kUnknown = 0,
  // TheQuickBrownFox
  kUpperCamel = 1,
  // theQuickBrownFox
  kLowerCamel = 2,
  // the_quick_brown_fox
  kSnake = 3,
  // THE_QUICK_BROWN_FOX
  kScreamingSnake = 4,
  // THEQUICKBROWNFOX
  kAllUpper = 5,
  // thequickbrownfox
  kAllLower = 6,
  // the-quick-brown-fox
  kDasher = 7,
  // THEQuiCKBr_ownFox (or whatever you want, we won't change it)
  kKeep = 8,
  // the_quick_brown_fox123 (as opposed to the_quick_brown_fox_123)
  kSnake2 = 9,
};

// Convert the `input` string of case `input_case` to the specified `output_case`.
std::string ConvertCase(const std::string &input, Case output_case,
                        Case input_case = Case::kSnake);
} // namespace flatbuffers

View File

@ -0,0 +1,389 @@
/*
* Copyright 2021 Google Inc. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef FLATBUFFERS_VECTOR_H_
#define FLATBUFFERS_VECTOR_H_
#include "flatbuffers/base.h"
#include "flatbuffers/buffer.h"
#include "flatbuffers/stl_emulation.h"
namespace flatbuffers {
struct String;
// An STL compatible iterator implementation for Vector below, effectively
// calling Get() for every element.
template<typename T, typename IT> struct VectorIterator {
  typedef std::random_access_iterator_tag iterator_category;
  typedef IT value_type;
  typedef ptrdiff_t difference_type;
  typedef IT *pointer;
  typedef IT &reference;

  // `i` is an element index; the iterator holds a raw byte pointer and
  // always moves in multiples of IndirectHelper<T>::element_stride.
  VectorIterator(const uint8_t *data, uoffset_t i)
      : data_(data + IndirectHelper<T>::element_stride * i) {}
  VectorIterator(const VectorIterator &other) : data_(other.data_) {}
  VectorIterator() : data_(nullptr) {}

  VectorIterator &operator=(const VectorIterator &other) {
    data_ = other.data_;
    return *this;
  }

  VectorIterator &operator=(VectorIterator &&other) {
    data_ = other.data_;
    return *this;
  }

  bool operator==(const VectorIterator &other) const {
    return data_ == other.data_;
  }

  bool operator<(const VectorIterator &other) const {
    return data_ < other.data_;
  }

  bool operator!=(const VectorIterator &other) const {
    return data_ != other.data_;
  }

  // Distance in elements (not bytes).
  difference_type operator-(const VectorIterator &other) const {
    return (data_ - other.data_) / IndirectHelper<T>::element_stride;
  }

  // Note: return type is incompatible with the standard
  // `reference operator*()`.
  IT operator*() const { return IndirectHelper<T>::Read(data_, 0); }

  // Note: return type is incompatible with the standard
  // `pointer operator->()`.
  IT operator->() const { return IndirectHelper<T>::Read(data_, 0); }

  VectorIterator &operator++() {
    data_ += IndirectHelper<T>::element_stride;
    return *this;
  }

  VectorIterator operator++(int) {
    VectorIterator temp(data_, 0);
    data_ += IndirectHelper<T>::element_stride;
    return temp;
  }

  VectorIterator operator+(const uoffset_t &offset) const {
    return VectorIterator(data_ + offset * IndirectHelper<T>::element_stride,
                          0);
  }

  VectorIterator &operator+=(const uoffset_t &offset) {
    data_ += offset * IndirectHelper<T>::element_stride;
    return *this;
  }

  VectorIterator &operator--() {
    data_ -= IndirectHelper<T>::element_stride;
    return *this;
  }

  VectorIterator operator--(int) {
    VectorIterator temp(data_, 0);
    data_ -= IndirectHelper<T>::element_stride;
    return temp;
  }

  VectorIterator operator-(const uoffset_t &offset) const {
    return VectorIterator(data_ - offset * IndirectHelper<T>::element_stride,
                          0);
  }

  VectorIterator &operator-=(const uoffset_t &offset) {
    data_ -= offset * IndirectHelper<T>::element_stride;
    return *this;
  }

 private:
  const uint8_t *data_;
};
// Reverse adapter over VectorIterator. Like VectorIterator, dereference
// returns by value rather than by reference, so the standard
// std::reverse_iterator operators cannot be used directly.
template<typename Iterator>
struct VectorReverseIterator : public std::reverse_iterator<Iterator> {
  explicit VectorReverseIterator(Iterator iter)
      : std::reverse_iterator<Iterator>(iter) {}

  // Note: return type is incompatible with the standard
  // `reference operator*()`.
  // Per reverse_iterator convention, reads the element just *before*
  // the wrapped (base) iterator position.
  typename Iterator::value_type operator*() const {
    auto tmp = std::reverse_iterator<Iterator>::current;
    return *--tmp;
  }

  // Note: return type is incompatible with the standard
  // `pointer operator->()`.
  typename Iterator::value_type operator->() const {
    auto tmp = std::reverse_iterator<Iterator>::current;
    return *--tmp;
  }
};
// This is used as a helper type for accessing vectors.
// Vector::data() assumes the vector elements start after the length field.
template<typename T> class Vector {
 public:
  typedef VectorIterator<T, typename IndirectHelper<T>::mutable_return_type>
      iterator;
  typedef VectorIterator<T, typename IndirectHelper<T>::return_type>
      const_iterator;
  typedef VectorReverseIterator<iterator> reverse_iterator;
  typedef VectorReverseIterator<const_iterator> const_reverse_iterator;

  typedef typename flatbuffers::bool_constant<flatbuffers::is_scalar<T>::value>
      scalar_tag;

  // A span may alias the element storage directly only for scalar element
  // types whose in-memory representation matches the wire format
  // (little-endian host, or 1-byte elements where endianness is moot).
  static FLATBUFFERS_CONSTEXPR bool is_span_observable =
      scalar_tag::value && (FLATBUFFERS_LITTLEENDIAN || sizeof(T) == 1);

  // Element count (stored little-endian in the buffer).
  uoffset_t size() const { return EndianScalar(length_); }

  // Deprecated: use size(). Here for backwards compatibility.
  FLATBUFFERS_ATTRIBUTE([[deprecated("use size() instead")]])
  uoffset_t Length() const { return size(); }

  typedef typename IndirectHelper<T>::return_type return_type;
  typedef typename IndirectHelper<T>::mutable_return_type mutable_return_type;
  typedef return_type value_type;

  // Element access; IndirectHelper resolves the extra indirection for
  // offset element types (tables/strings).
  return_type Get(uoffset_t i) const {
    FLATBUFFERS_ASSERT(i < size());
    return IndirectHelper<T>::Read(Data(), i);
  }

  return_type operator[](uoffset_t i) const { return Get(i); }

  // If this is a Vector of enums, T will be its storage type, not the enum
  // type. This function makes it convenient to retrieve value with enum
  // type E.
  template<typename E> E GetEnum(uoffset_t i) const {
    return static_cast<E>(Get(i));
  }

  // If this a vector of unions, this does the cast for you. There's no check
  // to make sure this is the right type!
  template<typename U> const U *GetAs(uoffset_t i) const {
    return reinterpret_cast<const U *>(Get(i));
  }

  // If this a vector of unions, this does the cast for you. There's no check
  // to make sure this is actually a string!
  const String *GetAsString(uoffset_t i) const {
    return reinterpret_cast<const String *>(Get(i));
  }

  // Raw access to an inline struct at byte offset `o`.
  const void *GetStructFromOffset(size_t o) const {
    return reinterpret_cast<const void *>(Data() + o);
  }

  iterator begin() { return iterator(Data(), 0); }
  const_iterator begin() const { return const_iterator(Data(), 0); }

  iterator end() { return iterator(Data(), size()); }
  const_iterator end() const { return const_iterator(Data(), size()); }

  reverse_iterator rbegin() { return reverse_iterator(end()); }
  const_reverse_iterator rbegin() const {
    return const_reverse_iterator(end());
  }

  reverse_iterator rend() { return reverse_iterator(begin()); }
  const_reverse_iterator rend() const {
    return const_reverse_iterator(begin());
  }

  const_iterator cbegin() const { return begin(); }
  const_iterator cend() const { return end(); }
  const_reverse_iterator crbegin() const { return rbegin(); }
  const_reverse_iterator crend() const { return rend(); }

  // Change elements if you have a non-const pointer to this object.
  // Scalars only. See reflection.h, and the documentation.
  void Mutate(uoffset_t i, const T &val) {
    FLATBUFFERS_ASSERT(i < size());
    WriteScalar(data() + i, val);
  }

  // Change an element of a vector of tables (or strings).
  // "val" points to the new table/string, as you can obtain from
  // e.g. reflection::AddFlatBuffer(). A relative uoffset is stored, so
  // `val` must live inside the same buffer.
  void MutateOffset(uoffset_t i, const uint8_t *val) {
    FLATBUFFERS_ASSERT(i < size());
    static_assert(sizeof(T) == sizeof(uoffset_t), "Unrelated types");
    WriteScalar(data() + i,
                static_cast<uoffset_t>(val - (Data() + i * sizeof(uoffset_t))));
  }

  // Get a mutable pointer to tables/strings inside this vector.
  mutable_return_type GetMutableObject(uoffset_t i) const {
    FLATBUFFERS_ASSERT(i < size());
    return const_cast<mutable_return_type>(IndirectHelper<T>::Read(Data(), i));
  }

  // The raw data in little endian format. Use with care.
  // Elements start directly after the length field.
  const uint8_t *Data() const {
    return reinterpret_cast<const uint8_t *>(&length_ + 1);
  }

  uint8_t *Data() { return reinterpret_cast<uint8_t *>(&length_ + 1); }

  // Similarly, but typed, much like std::vector::data
  const T *data() const { return reinterpret_cast<const T *>(Data()); }
  T *data() { return reinterpret_cast<T *>(Data()); }

  // Binary search over a vector sorted by its elements' key field.
  // Elements must provide KeyCompareWithValue(K); see KeyCompare below.
  template<typename K> return_type LookupByKey(K key) const {
    void *search_result = std::bsearch(
        &key, Data(), size(), IndirectHelper<T>::element_stride, KeyCompare<K>);

    if (!search_result) {
      return nullptr;  // Key not found.
    }

    const uint8_t *element = reinterpret_cast<const uint8_t *>(search_result);

    return IndirectHelper<T>::Read(element, 0);
  }

  template<typename K> mutable_return_type MutableLookupByKey(K key) {
    return const_cast<mutable_return_type>(LookupByKey(key));
  }

 protected:
  // This class is only used to access pre-existing data. Don't ever
  // try to construct these manually.
  Vector();

  uoffset_t length_;

 private:
  // This class is a pointer. Copying will therefore create an invalid object.
  // Private and unimplemented copy constructor.
  Vector(const Vector &);
  Vector &operator=(const Vector &);

  // Adapter so std::bsearch can call the element's KeyCompareWithValue.
  template<typename K> static int KeyCompare(const void *ap, const void *bp) {
    const K *key = reinterpret_cast<const K *>(ap);
    const uint8_t *data = reinterpret_cast<const uint8_t *>(bp);
    auto table = IndirectHelper<T>::Read(data, 0);

    // std::bsearch compares with the operands transposed, so we negate the
    // result here.
    return -table->KeyCompareWithValue(*key);
  }
};
// Aliasing span views over a Vector's element storage. All overloads
// static_assert that such aliasing is valid (see is_span_observable /
// scalar_tag on Vector): the in-memory layout must equal the wire layout.

// Mutable span over the typed elements of `vec`.
template<class U>
FLATBUFFERS_CONSTEXPR_CPP11 flatbuffers::span<U> make_span(Vector<U> &vec)
    FLATBUFFERS_NOEXCEPT {
  static_assert(Vector<U>::is_span_observable,
                "wrong type U, only LE-scalar, or byte types are allowed");
  return span<U>(vec.data(), vec.size());
}

// Read-only span over the typed elements of `vec`.
template<class U>
FLATBUFFERS_CONSTEXPR_CPP11 flatbuffers::span<const U> make_span(
    const Vector<U> &vec) FLATBUFFERS_NOEXCEPT {
  static_assert(Vector<U>::is_span_observable,
                "wrong type U, only LE-scalar, or byte types are allowed");
  return span<const U>(vec.data(), vec.size());
}

// Mutable byte-wise span over `vec`'s element storage.
template<class U>
FLATBUFFERS_CONSTEXPR_CPP11 flatbuffers::span<uint8_t> make_bytes_span(
    Vector<U> &vec) FLATBUFFERS_NOEXCEPT {
  static_assert(Vector<U>::scalar_tag::value,
                "wrong type U, only LE-scalar, or byte types are allowed");
  return span<uint8_t>(vec.Data(), vec.size() * sizeof(U));
}

// Read-only byte-wise span over `vec`'s element storage.
template<class U>
FLATBUFFERS_CONSTEXPR_CPP11 flatbuffers::span<const uint8_t> make_bytes_span(
    const Vector<U> &vec) FLATBUFFERS_NOEXCEPT {
  static_assert(Vector<U>::scalar_tag::value,
                "wrong type U, only LE-scalar, or byte types are allowed");
  return span<const uint8_t>(vec.Data(), vec.size() * sizeof(U));
}

// Convenient helper functions to get a span of any vector, regardless
// of whether it is null or not (the field is not set).
template<class U>
FLATBUFFERS_CONSTEXPR_CPP11 flatbuffers::span<U> make_span(Vector<U> *ptr)
    FLATBUFFERS_NOEXCEPT {
  static_assert(Vector<U>::is_span_observable,
                "wrong type U, only LE-scalar, or byte types are allowed");
  return ptr ? make_span(*ptr) : span<U>();
}

template<class U>
FLATBUFFERS_CONSTEXPR_CPP11 flatbuffers::span<const U> make_span(
    const Vector<U> *ptr) FLATBUFFERS_NOEXCEPT {
  static_assert(Vector<U>::is_span_observable,
                "wrong type U, only LE-scalar, or byte types are allowed");
  return ptr ? make_span(*ptr) : span<const U>();
}
// Represent a vector much like the template above, but in this case we
// don't know what the element types are (used with reflection.h).
class VectorOfAny {
 public:
  // Element count (stored little-endian in the buffer).
  uoffset_t size() const { return EndianScalar(length_); }

  // Raw bytes of the elements, which start directly after the length field.
  const uint8_t *Data() const {
    return reinterpret_cast<const uint8_t *>(&length_ + 1);
  }
  uint8_t *Data() { return reinterpret_cast<uint8_t *>(&length_ + 1); }

 protected:
  // Only ever obtained by pointing into existing buffer data.
  VectorOfAny();

  uoffset_t length_;

 private:
  VectorOfAny(const VectorOfAny &);
  VectorOfAny &operator=(const VectorOfAny &);
};
// Casts a vector of table offsets to a vector of offsets of a base table
// type. The static_assert enforces the inheritance relationship at compile
// time; the reinterpret_cast itself is layout-preserving.
template<typename T, typename U>
Vector<Offset<T>> *VectorCast(Vector<Offset<U>> *ptr) {
  static_assert(std::is_base_of<T, U>::value, "Unrelated types");
  return reinterpret_cast<Vector<Offset<T>> *>(ptr);
}

template<typename T, typename U>
const Vector<Offset<T>> *VectorCast(const Vector<Offset<U>> *ptr) {
  static_assert(std::is_base_of<T, U>::value, "Unrelated types");
  return reinterpret_cast<const Vector<Offset<T>> *>(ptr);
}
// Convenient helper function to get the length of any vector, regardless
// of whether it is null or not (the field is not set).
template<typename T> static inline size_t VectorLength(const Vector<T> *v) {
  if (v == nullptr) return 0;
  return v->size();
}
} // namespace flatbuffers
#endif  // FLATBUFFERS_VECTOR_H_

View File

@ -0,0 +1,271 @@
/*
* Copyright 2021 Google Inc. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef FLATBUFFERS_VECTOR_DOWNWARD_H_
#define FLATBUFFERS_VECTOR_DOWNWARD_H_
#include "flatbuffers/base.h"
#include "flatbuffers/default_allocator.h"
#include "flatbuffers/detached_buffer.h"
namespace flatbuffers {
// This is a minimal replication of std::vector<uint8_t> functionality,
// except growing from higher to lower addresses. i.e. push_back() inserts data
// in the lowest address in the vector.
// Since this vector leaves the lower part unused, we support a "scratch-pad"
// that can be stored there for temporary data, to share the allocated space.
// Essentially, this supports 2 std::vectors in a single buffer.
class vector_downward {
 public:
  explicit vector_downward(size_t initial_size, Allocator *allocator,
                           bool own_allocator, size_t buffer_minalign)
      : allocator_(allocator),
        own_allocator_(own_allocator),
        initial_size_(initial_size),
        buffer_minalign_(buffer_minalign),
        reserved_(0),
        size_(0),
        buf_(nullptr),
        cur_(nullptr),
        scratch_(nullptr) {}

  // Move constructor: steals the buffer (and allocator ownership); the
  // moved-from vector keeps its configuration but holds no buffer.
  vector_downward(vector_downward &&other)
      // clang-format on
      : allocator_(other.allocator_),
        own_allocator_(other.own_allocator_),
        initial_size_(other.initial_size_),
        buffer_minalign_(other.buffer_minalign_),
        reserved_(other.reserved_),
        size_(other.size_),
        buf_(other.buf_),
        cur_(other.cur_),
        scratch_(other.scratch_) {
    // No change in other.allocator_
    // No change in other.initial_size_
    // No change in other.buffer_minalign_
    other.own_allocator_ = false;
    other.reserved_ = 0;
    other.buf_ = nullptr;
    other.cur_ = nullptr;
    other.scratch_ = nullptr;
  }

  vector_downward &operator=(vector_downward &&other) {
    // Move construct a temporary and swap idiom
    vector_downward temp(std::move(other));
    swap(temp);
    return *this;
  }

  ~vector_downward() {
    clear_buffer();
    clear_allocator();
  }

  // Frees the buffer and returns to the freshly-constructed state.
  void reset() {
    clear_buffer();
    clear();
  }

  // Empties the vector but keeps the allocation (capacity) for reuse.
  void clear() {
    if (buf_) {
      cur_ = buf_ + reserved_;
    } else {
      reserved_ = 0;
      cur_ = nullptr;
    }
    size_ = 0;
    clear_scratch();
  }

  void clear_scratch() { scratch_ = buf_; }

  void clear_allocator() {
    if (own_allocator_ && allocator_) { delete allocator_; }
    allocator_ = nullptr;
    own_allocator_ = false;
  }

  void clear_buffer() {
    if (buf_) Deallocate(allocator_, buf_, reserved_);
    buf_ = nullptr;
  }

  // Relinquish the pointer to the caller.
  // `offset` is where the used data begins within the returned allocation.
  uint8_t *release_raw(size_t &allocated_bytes, size_t &offset) {
    auto *buf = buf_;
    allocated_bytes = reserved_;
    offset = static_cast<size_t>(cur_ - buf_);

    // release_raw only relinquishes the buffer ownership.
    // Does not deallocate or reset the allocator. Destructor will do that.
    buf_ = nullptr;
    clear();
    return buf;
  }

  // Relinquish the pointer to the caller.
  DetachedBuffer release() {
    // allocator ownership (if any) is transferred to DetachedBuffer.
    DetachedBuffer fb(allocator_, own_allocator_, buf_, reserved_, cur_,
                      size());
    if (own_allocator_) {
      allocator_ = nullptr;
      own_allocator_ = false;
    }
    buf_ = nullptr;
    clear();
    return fb;
  }

  // Grows the allocation if fewer than `len` free bytes remain between the
  // scratch-pad (low end) and the data (high end). Returns `len`.
  size_t ensure_space(size_t len) {
    FLATBUFFERS_ASSERT(cur_ >= scratch_ && scratch_ >= buf_);
    if (len > static_cast<size_t>(cur_ - scratch_)) { reallocate(len); }
    // Beyond this, signed offsets may not have enough range:
    // (FlatBuffers > 2GB not supported).
    FLATBUFFERS_ASSERT(size() < FLATBUFFERS_MAX_BUFFER_SIZE);
    return len;
  }

  // Claims `len` bytes at the (downward-growing) front and returns their
  // address.
  inline uint8_t *make_space(size_t len) {
    if (len) {
      ensure_space(len);
      cur_ -= len;
      size_ += static_cast<uoffset_t>(len);
    }
    return cur_;
  }

  // Returns nullptr if using the DefaultAllocator.
  Allocator *get_custom_allocator() { return allocator_; }

  inline uoffset_t size() const { return size_; }

  uoffset_t scratch_size() const {
    return static_cast<uoffset_t>(scratch_ - buf_);
  }

  size_t capacity() const { return reserved_; }

  // Start of the used (downward-grown) data.
  uint8_t *data() const {
    FLATBUFFERS_ASSERT(cur_);
    return cur_;
  }

  uint8_t *scratch_data() const {
    FLATBUFFERS_ASSERT(buf_);
    return buf_;
  }

  uint8_t *scratch_end() const {
    FLATBUFFERS_ASSERT(scratch_);
    return scratch_;
  }

  // Address `offset` bytes back from the high end of the buffer.
  uint8_t *data_at(size_t offset) const { return buf_ + reserved_ - offset; }

  void push(const uint8_t *bytes, size_t num) {
    if (num > 0) { memcpy(make_space(num), bytes, num); }
  }

  // Specialized version of push() that avoids memcpy call for small data.
  template<typename T> void push_small(const T &little_endian_t) {
    make_space(sizeof(T));
    *reinterpret_cast<T *>(cur_) = little_endian_t;
  }

  // Appends to the (upward-growing) scratch-pad at the low end.
  template<typename T> void scratch_push_small(const T &t) {
    ensure_space(sizeof(T));
    *reinterpret_cast<T *>(scratch_) = t;
    scratch_ += sizeof(T);
  }

  // fill() is most frequently called with small byte counts (<= 4),
  // which is why we're using loops rather than calling memset.
  void fill(size_t zero_pad_bytes) {
    make_space(zero_pad_bytes);
    for (size_t i = 0; i < zero_pad_bytes; i++) cur_[i] = 0;
  }

  // Version for when we know the size is larger.
  // Precondition: zero_pad_bytes > 0
  void fill_big(size_t zero_pad_bytes) {
    memset(make_space(zero_pad_bytes), 0, zero_pad_bytes);
  }

  void pop(size_t bytes_to_remove) {
    cur_ += bytes_to_remove;
    size_ -= static_cast<uoffset_t>(bytes_to_remove);
  }

  void scratch_pop(size_t bytes_to_remove) { scratch_ -= bytes_to_remove; }

  void swap(vector_downward &other) {
    using std::swap;
    swap(allocator_, other.allocator_);
    swap(own_allocator_, other.own_allocator_);
    swap(initial_size_, other.initial_size_);
    swap(buffer_minalign_, other.buffer_minalign_);
    swap(reserved_, other.reserved_);
    swap(size_, other.size_);
    swap(buf_, other.buf_);
    swap(cur_, other.cur_);
    swap(scratch_, other.scratch_);
  }

  void swap_allocator(vector_downward &other) {
    using std::swap;
    swap(allocator_, other.allocator_);
    swap(own_allocator_, other.own_allocator_);
  }

 private:
  // You shouldn't really be copying instances of this class.
  FLATBUFFERS_DELETE_FUNC(vector_downward(const vector_downward &));
  FLATBUFFERS_DELETE_FUNC(vector_downward &operator=(const vector_downward &));

  Allocator *allocator_;
  bool own_allocator_;
  size_t initial_size_;
  size_t buffer_minalign_;
  size_t reserved_;
  uoffset_t size_;
  uint8_t *buf_;
  uint8_t *cur_;  // Points at location between empty (below) and used (above).
  uint8_t *scratch_;  // Points to the end of the scratchpad in use.

  // Grows capacity by at least `len` bytes (at least +50%, or initial_size_
  // for the first allocation), rounded up to buffer_minalign_, preserving
  // both the data (kept at the high end) and the scratch-pad (low end).
  void reallocate(size_t len) {
    auto old_reserved = reserved_;
    auto old_size = size();
    auto old_scratch_size = scratch_size();
    reserved_ +=
        (std::max)(len, old_reserved ? old_reserved / 2 : initial_size_);
    reserved_ = (reserved_ + buffer_minalign_ - 1) & ~(buffer_minalign_ - 1);
    if (buf_) {
      buf_ = ReallocateDownward(allocator_, buf_, old_reserved, reserved_,
                                old_size, old_scratch_size);
    } else {
      buf_ = Allocate(allocator_, reserved_);
    }
    cur_ = buf_ + reserved_ - old_size;
    scratch_ = buf_ + old_scratch_size;
  }
};
} // namespace flatbuffers
#endif // FLATBUFFERS_VECTOR_DOWNWARD_H_

// ---------------------------------------------------------------------------
// include/flatbuffers/verifier.h (new file added in this commit)
// ---------------------------------------------------------------------------
/*
* Copyright 2021 Google Inc. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef FLATBUFFERS_VERIFIER_H_
#define FLATBUFFERS_VERIFIER_H_
#include "flatbuffers/base.h"
#include "flatbuffers/vector.h"
namespace flatbuffers {
// Helper class to verify the integrity of a FlatBuffer. It walks the object
// graph from the root, checking that every offset, vtable, vector, string and
// scalar lies entirely within [buf, buf + buf_len) with suitable alignment,
// without ever dereferencing out-of-bounds memory.
class Verifier FLATBUFFERS_FINAL_CLASS {
 public:
  // Tunable limits and switches controlling verification strictness.
  struct Options {
    // The maximum nesting of tables and vectors before we call it invalid.
    uoffset_t max_depth = 64;
    // The maximum number of tables we will verify before we call it invalid.
    uoffset_t max_tables = 1000000;
    // If true, verify all data is aligned.
    bool check_alignment = true;
    // If true, run verifier on nested flatbuffers
    bool check_nested_flatbuffers = true;
  };
  explicit Verifier(const uint8_t *const buf, const size_t buf_len,
                    const Options &opts)
      : buf_(buf), size_(buf_len), opts_(opts) {
    // Buffers at or above FLATBUFFERS_MAX_BUFFER_SIZE cannot be represented
    // by uoffset_t offsets and are rejected outright.
    FLATBUFFERS_ASSERT(size_ < FLATBUFFERS_MAX_BUFFER_SIZE);
  }
  // Deprecated API, please construct with Verifier::Options.
  // Forwards to the Options constructor via an immediately-invoked lambda.
  Verifier(const uint8_t *const buf, const size_t buf_len,
           const uoffset_t max_depth = 64, const uoffset_t max_tables = 1000000,
           const bool check_alignment = true)
      : Verifier(buf, buf_len, [&] {
          Options opts;
          opts.max_depth = max_depth;
          opts.max_tables = max_tables;
          opts.check_alignment = check_alignment;
          return opts;
        }()) {}
  // Central location where any verification failures register.
  // In FLATBUFFERS_TRACK_VERIFIER_BUFFER_SIZE builds a failure also resets
  // upper_bound_ so GetComputedSize() reports 0.
  bool Check(const bool ok) const {
    // clang-format off
    #ifdef FLATBUFFERS_DEBUG_VERIFICATION_FAILURE
      FLATBUFFERS_ASSERT(ok);
    #endif
    #ifdef FLATBUFFERS_TRACK_VERIFIER_BUFFER_SIZE
      if (!ok)
        upper_bound_ = 0;
    #endif
    // clang-format on
    return ok;
  }
  // Verify any range within the buffer.
  // The comparison is written as `elem <= size_ - elem_len` (after checking
  // elem_len < size_) so that `elem + elem_len` can never overflow.
  bool Verify(const size_t elem, const size_t elem_len) const {
    // clang-format off
    #ifdef FLATBUFFERS_TRACK_VERIFIER_BUFFER_SIZE
      auto upper_bound = elem + elem_len;
      if (upper_bound_ < upper_bound)
        upper_bound_ = upper_bound;
    #endif
    // clang-format on
    return Check(elem_len < size_ && elem <= size_ - elem_len);
  }
  // `align` must be a power of two for the mask trick below to be valid.
  // Always succeeds when opts_.check_alignment is false.
  bool VerifyAlignment(const size_t elem, const size_t align) const {
    return Check((elem & (align - 1)) == 0 || !opts_.check_alignment);
  }
  // Verify a range indicated by sizeof(T).
  template<typename T> bool Verify(const size_t elem) const {
    return VerifyAlignment(elem, sizeof(T)) && Verify(elem, sizeof(T));
  }
  // Verify a range given by a pointer into the buffer plus a length.
  bool VerifyFromPointer(const uint8_t *const p, const size_t len) {
    return Verify(static_cast<size_t>(p - buf_), len);
  }
  // Verify relative to a known-good base pointer.
  bool VerifyFieldStruct(const uint8_t *const base, const voffset_t elem_off,
                         const size_t elem_len, const size_t align) const {
    const auto f = static_cast<size_t>(base - buf_) + elem_off;
    return VerifyAlignment(f, align) && Verify(f, elem_len);
  }
  // Verify a scalar field of type T at offset elem_off from `base`.
  template<typename T>
  bool VerifyField(const uint8_t *const base, const voffset_t elem_off,
                   const size_t align) const {
    const auto f = static_cast<size_t>(base - buf_) + elem_off;
    return VerifyAlignment(f, align) && Verify(f, sizeof(T));
  }
  // Verify a pointer (may be NULL) of a table type.
  template<typename T> bool VerifyTable(const T *const table) {
    return !table || table->Verify(*this);
  }
  // Verify a pointer (may be NULL) of any vector type.
  template<typename T> bool VerifyVector(const Vector<T> *const vec) const {
    return !vec || VerifyVectorOrString(reinterpret_cast<const uint8_t *>(vec),
                                        sizeof(T));
  }
  // Verify a pointer (may be NULL) of a vector to struct.
  template<typename T>
  bool VerifyVector(const Vector<const T *> *const vec) const {
    return VerifyVector(reinterpret_cast<const Vector<T> *>(vec));
  }
  // Verify a pointer (may be NULL) to string.
  bool VerifyString(const String *const str) const {
    size_t end;
    return !str || (VerifyVectorOrString(reinterpret_cast<const uint8_t *>(str),
                                         1, &end) &&
                    Verify(end, 1) &&           // Must have terminator
                    Check(buf_[end] == '\0'));  // Terminating byte must be 0.
  }
  // Common code between vectors and strings.
  // On success, if `end` is non-null it receives the offset one past the
  // element data (i.e. where a string's NUL terminator would live).
  bool VerifyVectorOrString(const uint8_t *const vec, const size_t elem_size,
                            size_t *const end = nullptr) const {
    const auto veco = static_cast<size_t>(vec - buf_);
    // Check we can read the size field.
    if (!Verify<uoffset_t>(veco)) return false;
    // Check the whole array. If this is a string, the byte past the array must
    // be 0.
    const auto size = ReadScalar<uoffset_t>(vec);
    const auto max_elems = FLATBUFFERS_MAX_BUFFER_SIZE / elem_size;
    if (!Check(size < max_elems))
      return false;  // Protect against byte_size overflowing.
    const auto byte_size = sizeof(size) + elem_size * size;
    if (end) *end = veco + byte_size;
    return Verify(veco, byte_size);
  }
  // Special case for string contents, after the above has been called.
  bool VerifyVectorOfStrings(const Vector<Offset<String>> *const vec) const {
    if (vec) {
      for (uoffset_t i = 0; i < vec->size(); i++) {
        if (!VerifyString(vec->Get(i))) return false;
      }
    }
    return true;
  }
  // Special case for table contents, after the above has been called.
  template<typename T>
  bool VerifyVectorOfTables(const Vector<Offset<T>> *const vec) {
    if (vec) {
      for (uoffset_t i = 0; i < vec->size(); i++) {
        if (!vec->Get(i)->Verify(*this)) return false;
      }
    }
    return true;
  }
  // Verify a table's vtable: its offset, size field, size sanity and that the
  // whole vtable fits in the buffer. Also bumps depth/table counters.
  __suppress_ubsan__("unsigned-integer-overflow") bool VerifyTableStart(
      const uint8_t *const table) {
    // Check the vtable offset.
    const auto tableo = static_cast<size_t>(table - buf_);
    if (!Verify<soffset_t>(tableo)) return false;
    // This offset may be signed, but doing the subtraction unsigned always
    // gives the result we want.
    const auto vtableo =
        tableo - static_cast<size_t>(ReadScalar<soffset_t>(table));
    // Check the vtable size field, then check vtable fits in its entirety.
    if (!(VerifyComplexity() && Verify<voffset_t>(vtableo) &&
          VerifyAlignment(ReadScalar<voffset_t>(buf_ + vtableo),
                          sizeof(voffset_t))))
      return false;
    // vtable sizes are counted in voffset_t entries, hence must be even.
    const auto vsize = ReadScalar<voffset_t>(buf_ + vtableo);
    return Check((vsize & 1) == 0) && Verify(vtableo, vsize);
  }
  // Verify a root object of type T located via the offset at `start`,
  // optionally checking the 4-byte file identifier first.
  template<typename T>
  bool VerifyBufferFromStart(const char *const identifier, const size_t start) {
    // Buffers have to be of some size to be valid. The reason it is a runtime
    // check instead of static_assert, is that nested flatbuffers go through
    // this call and their size is determined at runtime.
    if (!Check(size_ >= FLATBUFFERS_MIN_BUFFER_SIZE)) return false;
    // If an identifier is provided, check that we have a buffer
    if (identifier && !Check((size_ >= 2 * sizeof(flatbuffers::uoffset_t) &&
                              BufferHasIdentifier(buf_ + start, identifier)))) {
      return false;
    }
    // Call T::Verify, which must be in the generated code for this type.
    const auto o = VerifyOffset(start);
    return Check(o != 0) &&
           reinterpret_cast<const T *>(buf_ + start + o)->Verify(*this)
    // clang-format off
    #ifdef FLATBUFFERS_TRACK_VERIFIER_BUFFER_SIZE
           && GetComputedSize()
    #endif
        ;
    // clang-format on
  }
  // Verify a ubyte vector field that holds a nested flatbuffer of root type T,
  // by running a fresh Verifier (with default options) over its bytes.
  template<typename T>
  bool VerifyNestedFlatBuffer(const Vector<uint8_t> *const buf,
                              const char *const identifier) {
    // Caller opted out of this.
    if (!opts_.check_nested_flatbuffers) return true;
    // An empty buffer is OK as it indicates not present.
    if (!buf) return true;
    // If there is a nested buffer, it must be greater than the min size.
    if (!Check(buf->size() >= FLATBUFFERS_MIN_BUFFER_SIZE)) return false;
    Verifier nested_verifier(buf->data(), buf->size());
    return nested_verifier.VerifyBuffer<T>(identifier);
  }
  // Verify this whole buffer, starting with root type T.
  template<typename T> bool VerifyBuffer() { return VerifyBuffer<T>(nullptr); }
  template<typename T> bool VerifyBuffer(const char *const identifier) {
    return VerifyBufferFromStart<T>(identifier, 0);
  }
  // Verify a buffer whose first uoffset_t is its own size (size-prefixed);
  // the prefix must equal the remaining byte count exactly.
  template<typename T>
  bool VerifySizePrefixedBuffer(const char *const identifier) {
    return Verify<uoffset_t>(0U) &&
           Check(ReadScalar<uoffset_t>(buf_) == size_ - sizeof(uoffset_t)) &&
           VerifyBufferFromStart<T>(identifier, sizeof(uoffset_t));
  }
  // Read and validate the uoffset_t at `start`; returns it on success, or 0
  // (never a legal offset) on failure.
  uoffset_t VerifyOffset(const size_t start) const {
    if (!Verify<uoffset_t>(start)) return 0;
    const auto o = ReadScalar<uoffset_t>(buf_ + start);
    // May not point to itself.
    if (!Check(o != 0)) return 0;
    // Can't wrap around / buffers are max 2GB.
    if (!Check(static_cast<soffset_t>(o) >= 0)) return 0;
    // Must be inside the buffer to create a pointer from it (pointer outside
    // buffer is UB).
    if (!Verify(start + o, 1)) return 0;
    return o;
  }
  // Same as above, but relative to a known-good base pointer.
  uoffset_t VerifyOffset(const uint8_t *const base,
                         const voffset_t start) const {
    return VerifyOffset(static_cast<size_t>(base - buf_) + start);
  }
  // Called at the start of a table to increase counters measuring data
  // structure depth and amount, and possibly bails out with false if limits set
  // by the constructor have been hit. Needs to be balanced with EndTable().
  bool VerifyComplexity() {
    depth_++;
    num_tables_++;
    return Check(depth_ <= opts_.max_depth && num_tables_ <= opts_.max_tables);
  }
  // Called at the end of a table to pop the depth count.
  bool EndTable() {
    depth_--;
    return true;
  }
  // Returns the message size in bytes
  // (highest byte touched during verification, rounded up to uoffset_t;
  // only meaningful with FLATBUFFERS_TRACK_VERIFIER_BUFFER_SIZE defined).
  size_t GetComputedSize() const {
    // clang-format off
    #ifdef FLATBUFFERS_TRACK_VERIFIER_BUFFER_SIZE
      uintptr_t size = upper_bound_;
      // Align the size to uoffset_t
      size = (size - 1 + sizeof(uoffset_t)) & ~(sizeof(uoffset_t) - 1);
      return (size > size_) ?  0 : size;
    #else
      // Must turn on FLATBUFFERS_TRACK_VERIFIER_BUFFER_SIZE for this to work.
      (void)upper_bound_;
      FLATBUFFERS_ASSERT(false);
      return 0;
    #endif
    // clang-format on
  }
  // Accessors for the FlexBuffers reuse tracker — semantics live in the
  // FlexBuffers verifier; presumably tracks already-verified offsets to
  // avoid re-verification (TODO confirm against flexbuffers.h).
  std::vector<uint8_t> *GetFlexReuseTracker() { return flex_reuse_tracker_; }
  void SetFlexReuseTracker(std::vector<uint8_t> *const rt) {
    flex_reuse_tracker_ = rt;
  }
 private:
  const uint8_t *buf_;        // Start of the buffer being verified.
  const size_t size_;         // Total size of the buffer in bytes.
  const Options opts_;        // Limits/switches set at construction.
  // Highest byte offset touched so far; mutable so const Verify() can track
  // it (only used with FLATBUFFERS_TRACK_VERIFIER_BUFFER_SIZE).
  mutable size_t upper_bound_ = 0;
  uoffset_t depth_ = 0;       // Current table/vector nesting depth.
  uoffset_t num_tables_ = 0;  // Tables verified so far.
  std::vector<uint8_t> *flex_reuse_tracker_ = nullptr;
};
} // namespace flatbuffers
#endif // FLATBUFFERS_VERIFIER_H_