mirror of https://git.suyu.dev/suyu/suyu
hle: nvdrv: Rename Fence to NvFence to avoid naming conflicts.
parent d456b9d554
commit 402273d91b
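Background on the rename, not part of the diff itself: nvdrv's fence type previously shared the name Fence with the android-style fence used by the nvflinger/ui layer, which nvdrv.h starts including in this change. Below is a minimal sketch of the kind of clash that motivates renaming one side; the namespaces and members are illustrative assumptions, not the emulator's actual declarations.

    // Illustrative only: two headers each export a type named Fence.
    namespace nvdrv {
    struct Fence {     // syncpoint-style fence: an id plus a target value
        int id;
        unsigned value;
    };
    } // namespace nvdrv

    namespace ui {
    struct Fence {     // android/ui-style fence with an unrelated layout
        unsigned num_fences;
    };
    } // namespace ui

    using namespace nvdrv;
    using namespace ui;

    // Fence f{};            // error: reference to 'Fence' is ambiguous
    nvdrv::Fence nv_fence{}; // still usable, but only when fully qualified
    ui::Fence ui_fence{};

    // Renaming the nvdrv side to NvFence, as this commit does, removes the
    // ambiguity: an unqualified 'Fence' can then only mean the ui type.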
@@ -187,7 +187,7 @@ NvResult nvhost_gpu::AllocateObjectContext(const std::vector<u8>& input, std::ve
     return NvResult::Success;
 }
 
-static std::vector<Tegra::CommandHeader> BuildWaitCommandList(Fence fence) {
+static std::vector<Tegra::CommandHeader> BuildWaitCommandList(NvFence fence) {
     return {
         Tegra::BuildCommandHeader(Tegra::BufferMethods::FenceValue, 1,
                                   Tegra::SubmissionMode::Increasing),
@@ -198,7 +198,8 @@ static std::vector<Tegra::CommandHeader> BuildWaitCommandList(Fence fence) {
     };
 }
 
-static std::vector<Tegra::CommandHeader> BuildIncrementCommandList(Fence fence, u32 add_increment) {
+static std::vector<Tegra::CommandHeader> BuildIncrementCommandList(NvFence fence,
+                                                                   u32 add_increment) {
     std::vector<Tegra::CommandHeader> result{
         Tegra::BuildCommandHeader(Tegra::BufferMethods::FenceValue, 1,
                                   Tegra::SubmissionMode::Increasing),
@@ -213,7 +214,7 @@ static std::vector<Tegra::CommandHeader> BuildIncrementCommandList(Fence fence,
     return result;
 }
 
-static std::vector<Tegra::CommandHeader> BuildIncrementWithWfiCommandList(Fence fence,
+static std::vector<Tegra::CommandHeader> BuildIncrementWithWfiCommandList(NvFence fence,
                                                                           u32 add_increment) {
     std::vector<Tegra::CommandHeader> result{
         Tegra::BuildCommandHeader(Tegra::BufferMethods::WaitForInterrupt, 1,

@@ -109,7 +109,7 @@ private:
     static_assert(sizeof(IoctlGetErrorNotification) == 16,
                   "IoctlGetErrorNotification is incorrect size");
 
-    static_assert(sizeof(Fence) == 8, "Fence is incorrect size");
+    static_assert(sizeof(NvFence) == 8, "Fence is incorrect size");
 
     struct IoctlAllocGpfifoEx {
         u32_le num_entries{};
@@ -127,7 +127,7 @@ private:
         u32_le num_entries{}; // in
         u32_le flags{};       // in
         u32_le unk0{};        // in (1 works)
-        Fence fence_out{};    // out
+        NvFence fence_out{};  // out
         u32_le unk1{};        // in
         u32_le unk2{};        // in
         u32_le unk3{};        // in
@@ -153,13 +153,13 @@ private:
             BitField<4, 1, u32_le> suppress_wfi;  // suppress wait for interrupt
             BitField<8, 1, u32_le> increment;     // increment the returned fence
         } flags;
-        Fence fence_out{}; // returned new fence object for others to wait on
+        NvFence fence_out{}; // returned new fence object for others to wait on
 
         u32 AddIncrementValue() const {
            return flags.add_increment.Value() << 1;
         }
     };
-    static_assert(sizeof(IoctlSubmitGpfifo) == 16 + sizeof(Fence),
+    static_assert(sizeof(IoctlSubmitGpfifo) == 16 + sizeof(NvFence),
                   "IoctlSubmitGpfifo is incorrect size");
 
     struct IoctlGetWaitbase {
@@ -194,7 +194,7 @@ private:
 
     std::shared_ptr<nvmap> nvmap_dev;
     SyncpointManager& syncpoint_manager;
-    Fence channel_fence;
+    NvFence channel_fence;
 };
 
 } // namespace Service::Nvidia::Devices

@@ -16,17 +16,11 @@ using DeviceFD = s32;
 
 constexpr DeviceFD INVALID_NVDRV_FD = -1;
 
-struct Fence {
+struct NvFence {
     s32 id;
     u32 value;
 };
-
-static_assert(sizeof(Fence) == 8, "Fence has wrong size");
-
-struct MultiFence {
-    u32 num_fences;
-    std::array<Fence, 4> fences;
-};
+static_assert(sizeof(NvFence) == 8, "Fence has wrong size");
 
 enum class NvResult : u32 {
     Success = 0x0,

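The nvdata.h hunk above also deletes the MultiFence helper alongside the rename. For readers tracking call sites, this is the shape it had, re-expressed against the renamed type; the NvMultiFence name is an assumption for illustration only, not something this commit introduces.

    // Hypothetical sketch, not from the commit: MultiFence carried a count
    // plus up to four fences; the same aggregate written with NvFence.
    #include <array>
    #include <cstdint>

    struct NvFence {
        std::int32_t id;
        std::uint32_t value;
    };

    struct NvMultiFence {              // assumed name, illustration only
        std::uint32_t num_fences;
        std::array<NvFence, 4> fences;
    };
    static_assert(sizeof(NvMultiFence) == 4 + 4 * sizeof(NvFence), "unexpected padding");
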
@@ -12,6 +12,7 @@
 #include "core/hle/service/kernel_helpers.h"
 #include "core/hle/service/nvdrv/nvdata.h"
 #include "core/hle/service/nvdrv/syncpoint_manager.h"
+#include "core/hle/service/nvflinger/ui/fence.h"
 #include "core/hle/service/service.h"
 
 namespace Core {
@@ -37,7 +38,7 @@ class nvdevice;
 /// Represents an Nvidia event
 struct NvEvent {
     Kernel::KEvent* event{};
-    Fence fence{};
+    NvFence fence{};
 };
 
 struct EventInterface {