forked from ShuriZma/suyu
gpu: Refactor command and swap buffers interface for asynch.
parent 4483089d70
commit ac51d048a9
@@ -36,7 +36,7 @@ void nvdisp_disp0::flip(u32 buffer_handle, u32 offset, u32 format, u32 width, u3
     auto& instance = Core::System::GetInstance();
     instance.GetPerfStats().EndGameFrame();
-    instance.Renderer().SwapBuffers(framebuffer);
+    instance.GPU().SwapBuffers(framebuffer);
 }

 } // namespace Service::Nvidia::Devices
@@ -136,16 +136,6 @@ u32 nvhost_gpu::AllocateObjectContext(const std::vector<u8>& input, std::vector<
     return 0;
 }

-static void PushGPUEntries(Tegra::CommandList&& entries) {
-    if (entries.empty()) {
-        return;
-    }
-
-    auto& dma_pusher{Core::System::GetInstance().GPU().DmaPusher()};
-    dma_pusher.Push(std::move(entries));
-    dma_pusher.DispatchCalls();
-}
-
 u32 nvhost_gpu::SubmitGPFIFO(const std::vector<u8>& input, std::vector<u8>& output) {
     if (input.size() < sizeof(IoctlSubmitGpfifo)) {
         UNIMPLEMENTED();
@@ -163,7 +153,7 @@ u32 nvhost_gpu::SubmitGPFIFO(const std::vector<u8>& input, std::vector<u8>& outp
     std::memcpy(entries.data(), &input[sizeof(IoctlSubmitGpfifo)],
                 params.num_entries * sizeof(Tegra::CommandListHeader));

-    PushGPUEntries(std::move(entries));
+    Core::System::GetInstance().GPU().PushGPUEntries(std::move(entries));

     params.fence_out.id = 0;
     params.fence_out.value = 0;
@@ -184,7 +174,7 @@ u32 nvhost_gpu::KickoffPB(const std::vector<u8>& input, std::vector<u8>& output)
     Memory::ReadBlock(params.address, entries.data(),
                       params.num_entries * sizeof(Tegra::CommandListHeader));

-    PushGPUEntries(std::move(entries));
+    Core::System::GetInstance().GPU().PushGPUEntries(std::move(entries));

     params.fence_out.id = 0;
     params.fence_out.value = 0;
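
Note on the two submission paths above: SubmitGPFIFO copies the GPFIFO entries out of the ioctl payload, while KickoffPB reads them from guest memory, but both size the command list from num_entries and then hand it to Core::System::GetInstance().GPU().PushGPUEntries(...). A minimal sketch of that copy step follows; CommandListHeaderStub, BuildCommandList, and the size check are assumptions invented for this sketch, not code from the commit.

    // Hedged sketch of how a raw GPFIFO payload becomes a typed command list.
    // CommandListHeaderStub is a placeholder; yuzu uses Tegra::CommandListHeader.
    #include <cstddef>
    #include <cstdint>
    #include <cstring>
    #include <stdexcept>
    #include <vector>

    struct CommandListHeaderStub {
        std::uint32_t word0; // packed address/size fields in the real header
        std::uint32_t word1;
    };

    std::vector<CommandListHeaderStub> BuildCommandList(const std::vector<std::uint8_t>& payload,
                                                        std::size_t num_entries) {
        const std::size_t bytes = num_entries * sizeof(CommandListHeaderStub);
        if (payload.size() < bytes) {
            throw std::runtime_error("payload smaller than advertised entry count");
        }
        std::vector<CommandListHeaderStub> entries(num_entries);
        // Same pattern as the memcpy/ReadBlock calls above: copy exactly
        // num_entries * sizeof(header) bytes into the typed vector.
        std::memcpy(entries.data(), payload.data(), bytes);
        return entries;
    }
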
@@ -186,7 +186,7 @@ void NVFlinger::Compose() {

             // There was no queued buffer to draw, render previous frame
             system_instance.GetPerfStats().EndGameFrame();
-            system_instance.Renderer().SwapBuffers({});
+            system_instance.GPU().SwapBuffers({});
             continue;
         }

@@ -65,6 +65,16 @@ const DmaPusher& GPU::DmaPusher() const {
     return *dma_pusher;
 }

+void GPU::PushGPUEntries(Tegra::CommandList&& entries) {
+    dma_pusher->Push(std::move(entries));
+    dma_pusher->DispatchCalls();
+}
+
+void GPU::SwapBuffers(
+    std::optional<std::reference_wrapper<const Tegra::FramebufferConfig>> framebuffer) {
+    renderer.SwapBuffers(std::move(framebuffer));
+}
+
 u32 RenderTargetBytesPerPixel(RenderTargetFormat format) {
     ASSERT(format != RenderTargetFormat::NONE);

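
The two new member functions above are thin forwarders: service-side callers now go through the GPU object instead of reaching into the DmaPusher or the renderer directly. Below is a self-contained sketch of that facade shape; GpuFacade, DmaPusherStub, RendererStub, and FramebufferStub are stand-in names invented for the sketch and are not yuzu types. The usage in main() mirrors the call sites touched in this diff: presenting a specific framebuffer (nvdisp_disp0::flip) and repeating the previous frame with an empty optional (NVFlinger::Compose).

    // Hedged illustration of the facade introduced by this commit.
    #include <cstdio>
    #include <functional>
    #include <optional>
    #include <utility>
    #include <vector>

    struct FramebufferStub {
        unsigned width = 0;
        unsigned height = 0;
    };

    using CommandList = std::vector<unsigned>;

    struct DmaPusherStub {
        void Push(CommandList&& entries) { queued.emplace_back(std::move(entries)); }
        void DispatchCalls() {
            for (const auto& list : queued) {
                std::printf("dispatching %zu entries\n", list.size());
            }
            queued.clear();
        }
        std::vector<CommandList> queued;
    };

    struct RendererStub {
        void SwapBuffers(std::optional<std::reference_wrapper<const FramebufferStub>> framebuffer) {
            if (framebuffer) {
                std::printf("presenting %ux%u frame\n", framebuffer->get().width,
                            framebuffer->get().height);
            } else {
                std::printf("re-presenting previous frame\n");
            }
        }
    };

    // The facade: callers no longer touch the pusher or renderer directly.
    class GpuFacade {
    public:
        void PushGPUEntries(CommandList&& entries) {
            dma_pusher.Push(std::move(entries));
            dma_pusher.DispatchCalls();
        }

        void SwapBuffers(std::optional<std::reference_wrapper<const FramebufferStub>> framebuffer) {
            renderer.SwapBuffers(std::move(framebuffer));
        }

    private:
        DmaPusherStub dma_pusher;
        RendererStub renderer;
    };

    int main() {
        GpuFacade gpu;
        gpu.PushGPUEntries(CommandList{1, 2, 3});

        const FramebufferStub fb{1280, 720};
        gpu.SwapBuffers(fb); // present a specific framebuffer
        gpu.SwapBuffers({}); // no queued buffer: repeat the previous frame
        return 0;
    }
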
@@ -201,17 +201,26 @@ public:
        };
    } regs{};

+    /// Push GPU command entries to be processed
+    void PushGPUEntries(Tegra::CommandList&& entries);
+
+    /// Swap buffers (render frame)
+    void SwapBuffers(
+        std::optional<std::reference_wrapper<const Tegra::FramebufferConfig>> framebuffer);
+
 private:
     void ProcessBindMethod(const MethodCall& method_call);
     void ProcessSemaphoreTriggerMethod();
     void ProcessSemaphoreRelease();
     void ProcessSemaphoreAcquire();

-    // Calls a GPU puller method.
+    /// Calls a GPU puller method.
     void CallPullerMethod(const MethodCall& method_call);
-    // Calls a GPU engine method.
+    /// Calls a GPU engine method.
     void CallEngineMethod(const MethodCall& method_call);
-    // Determines where the method should be executed.
+    /// Determines where the method should be executed.
     bool ExecuteMethodOnEngine(const MethodCall& method_call);

 private:
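
The commit title mentions "asynch": funnelling PushGPUEntries and SwapBuffers through one interface leaves a single seam where GPU work could later be deferred to a worker thread instead of running on the calling service thread. Purely as an illustration of that idea, and not code from this commit or from yuzu, a standard-library task queue hooked behind such a facade could look like this:

    // Illustrative only: a tiny task queue showing how a single GPU entry point
    // could defer work to a worker thread.
    #include <condition_variable>
    #include <functional>
    #include <mutex>
    #include <queue>
    #include <thread>
    #include <utility>

    class GpuWorkQueue {
    public:
        GpuWorkQueue() : worker([this] { Run(); }) {}

        ~GpuWorkQueue() {
            {
                std::lock_guard lock(mutex);
                stop = true;
            }
            cv.notify_one();
            worker.join();
        }

        // Both PushGPUEntries and SwapBuffers could funnel through something like this.
        void Submit(std::function<void()> task) {
            {
                std::lock_guard lock(mutex);
                tasks.push(std::move(task));
            }
            cv.notify_one();
        }

    private:
        void Run() {
            for (;;) {
                std::function<void()> task;
                {
                    std::unique_lock lock(mutex);
                    cv.wait(lock, [this] { return stop || !tasks.empty(); });
                    if (stop && tasks.empty()) {
                        return;
                    }
                    task = std::move(tasks.front());
                    tasks.pop();
                }
                task();
            }
        }

        std::mutex mutex;
        std::condition_variable cv;
        std::queue<std::function<void()>> tasks;
        bool stop = false;
        std::thread worker; // started last, after the queue state is initialized
    };

How a real asynchronous path would batch work, handle fences, or synchronize presentation is outside this commit; the point is only that the new interface provides one place to hook such a queue in.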