Mirror of https://github.com/cemu-project/Cemu.git (synced 2025-12-19 04:37:05 +00:00)
GX2+TCL: Reimplement command buffer submission
- GX2 utilizes the TCL(.rpl) API for command submission instead of writing directly to an internal GPU FIFO
- Submission & retire timestamps are correctly implemented as incremental counters (see the sketch below)
- Command buffering behaviour matches console
- Fixes race conditions on aarch64
This commit is contained in:
parent 96765e4ac6
commit 28ea70b6d8

21 changed files with 761 additions and 472 deletions
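The incremental-counter scheme mentioned in the commit message can be illustrated with a minimal sketch. This is not Cemu's actual code; every identifier below (SubmitCommandBuffer, RetireOneSubmission, the two counters) is an illustrative assumption. The idea is that each submission bumps one counter and takes the new value as its timestamp, each retirement bumps the other counter, and a submission is known to be finished once the retire counter has caught up to its timestamp.

// Minimal sketch (not Cemu's code) of submission/retire timestamps kept as
// monotonically increasing counters; all names are illustrative assumptions.
#include <atomic>
#include <cstdint>

static std::atomic<uint64_t> s_lastSubmittedTimestamp{0}; // bumped when a command buffer is queued
static std::atomic<uint64_t> s_lastRetiredTimestamp{0};   // bumped when the GPU finishes a buffer

// CPU side: queue a command buffer and return the timestamp associated with it.
uint64_t SubmitCommandBuffer(/* command buffer omitted */)
{
	return s_lastSubmittedTimestamp.fetch_add(1, std::memory_order_release) + 1;
}

// GPU/worker side: mark the oldest in-flight submission as retired.
void RetireOneSubmission()
{
	s_lastRetiredTimestamp.fetch_add(1, std::memory_order_release);
}

// A submission is complete once the retire counter has caught up to its timestamp.
bool IsTimestampRetired(uint64_t timestamp)
{
	return s_lastRetiredTimestamp.load(std::memory_order_acquire) >= timestamp;
}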
@@ -616,4 +616,36 @@ namespace stdx
 	scope_exit& operator=(scope_exit) = delete;
 	void release() { m_released = true;}
 };
+
+// Xcode 16 doesn't have std::atomic_ref support and we provide a minimalist reimplementation as fallback
+#ifdef __cpp_lib_atomic_ref
+#include <atomic>
+template<typename T>
+using atomic_ref = std::atomic_ref<T>;
+#else
+template<typename T>
+class atomic_ref
+{
+	static_assert(std::is_trivially_copyable<T>::value, "atomic_ref requires trivially copyable types");
+public:
+	using value_type = T;
+
+	explicit atomic_ref(T& obj) noexcept : ptr_(std::addressof(obj)) {}
+
+	T load(std::memory_order order = std::memory_order_seq_cst) const noexcept
+	{
+		auto aptr = reinterpret_cast<std::atomic<T>*>(ptr_);
+		return aptr->load(order);
+	}
+
+	void store(T desired, std::memory_order order = std::memory_order_seq_cst) const noexcept
+	{
+		auto aptr = reinterpret_cast<std::atomic<T>*>(ptr_);
+		aptr->store(desired, order);
+	}
+
+private:
+	T* ptr_;
+};
+#endif
 }
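For reference, a brief usage sketch of the fallback above, assuming it is reachable as stdx::atomic_ref since the additions sit inside namespace stdx in this hunk; the function names and the shared variable are illustrative only, not taken from the commit.

// Usage sketch; assumes the header shown in the hunk above (providing stdx::atomic_ref) is included.
#include <atomic>  // for std::memory_order
#include <cstdint>

uint64_t g_sharedTimestamp = 0; // plain (non-atomic) variable shared between threads

// Writer side: publish a new value with release semantics.
void PublishTimestamp(uint64_t newValue)
{
	stdx::atomic_ref<uint64_t>(g_sharedTimestamp).store(newValue, std::memory_order_release);
}

// Reader side: observe the latest published value with acquire semantics.
uint64_t ReadTimestamp()
{
	return stdx::atomic_ref<uint64_t>(g_sharedTimestamp).load(std::memory_order_acquire);
}

Note that the fallback only covers load and store, and that it relies on a reinterpret_cast from T* to std::atomic<T>*, which assumes std::atomic<T> has the same size and representation as T. That holds in practice for the lock-free integral types it is used with here, but it is not something the standard guarantees.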