diff --git a/src/common/heap_tracker.cpp b/src/common/heap_tracker.cpp
index 2babc58817..d509f2644c 100644
--- a/src/common/heap_tracker.cpp
+++ b/src/common/heap_tracker.cpp
@@ -36,6 +36,7 @@ HeapTracker::~HeapTracker() = default;
 
 void HeapTracker::Map(size_t virtual_offset, size_t host_offset, size_t length,
                       MemoryPermission perm, bool is_separate_heap) {
+    bool rebuild_required = false;
     // When mapping other memory, map pages immediately.
     if (!is_separate_heap) {
         m_buffer.Map(virtual_offset, host_offset, length, perm, false);
@@ -45,16 +46,28 @@ void HeapTracker::Map(size_t virtual_offset, size_t host_offset, size_t length,
         // We are mapping part of a separate heap and insert into mappings.
         std::scoped_lock lk{m_lock};
         m_map_count++;
-        m_mappings.insert_or_assign(virtual_offset, SeparateHeapMap{
+        const auto it = m_mappings.insert_or_assign(virtual_offset, SeparateHeapMap{
             .paddr = host_offset,
             .size = length,
             .tick = m_tick++,
             .perm = perm,
             .is_resident = false,
         });
+        // Update tick before possible rebuild.
+        it.first->second.tick = m_tick++;
+        // Check if we need to rebuild.
+        if (m_resident_map_count >= m_max_resident_map_count)
+            rebuild_required = true;
+        // Map the area.
+        m_buffer.Map(it.first->first, it.first->second.paddr, it.first->second.size, it.first->second.perm, false);
+        // This map is now resident.
+        it.first->second.is_resident = true;
+        m_resident_map_count++;
+        m_resident_mappings.insert(*it.first);
     }
-    // Finally, map.
-    this->DeferredMapSeparateHeap(virtual_offset);
+    // A rebuild was required, so perform it now.
+    if (rebuild_required)
+        this->RebuildSeparateHeapAddressSpace();
 }
 
 void HeapTracker::Unmap(size_t virtual_offset, size_t size, bool is_separate_heap) {
@@ -129,46 +142,6 @@ void HeapTracker::Protect(size_t virtual_offset, size_t size, MemoryPermission p
     }
 }
 
-bool HeapTracker::DeferredMapSeparateHeap(u8* fault_address) {
-    if (m_buffer.IsInVirtualRange(fault_address))
-        return this->DeferredMapSeparateHeap(fault_address - m_buffer.VirtualBasePointer());
-    return false;
-}
-
-bool HeapTracker::DeferredMapSeparateHeap(size_t virtual_offset) {
-    bool rebuild_required = false;
-    {
-        std::scoped_lock lk{m_lock};
-
-        // Check to ensure this was a non-resident separate heap mapping.
-        const auto it = this->GetNearestHeapMapLocked(virtual_offset);
-        if (it == m_mappings.end() || it->second.is_resident) {
-            return false;
-        }
-
-        // Update tick before possible rebuild.
-        it->second.tick = m_tick++;
-
-        // Check if we need to rebuild.
-        if (m_resident_map_count > m_max_resident_map_count) {
-            rebuild_required = true;
-        }
-
-        // Map the area.
-        m_buffer.Map(it->first, it->second.paddr, it->second.size, it->second.perm, false);
-
-        // This map is now resident.
-        it->second.is_resident = true;
-        m_resident_map_count++;
-        m_resident_mappings.insert(*it);
-    }
-
-    // A rebuild was required, so perform it now.
-    if (rebuild_required)
-        this->RebuildSeparateHeapAddressSpace();
-    return true;
-}
-
 void HeapTracker::RebuildSeparateHeapAddressSpace() {
     std::scoped_lock lk{m_rebuild_lock, m_lock};
     ASSERT(!m_resident_mappings.empty());
diff --git a/src/common/heap_tracker.h b/src/common/heap_tracker.h
index 18ec711d65..14b5401c18 100644
--- a/src/common/heap_tracker.h
+++ b/src/common/heap_tracker.h
@@ -32,8 +32,6 @@ public:
     inline u8* VirtualBasePointer() noexcept {
         return m_buffer.VirtualBasePointer();
     }
-    bool DeferredMapSeparateHeap(u8* fault_address);
-    bool DeferredMapSeparateHeap(size_t virtual_offset);
 private:
     // TODO: You may want to "fake-map" the first 2GB of 64-bit address space
     // and dedicate it entirely to a recursive PTE mapping :)
diff --git a/src/core/arm/dynarmic/arm_dynarmic.cpp b/src/core/arm/dynarmic/arm_dynarmic.cpp
index e6e9fc45be..9d26db51f7 100644
--- a/src/core/arm/dynarmic/arm_dynarmic.cpp
+++ b/src/core/arm/dynarmic/arm_dynarmic.cpp
@@ -3,47 +3,9 @@
 
 #ifdef __linux__
 
-#include "common/signal_chain.h"
-
+//#include "common/signal_chain.h"
 #include "core/arm/dynarmic/arm_dynarmic.h"
-#include "core/hle/kernel/k_process.h"
-#include "core/memory.h"
-
-namespace Core {
-
-namespace {
-
-thread_local Core::Memory::Memory* g_current_memory{};
-std::once_flag g_registered{};
-struct sigaction g_old_segv {};
-
-void HandleSigSegv(int sig, siginfo_t* info, void* ctx) {
-    if (g_current_memory && g_current_memory->InvalidateSeparateHeap(info->si_addr)) {
-        return;
-    }
-
-    return g_old_segv.sa_sigaction(sig, info, ctx);
-}
-
-} // namespace
-
-ScopedJitExecution::ScopedJitExecution(Kernel::KProcess* process) {
-    g_current_memory = std::addressof(process->GetMemory());
-}
-
-ScopedJitExecution::~ScopedJitExecution() {
-    g_current_memory = nullptr;
-}
-
-void ScopedJitExecution::RegisterHandler() {
-    std::call_once(g_registered, [] {
-        struct sigaction sa {};
-        sa.sa_sigaction = &HandleSigSegv;
-        sa.sa_flags = SA_SIGINFO | SA_ONSTACK;
-        Common::SigAction(SIGSEGV, std::addressof(sa), std::addressof(g_old_segv));
-    });
-}
-
-} // namespace Core
+//#include "core/hle/kernel/k_process.h"
+//#include "core/memory.h"
 
 #endif
diff --git a/src/core/arm/dynarmic/arm_dynarmic.h b/src/core/arm/dynarmic/arm_dynarmic.h
index 53dd188151..eef7c31160 100644
--- a/src/core/arm/dynarmic/arm_dynarmic.h
+++ b/src/core/arm/dynarmic/arm_dynarmic.h
@@ -26,24 +26,4 @@ constexpr HaltReason TranslateHaltReason(Dynarmic::HaltReason hr) {
     return static_cast<HaltReason>(hr);
 }
 
-#ifdef __linux__
-
-class ScopedJitExecution {
-public:
-    explicit ScopedJitExecution(Kernel::KProcess* process);
-    ~ScopedJitExecution();
-    static void RegisterHandler();
-};
-
-#else
-
-class ScopedJitExecution {
-public:
-    explicit ScopedJitExecution(Kernel::KProcess* process) {}
-    ~ScopedJitExecution() {}
-    static void RegisterHandler() {}
-};
-
-#endif
-
 } // namespace Core
diff --git a/src/core/arm/dynarmic/arm_dynarmic_32.cpp b/src/core/arm/dynarmic/arm_dynarmic_32.cpp
index afbf178349..1731ef1aec 100644
--- a/src/core/arm/dynarmic/arm_dynarmic_32.cpp
+++ b/src/core/arm/dynarmic/arm_dynarmic_32.cpp
@@ -336,15 +336,11 @@ bool ArmDynarmic32::IsInThumbMode() const {
 }
 
 HaltReason ArmDynarmic32::RunThread(Kernel::KThread* thread) {
-    ScopedJitExecution sj(thread->GetOwnerProcess());
-
     m_jit->ClearExclusiveState();
     return TranslateHaltReason(m_jit->Run());
 }
 
 HaltReason ArmDynarmic32::StepThread(Kernel::KThread* thread) {
-    ScopedJitExecution sj(thread->GetOwnerProcess());
-
     m_jit->ClearExclusiveState();
     return TranslateHaltReason(m_jit->Step());
 }
@@ -386,7 +382,6 @@ ArmDynarmic32::ArmDynarmic32(System& system, bool uses_wall_clock, Kernel::KProc
       m_cp15(std::make_shared<DynarmicCP15>(*this)), m_core_index{core_index} {
     auto& page_table_impl = process->GetPageTable().GetBasePageTable().GetImpl();
     m_jit = MakeJit(&page_table_impl);
-    ScopedJitExecution::RegisterHandler();
 }
 
 ArmDynarmic32::~ArmDynarmic32() = default;
diff --git a/src/core/arm/dynarmic/arm_dynarmic_64.cpp b/src/core/arm/dynarmic/arm_dynarmic_64.cpp
index 99a80644ad..f9d1232f83 100644
--- a/src/core/arm/dynarmic/arm_dynarmic_64.cpp
+++ b/src/core/arm/dynarmic/arm_dynarmic_64.cpp
@@ -367,15 +367,11 @@ std::shared_ptr<Dynarmic::A64::Jit> ArmDynarmic64::MakeJit(Common::PageTable* pa
 }
 
 HaltReason ArmDynarmic64::RunThread(Kernel::KThread* thread) {
-    ScopedJitExecution sj(thread->GetOwnerProcess());
-
     m_jit->ClearExclusiveState();
     return TranslateHaltReason(m_jit->Run());
 }
 
 HaltReason ArmDynarmic64::StepThread(Kernel::KThread* thread) {
-    ScopedJitExecution sj(thread->GetOwnerProcess());
-
     m_jit->ClearExclusiveState();
     return TranslateHaltReason(m_jit->Step());
 }
@@ -415,7 +411,6 @@ ArmDynarmic64::ArmDynarmic64(System& system, bool uses_wall_clock, Kernel::KProc
     auto& page_table = process->GetPageTable().GetBasePageTable();
     auto& page_table_impl = page_table.GetImpl();
     m_jit = MakeJit(&page_table_impl, page_table.GetAddressSpaceWidth());
-    ScopedJitExecution::RegisterHandler();
 }
 
 ArmDynarmic64::~ArmDynarmic64() = default;
diff --git a/src/core/hle/kernel/k_process.cpp b/src/core/hle/kernel/k_process.cpp
index 80566b7e77..cf03353f84 100644
--- a/src/core/hle/kernel/k_process.cpp
+++ b/src/core/hle/kernel/k_process.cpp
@@ -1266,10 +1266,6 @@ void KProcess::InitializeInterfaces() {
 
 #ifdef HAS_NCE
     if (this->IsApplication() && Settings::IsNceEnabled()) {
-        // Register the scoped JIT handler before creating any NCE instances
-        // so that its signal handler will appear first in the signal chain.
-        Core::ScopedJitExecution::RegisterHandler();
-
         for (size_t i = 0; i < Core::Hardware::NUM_CPU_CORES; i++) {
             m_arm_interfaces[i] = std::make_unique<Core::ArmNce>(m_kernel.System(), true, i);
         }
diff --git a/src/core/memory.cpp b/src/core/memory.cpp
index d63ccc8965..08391cd815 100644
--- a/src/core/memory.cpp
+++ b/src/core/memory.cpp
@@ -1228,22 +1228,7 @@ bool Memory::InvalidateNCE(Common::ProcessAddress vaddr, size_t size) {
     if (rasterizer) {
         impl->InvalidateGPUMemory(ptr, size);
     }
-
-#ifdef __linux__
-    if (!rasterizer && mapped) {
-        impl->buffer->DeferredMapSeparateHeap(GetInteger(vaddr));
-    }
-#endif
-
     return mapped && ptr != nullptr;
 }
 
-bool Memory::InvalidateSeparateHeap(void* fault_address) {
-#ifdef __linux__
-    return impl->buffer->DeferredMapSeparateHeap(static_cast<u8*>(fault_address));
-#else
-    return false;
-#endif
-}
-
 } // namespace Core::Memory
diff --git a/src/core/memory.h b/src/core/memory.h
index dcca26892b..99108ecf0d 100644
--- a/src/core/memory.h
+++ b/src/core/memory.h
@@ -487,13 +487,8 @@ public:
      * marked as debug or non-debug.
      */
     void MarkRegionDebug(Common::ProcessAddress vaddr, u64 size, bool debug);
-
     void SetGPUDirtyManagers(std::span<Core::GPUDirtyMemoryManager> managers);
-
     bool InvalidateNCE(Common::ProcessAddress vaddr, size_t size);
-
-    bool InvalidateSeparateHeap(void* fault_address);
-
 private:
     Core::System& system;
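
Note on the behavioral change, for review context: with DeferredMapSeparateHeap gone, separate-heap regions become resident eagerly inside HeapTracker::Map, so the SIGSEGV-driven fault path (ScopedJitExecution and Memory::InvalidateSeparateHeap) has nothing left to service. The sketch below is a minimal, self-contained model of that eager pattern under assumed simplifications; EagerTracker, Mapping, and max_resident are illustrative placeholder names, not types from this patch, and the real code additionally takes m_lock, tracks permissions, and calls m_buffer.Map() / RebuildSeparateHeapAddressSpace().

// Minimal sketch (not code from this patch) of the eager-residency pattern that
// HeapTracker::Map now follows: bookkeeping is inserted and the mapping is made
// resident immediately, with a rebuild once a resident limit is reached.
#include <cstddef>
#include <cstdint>
#include <iostream>
#include <map>

struct Mapping {
    size_t paddr;
    size_t size;
    uint64_t tick;
    bool is_resident;
};

class EagerTracker {
public:
    explicit EagerTracker(size_t max_resident) : m_max_resident{max_resident} {}

    void Map(size_t virtual_offset, size_t host_offset, size_t length) {
        bool rebuild_required = false;

        // Insert (or replace) the bookkeeping entry, mirroring m_mappings.insert_or_assign.
        const auto it = m_mappings.insert_or_assign(
            virtual_offset, Mapping{host_offset, length, m_tick++, false});

        // Decide whether the resident set must be rebuilt before growing it further.
        if (m_resident_count >= m_max_resident) {
            rebuild_required = true;
        }

        // Stand-in for m_buffer.Map(...): the region becomes resident right away,
        // instead of waiting for a SIGSEGV on first access.
        it.first->second.is_resident = true;
        m_resident_count++;

        // Perform the rebuild after the insertion step, as the patched Map() does.
        if (rebuild_required) {
            Rebuild();
        }
    }

private:
    void Rebuild() {
        // Stand-in for RebuildSeparateHeapAddressSpace(): the real tracker evicts
        // stale resident mappings by tick; here we simply drop residency for all.
        std::cout << "rebuild at " << m_resident_count << " resident mappings\n";
        for (auto& [vaddr, mapping] : m_mappings) {
            mapping.is_resident = false;
        }
        m_resident_count = 0;
    }

    std::map<size_t, Mapping> m_mappings;
    size_t m_resident_count{};
    size_t m_max_resident{};
    uint64_t m_tick{};
};

int main() {
    EagerTracker tracker{2};
    tracker.Map(0x1000, 0x0, 0x1000);
    tracker.Map(0x2000, 0x1000, 0x1000);
    tracker.Map(0x3000, 0x2000, 0x1000); // exceeds the resident limit, triggers a rebuild
}

The apparent trade-off is that every separate-heap Map now pays its host mapping cost up front, in exchange for dropping the signal-chain handler and the per-fault bookkeeping.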