
Implement host tracked memory manager mode (#6356)

* Add host tracked memory manager mode

* Skipping flush is no longer needed

* Formatting + revert unrelated change

* LightningJit: Ensure that dest register is saved for load ops that do partial updates

* avoid allocations when doing address space lookup

Add missing improvement

* IsRmwMemory -> IsPartialRegisterUpdateMemory

* Ensure we iterate all private allocations in range

* PR feedback and potential fixes

* Simplified bridges a lot

* Skip calling SignalMappingChanged if Guest is true

* Late map bridge too

* Force address masking for prefetch instructions

* Reprotection for bridges

* Move partition list validation to separate debug method

* Move host tracked related classes to HostTracked folder

* New HostTracked namespace

* Move host tracked modes to the end of enum to avoid PPTC invalidation

---------

Co-authored-by: riperiperi <rhy3756547@hotmail.com>
gdkchan 2024-03-26 23:33:24 -03:00 committed by GitHub
parent f6d24449b6
commit b323a01738
30 changed files with 2648 additions and 76 deletions


@@ -157,7 +157,7 @@ namespace ARMeilleure.Instructions
context.Copy(temp, value);
-if (!context.Memory.Type.IsHostMapped())
+if (!context.Memory.Type.IsHostMappedOrTracked())
{
context.Branch(lblEnd);
@@ -198,7 +198,7 @@ namespace ARMeilleure.Instructions
SetInt(context, rt, value);
-if (!context.Memory.Type.IsHostMapped())
+if (!context.Memory.Type.IsHostMappedOrTracked())
{
context.Branch(lblEnd);
@@ -265,7 +265,7 @@ namespace ARMeilleure.Instructions
context.Copy(GetVec(rt), value);
-if (!context.Memory.Type.IsHostMapped())
+if (!context.Memory.Type.IsHostMappedOrTracked())
{
context.Branch(lblEnd);
@@ -312,7 +312,7 @@ namespace ARMeilleure.Instructions
break;
}
-if (!context.Memory.Type.IsHostMapped())
+if (!context.Memory.Type.IsHostMappedOrTracked())
{
context.Branch(lblEnd);
@@ -385,7 +385,7 @@ namespace ARMeilleure.Instructions
break;
}
-if (!context.Memory.Type.IsHostMapped())
+if (!context.Memory.Type.IsHostMappedOrTracked())
{
context.Branch(lblEnd);
@@ -403,6 +403,27 @@ namespace ARMeilleure.Instructions
{
return EmitHostMappedPointer(context, address);
}
else if (context.Memory.Type.IsHostTracked())
{
if (address.Type == OperandType.I32)
{
address = context.ZeroExtend32(OperandType.I64, address);
}
if (context.Memory.Type == MemoryManagerType.HostTracked)
{
Operand mask = Const(ulong.MaxValue >> (64 - context.Memory.AddressSpaceBits));
address = context.BitwiseAnd(address, mask);
}
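// Each entry of the flat page table stores (host pointer - guest virtual address) for its 4KB page,
// as written by NativePageTable.GetPte later in this commit, so a single load from the table
// followed by an add of the guest address produces the host address.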
Operand ptBase = !context.HasPtc
? Const(context.Memory.PageTablePointer.ToInt64())
: Const(context.Memory.PageTablePointer.ToInt64(), Ptc.PageTableSymbol);
Operand ptOffset = context.ShiftRightUI(address, Const(PageBits));
return context.Add(address, context.Load(OperandType.I64, context.Add(ptBase, context.ShiftLeft(ptOffset, Const(3)))));
}
int ptLevelBits = context.Memory.AddressSpaceBits - PageBits;
int ptLevelSize = 1 << ptLevelBits;


@@ -29,6 +29,18 @@ namespace ARMeilleure.Memory
/// Allows invalid access from JIT code to the rest of the program, but is faster.
/// </summary>
HostMappedUnsafe,
/// <summary>
/// High level implementation using a software flat page table for address translation
/// with no support for handling invalid or non-contiguous memory access.
/// </summary>
HostTracked,
/// <summary>
/// High level implementation using a software flat page table for address translation
/// without masking the address and no support for handling invalid or non-contiguous memory access.
/// </summary>
HostTrackedUnsafe,
}
public static class MemoryManagerTypeExtensions
@@ -37,5 +49,15 @@ namespace ARMeilleure.Memory
{
return type == MemoryManagerType.HostMapped || type == MemoryManagerType.HostMappedUnsafe;
}
public static bool IsHostTracked(this MemoryManagerType type)
{
return type == MemoryManagerType.HostTracked || type == MemoryManagerType.HostTrackedUnsafe;
}
public static bool IsHostMappedOrTracked(this MemoryManagerType type)
{
return type.IsHostMapped() || type.IsHostTracked();
}
}
}
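For orientation, a minimal sketch of how the new values and extension methods are used elsewhere in this diff (hypothetical code, not part of the commit; the local names such as unsafeMode are invented): the memory manager reports HostTracked or HostTrackedUnsafe depending on an unsafe flag, the JIT masks addresses only in the non-unsafe mode, and the native signal handler is installed for both host mapped and host tracked modes.

MemoryManagerType type = unsafeMode ? MemoryManagerType.HostTrackedUnsafe : MemoryManagerType.HostTracked;
bool masksAddresses = type == MemoryManagerType.HostTracked;   // unsafe mode skips address masking in the JIT
bool installsSignalHandler = type.IsHostMappedOrTracked();     // true for all host mapped/tracked modes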


@@ -21,10 +21,8 @@ namespace ARMeilleure.Signal
private const uint EXCEPTION_ACCESS_VIOLATION = 0xc0000005;
-private static Operand EmitGenericRegionCheck(EmitterContext context, IntPtr signalStructPtr, Operand faultAddress, Operand isWrite, int rangeStructSize, ulong pageSize)
+private static Operand EmitGenericRegionCheck(EmitterContext context, IntPtr signalStructPtr, Operand faultAddress, Operand isWrite, int rangeStructSize)
{
-ulong pageMask = pageSize - 1;
Operand inRegionLocal = context.AllocateLocal(OperandType.I32);
context.Copy(inRegionLocal, Const(0));
@@ -51,7 +49,7 @@ namespace ARMeilleure.Signal
// Only call tracking if in range.
context.BranchIfFalse(nextLabel, inRange, BasicBlockFrequency.Cold);
-Operand offset = context.BitwiseAnd(context.Subtract(faultAddress, rangeAddress), Const(~pageMask));
+Operand offset = context.Subtract(faultAddress, rangeAddress);
// Call the tracking action, with the pointer's relative offset to the base address.
Operand trackingActionPtr = context.Load(OperandType.I64, Const((ulong)signalStructPtr + rangeBaseOffset + 20));
@@ -62,8 +60,10 @@ namespace ARMeilleure.Signal
// Tracking action should be non-null to call it, otherwise assume false return.
context.BranchIfFalse(skipActionLabel, trackingActionPtr);
-Operand result = context.Call(trackingActionPtr, OperandType.I32, offset, Const(pageSize), isWrite);
-context.Copy(inRegionLocal, result);
+Operand result = context.Call(trackingActionPtr, OperandType.I64, offset, Const(1UL), isWrite);
+context.Copy(inRegionLocal, context.ICompareNotEqual(result, Const(0UL)));
+GenerateFaultAddressPatchCode(context, faultAddress, result);
context.MarkLabel(skipActionLabel);
@@ -155,7 +155,7 @@ namespace ARMeilleure.Signal
throw new PlatformNotSupportedException();
}
-public static byte[] GenerateUnixSignalHandler(IntPtr signalStructPtr, int rangeStructSize, ulong pageSize)
+public static byte[] GenerateUnixSignalHandler(IntPtr signalStructPtr, int rangeStructSize)
{
EmitterContext context = new();
@@ -168,7 +168,7 @@ namespace ARMeilleure.Signal
Operand isWrite = context.ICompareNotEqual(writeFlag, Const(0L)); // Normalize to 0/1.
-Operand isInRegion = EmitGenericRegionCheck(context, signalStructPtr, faultAddress, isWrite, rangeStructSize, pageSize);
+Operand isInRegion = EmitGenericRegionCheck(context, signalStructPtr, faultAddress, isWrite, rangeStructSize);
Operand endLabel = Label();
@@ -203,7 +203,7 @@ namespace ARMeilleure.Signal
return Compiler.Compile(cfg, argTypes, OperandType.None, CompilerOptions.HighCq, RuntimeInformation.ProcessArchitecture).Code;
}
-public static byte[] GenerateWindowsSignalHandler(IntPtr signalStructPtr, int rangeStructSize, ulong pageSize)
+public static byte[] GenerateWindowsSignalHandler(IntPtr signalStructPtr, int rangeStructSize)
{
EmitterContext context = new();
@@ -232,7 +232,7 @@ namespace ARMeilleure.Signal
Operand isWrite = context.ICompareNotEqual(writeFlag, Const(0L)); // Normalize to 0/1.
-Operand isInRegion = EmitGenericRegionCheck(context, signalStructPtr, faultAddress, isWrite, rangeStructSize, pageSize);
+Operand isInRegion = EmitGenericRegionCheck(context, signalStructPtr, faultAddress, isWrite, rangeStructSize);
Operand endLabel = Label();
@@ -256,5 +256,86 @@
return Compiler.Compile(cfg, argTypes, OperandType.I32, CompilerOptions.HighCq, RuntimeInformation.ProcessArchitecture).Code;
}
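// The tracking action now returns the host address that the faulting access should retry against.
// On Arm64 hosts that support it, the helper below patches the base address register in the saved
// signal context so that the retried instruction uses that new address; the register index is
// decoded from the faulting instruction (bits 5-9 for ordinary loads/stores, bits 0-4 for SYS
// instructions such as DC ZVA).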
private static void GenerateFaultAddressPatchCode(EmitterContext context, Operand faultAddress, Operand newAddress)
{
if (RuntimeInformation.ProcessArchitecture == Architecture.Arm64)
{
if (SupportsFaultAddressPatchingForHostOs())
{
Operand lblSkip = Label();
context.BranchIf(lblSkip, faultAddress, newAddress, Comparison.Equal);
Operand ucontextPtr = context.LoadArgument(OperandType.I64, 2);
Operand pcCtxAddress = default;
ulong baseRegsOffset = 0;
if (OperatingSystem.IsLinux())
{
pcCtxAddress = context.Add(ucontextPtr, Const(440UL));
baseRegsOffset = 184UL;
}
else if (OperatingSystem.IsMacOS() || OperatingSystem.IsIOS())
{
ucontextPtr = context.Load(OperandType.I64, context.Add(ucontextPtr, Const(48UL)));
pcCtxAddress = context.Add(ucontextPtr, Const(272UL));
baseRegsOffset = 16UL;
}
Operand pc = context.Load(OperandType.I64, pcCtxAddress);
Operand reg = GetAddressRegisterFromArm64Instruction(context, pc);
Operand reg64 = context.ZeroExtend32(OperandType.I64, reg);
Operand regCtxAddress = context.Add(ucontextPtr, context.Add(context.ShiftLeft(reg64, Const(3)), Const(baseRegsOffset)));
Operand regAddress = context.Load(OperandType.I64, regCtxAddress);
Operand addressDelta = context.Subtract(regAddress, faultAddress);
context.Store(regCtxAddress, context.Add(newAddress, addressDelta));
context.MarkLabel(lblSkip);
}
}
}
private static Operand GetAddressRegisterFromArm64Instruction(EmitterContext context, Operand pc)
{
Operand inst = context.Load(OperandType.I32, pc);
Operand reg = context.AllocateLocal(OperandType.I32);
Operand isSysInst = context.ICompareEqual(context.BitwiseAnd(inst, Const(0xFFF80000)), Const(0xD5080000));
Operand lblSys = Label();
Operand lblEnd = Label();
context.BranchIfTrue(lblSys, isSysInst, BasicBlockFrequency.Cold);
context.Copy(reg, context.BitwiseAnd(context.ShiftRightUI(inst, Const(5)), Const(0x1F)));
context.Branch(lblEnd);
context.MarkLabel(lblSys);
context.Copy(reg, context.BitwiseAnd(inst, Const(0x1F)));
context.MarkLabel(lblEnd);
return reg;
}
public static bool SupportsFaultAddressPatchingForHost()
{
return SupportsFaultAddressPatchingForHostArch() && SupportsFaultAddressPatchingForHostOs();
}
private static bool SupportsFaultAddressPatchingForHostArch()
{
return RuntimeInformation.ProcessArchitecture == Architecture.Arm64;
}
private static bool SupportsFaultAddressPatchingForHostOs()
{
return OperatingSystem.IsLinux() || OperatingSystem.IsMacOS() || OperatingSystem.IsIOS();
}
}
}


@@ -5,10 +5,10 @@ namespace Ryujinx.Common.Collections
/// </summary>
public class IntrusiveRedBlackTreeNode<T> where T : IntrusiveRedBlackTreeNode<T>
{
-internal bool Color = true;
-internal T Left;
-internal T Right;
-internal T Parent;
+public bool Color = true;
+public T Left;
+public T Right;
+public T Parent;
public T Predecessor => IntrusiveRedBlackTreeImpl<T>.PredecessorOf((T)this);
public T Successor => IntrusiveRedBlackTreeImpl<T>.SuccessorOf((T)this);


@@ -38,7 +38,7 @@ namespace Ryujinx.Cpu.AppleHv
private readonly HvIpaAllocator _ipaAllocator;
-public HvMemoryBlockAllocator(HvIpaAllocator ipaAllocator, int blockAlignment) : base(blockAlignment, MemoryAllocationFlags.None)
+public HvMemoryBlockAllocator(HvIpaAllocator ipaAllocator, ulong blockAlignment) : base(blockAlignment, MemoryAllocationFlags.None)
{
_ipaAllocator = ipaAllocator;
}


@@ -0,0 +1,35 @@
using Ryujinx.Common.Collections;
using System;
namespace Ryujinx.Cpu.Jit.HostTracked
{
internal class AddressIntrusiveRedBlackTree<T> : IntrusiveRedBlackTree<T> where T : IntrusiveRedBlackTreeNode<T>, IComparable<T>, IComparable<ulong>
{
/// <summary>
/// Retrieve the node that is considered equal to the specified address by the comparator.
/// </summary>
/// <param name="address">Address to compare with</param>
/// <returns>Node that is equal to <paramref name="address"/></returns>
public T GetNode(ulong address)
{
T node = Root;
while (node != null)
{
int cmp = node.CompareTo(address);
if (cmp < 0)
{
node = node.Left;
}
else if (cmp > 0)
{
node = node.Right;
}
else
{
return node;
}
}
return null;
}
}
}


@@ -0,0 +1,708 @@
using Ryujinx.Common;
using Ryujinx.Common.Collections;
using Ryujinx.Memory;
using System;
using System.Diagnostics;
using System.Threading;
namespace Ryujinx.Cpu.Jit.HostTracked
{
readonly struct PrivateRange
{
public readonly MemoryBlock Memory;
public readonly ulong Offset;
public readonly ulong Size;
public static PrivateRange Empty => new(null, 0, 0);
public PrivateRange(MemoryBlock memory, ulong offset, ulong size)
{
Memory = memory;
Offset = offset;
Size = size;
}
}
class AddressSpacePartition : IDisposable
{
public const ulong GuestPageSize = 0x1000;
private const int DefaultBlockAlignment = 1 << 20;
private enum MappingType : byte
{
None,
Private,
}
private class Mapping : IntrusiveRedBlackTreeNode<Mapping>, IComparable<Mapping>, IComparable<ulong>
{
public ulong Address { get; private set; }
public ulong Size { get; private set; }
public ulong EndAddress => Address + Size;
public MappingType Type { get; private set; }
public Mapping(ulong address, ulong size, MappingType type)
{
Address = address;
Size = size;
Type = type;
}
public Mapping Split(ulong splitAddress)
{
ulong leftSize = splitAddress - Address;
ulong rightSize = EndAddress - splitAddress;
Mapping left = new(Address, leftSize, Type);
Address = splitAddress;
Size = rightSize;
return left;
}
public void UpdateState(MappingType newType)
{
Type = newType;
}
public void Extend(ulong sizeDelta)
{
Size += sizeDelta;
}
public int CompareTo(Mapping other)
{
if (Address < other.Address)
{
return -1;
}
else if (Address <= other.EndAddress - 1UL)
{
return 0;
}
else
{
return 1;
}
}
public int CompareTo(ulong address)
{
if (address < Address)
{
return -1;
}
else if (address <= EndAddress - 1UL)
{
return 0;
}
else
{
return 1;
}
}
}
private class PrivateMapping : IntrusiveRedBlackTreeNode<PrivateMapping>, IComparable<PrivateMapping>, IComparable<ulong>
{
public ulong Address { get; private set; }
public ulong Size { get; private set; }
public ulong EndAddress => Address + Size;
public PrivateMemoryAllocation PrivateAllocation { get; private set; }
public PrivateMapping(ulong address, ulong size, PrivateMemoryAllocation privateAllocation)
{
Address = address;
Size = size;
PrivateAllocation = privateAllocation;
}
public PrivateMapping Split(ulong splitAddress)
{
ulong leftSize = splitAddress - Address;
ulong rightSize = EndAddress - splitAddress;
Debug.Assert(leftSize > 0);
Debug.Assert(rightSize > 0);
(var leftAllocation, PrivateAllocation) = PrivateAllocation.Split(leftSize);
PrivateMapping left = new(Address, leftSize, leftAllocation);
Address = splitAddress;
Size = rightSize;
return left;
}
public void Map(AddressSpacePartitionMultiAllocation baseBlock, ulong baseAddress, PrivateMemoryAllocation newAllocation)
{
baseBlock.MapView(newAllocation.Memory, newAllocation.Offset, Address - baseAddress, Size);
PrivateAllocation = newAllocation;
}
public void Unmap(AddressSpacePartitionMultiAllocation baseBlock, ulong baseAddress)
{
if (PrivateAllocation.IsValid)
{
baseBlock.UnmapView(PrivateAllocation.Memory, Address - baseAddress, Size);
PrivateAllocation.Dispose();
}
PrivateAllocation = default;
}
public void Extend(ulong sizeDelta)
{
Size += sizeDelta;
}
public int CompareTo(PrivateMapping other)
{
if (Address < other.Address)
{
return -1;
}
else if (Address <= other.EndAddress - 1UL)
{
return 0;
}
else
{
return 1;
}
}
public int CompareTo(ulong address)
{
if (address < Address)
{
return -1;
}
else if (address <= EndAddress - 1UL)
{
return 0;
}
else
{
return 1;
}
}
}
private readonly MemoryBlock _backingMemory;
private readonly AddressSpacePartitionMultiAllocation _baseMemory;
private readonly PrivateMemoryAllocator _privateMemoryAllocator;
private readonly AddressIntrusiveRedBlackTree<Mapping> _mappingTree;
private readonly AddressIntrusiveRedBlackTree<PrivateMapping> _privateTree;
private readonly ReaderWriterLockSlim _treeLock;
private readonly ulong _hostPageSize;
private ulong? _firstPagePa;
private ulong? _lastPagePa;
private ulong _cachedFirstPagePa;
private ulong _cachedLastPagePa;
private MemoryBlock _firstPageMemoryForUnmap;
private ulong _firstPageOffsetForLateMap;
private MemoryPermission _firstPageMemoryProtection;
public ulong Address { get; }
public ulong Size { get; }
public ulong EndAddress => Address + Size;
public AddressSpacePartition(AddressSpacePartitionAllocation baseMemory, MemoryBlock backingMemory, ulong address, ulong size)
{
_privateMemoryAllocator = new PrivateMemoryAllocator(DefaultBlockAlignment, MemoryAllocationFlags.Mirrorable);
_mappingTree = new AddressIntrusiveRedBlackTree<Mapping>();
_privateTree = new AddressIntrusiveRedBlackTree<PrivateMapping>();
_treeLock = new ReaderWriterLockSlim();
_mappingTree.Add(new Mapping(address, size, MappingType.None));
_privateTree.Add(new PrivateMapping(address, size, default));
_hostPageSize = MemoryBlock.GetPageSize();
_backingMemory = backingMemory;
_baseMemory = new(baseMemory);
_cachedFirstPagePa = ulong.MaxValue;
_cachedLastPagePa = ulong.MaxValue;
Address = address;
Size = size;
}
public bool IsEmpty()
{
_treeLock.EnterReadLock();
try
{
Mapping map = _mappingTree.GetNode(Address);
return map != null && map.Address == Address && map.Size == Size && map.Type == MappingType.None;
}
finally
{
_treeLock.ExitReadLock();
}
}
public void Map(ulong va, ulong pa, ulong size)
{
Debug.Assert(va >= Address);
Debug.Assert(va + size <= EndAddress);
if (va == Address)
{
_firstPagePa = pa;
}
if (va <= EndAddress - GuestPageSize && va + size > EndAddress - GuestPageSize)
{
_lastPagePa = pa + ((EndAddress - GuestPageSize) - va);
}
Update(va, pa, size, MappingType.Private);
}
public void Unmap(ulong va, ulong size)
{
Debug.Assert(va >= Address);
Debug.Assert(va + size <= EndAddress);
if (va == Address)
{
_firstPagePa = null;
}
if (va <= EndAddress - GuestPageSize && va + size > EndAddress - GuestPageSize)
{
_lastPagePa = null;
}
Update(va, 0UL, size, MappingType.None);
}
public void ReprotectAligned(ulong va, ulong size, MemoryPermission protection)
{
Debug.Assert(va >= Address);
Debug.Assert(va + size <= EndAddress);
_baseMemory.Reprotect(va - Address, size, protection, false);
if (va == Address)
{
_firstPageMemoryProtection = protection;
}
}
public void Reprotect(
ulong va,
ulong size,
MemoryPermission protection,
AddressSpacePartitioned addressSpace,
Action<ulong, IntPtr, ulong> updatePtCallback)
{
if (_baseMemory.LazyInitMirrorForProtection(addressSpace, Address, Size, protection))
{
LateMap();
}
updatePtCallback(va, _baseMemory.GetPointerForProtection(va - Address, size, protection), size);
}
public IntPtr GetPointer(ulong va, ulong size)
{
Debug.Assert(va >= Address);
Debug.Assert(va + size <= EndAddress);
return _baseMemory.GetPointer(va - Address, size);
}
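// Each partition's host allocation is created one host page larger than the partition itself
// (see AddressSpacePartitioned.CreateAsPartitionAllocation). That extra page is the "bridge":
// when the next partition starts immediately after this one, the start of the next partition is
// also mapped at the end of this partition's allocation, so host accesses that begin near the end
// of a partition and spill into the next one still hit contiguous host memory.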
public void InsertBridgeAtEnd(AddressSpacePartition partitionAfter, bool useProtectionMirrors)
{
ulong firstPagePa = partitionAfter?._firstPagePa ?? ulong.MaxValue;
ulong lastPagePa = _lastPagePa ?? ulong.MaxValue;
if (firstPagePa != _cachedFirstPagePa || lastPagePa != _cachedLastPagePa)
{
if (partitionAfter != null && partitionAfter._firstPagePa.HasValue)
{
(MemoryBlock firstPageMemory, ulong firstPageOffset) = partitionAfter.GetFirstPageMemoryAndOffset();
_baseMemory.MapView(firstPageMemory, firstPageOffset, Size, _hostPageSize);
if (!useProtectionMirrors)
{
_baseMemory.Reprotect(Size, _hostPageSize, partitionAfter._firstPageMemoryProtection, throwOnFail: false);
}
_firstPageMemoryForUnmap = firstPageMemory;
_firstPageOffsetForLateMap = firstPageOffset;
}
else
{
MemoryBlock firstPageMemoryForUnmap = _firstPageMemoryForUnmap;
if (firstPageMemoryForUnmap != null)
{
_baseMemory.UnmapView(firstPageMemoryForUnmap, Size, _hostPageSize);
_firstPageMemoryForUnmap = null;
}
}
_cachedFirstPagePa = firstPagePa;
_cachedLastPagePa = lastPagePa;
}
}
public void ReprotectBridge(MemoryPermission protection)
{
if (_firstPageMemoryForUnmap != null)
{
_baseMemory.Reprotect(Size, _hostPageSize, protection, throwOnFail: false);
}
}
private (MemoryBlock, ulong) GetFirstPageMemoryAndOffset()
{
_treeLock.EnterReadLock();
try
{
PrivateMapping map = _privateTree.GetNode(Address);
if (map != null && map.PrivateAllocation.IsValid)
{
return (map.PrivateAllocation.Memory, map.PrivateAllocation.Offset + (Address - map.Address));
}
}
finally
{
_treeLock.ExitReadLock();
}
return (_backingMemory, _firstPagePa.Value);
}
public PrivateRange GetPrivateAllocation(ulong va)
{
_treeLock.EnterReadLock();
try
{
PrivateMapping map = _privateTree.GetNode(va);
if (map != null && map.PrivateAllocation.IsValid)
{
return new(map.PrivateAllocation.Memory, map.PrivateAllocation.Offset + (va - map.Address), map.Size - (va - map.Address));
}
}
finally
{
_treeLock.ExitReadLock();
}
return PrivateRange.Empty;
}
private void Update(ulong va, ulong pa, ulong size, MappingType type)
{
_treeLock.EnterWriteLock();
try
{
Mapping map = _mappingTree.GetNode(va);
Update(map, va, pa, size, type);
}
finally
{
_treeLock.ExitWriteLock();
}
}
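// Splits the mappings that overlap the range at its boundaries, updates their type, maps or
// unmaps the backing private memory accordingly, and coalesces neighbouring mappings of the
// same type.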
private Mapping Update(Mapping map, ulong va, ulong pa, ulong size, MappingType type)
{
ulong endAddress = va + size;
for (; map != null; map = map.Successor)
{
if (map.Address < va)
{
_mappingTree.Add(map.Split(va));
}
if (map.EndAddress > endAddress)
{
Mapping newMap = map.Split(endAddress);
_mappingTree.Add(newMap);
map = newMap;
}
switch (type)
{
case MappingType.None:
ulong alignment = _hostPageSize;
bool unmappedBefore = map.Predecessor == null ||
(map.Predecessor.Type == MappingType.None && map.Predecessor.Address <= BitUtils.AlignDown(va, alignment));
bool unmappedAfter = map.Successor == null ||
(map.Successor.Type == MappingType.None && map.Successor.EndAddress >= BitUtils.AlignUp(endAddress, alignment));
UnmapPrivate(va, size, unmappedBefore, unmappedAfter);
break;
case MappingType.Private:
MapPrivate(va, size);
break;
}
map.UpdateState(type);
map = TryCoalesce(map);
if (map.EndAddress >= endAddress)
{
break;
}
}
return map;
}
private Mapping TryCoalesce(Mapping map)
{
Mapping previousMap = map.Predecessor;
Mapping nextMap = map.Successor;
if (previousMap != null && CanCoalesce(previousMap, map))
{
previousMap.Extend(map.Size);
_mappingTree.Remove(map);
map = previousMap;
}
if (nextMap != null && CanCoalesce(map, nextMap))
{
map.Extend(nextMap.Size);
_mappingTree.Remove(nextMap);
}
return map;
}
private static bool CanCoalesce(Mapping left, Mapping right)
{
return left.Type == right.Type;
}
private void MapPrivate(ulong va, ulong size)
{
ulong endAddress = va + size;
ulong alignment = _hostPageSize;
// Expand the range outwards based on page size to ensure that at least the requested region is mapped.
ulong vaAligned = BitUtils.AlignDown(va, alignment);
ulong endAddressAligned = BitUtils.AlignUp(endAddress, alignment);
PrivateMapping map = _privateTree.GetNode(va);
for (; map != null; map = map.Successor)
{
if (!map.PrivateAllocation.IsValid)
{
if (map.Address < vaAligned)
{
_privateTree.Add(map.Split(vaAligned));
}
if (map.EndAddress > endAddressAligned)
{
PrivateMapping newMap = map.Split(endAddressAligned);
_privateTree.Add(newMap);
map = newMap;
}
map.Map(_baseMemory, Address, _privateMemoryAllocator.Allocate(map.Size, _hostPageSize));
}
if (map.EndAddress >= endAddressAligned)
{
break;
}
}
}
private void UnmapPrivate(ulong va, ulong size, bool unmappedBefore, bool unmappedAfter)
{
ulong endAddress = va + size;
ulong alignment = _hostPageSize;
// If the adjacent mappings are unmapped, expand the range outwards,
// otherwise shrink it inwards. We must ensure we won't unmap pages that might still be in use.
ulong vaAligned = unmappedBefore ? BitUtils.AlignDown(va, alignment) : BitUtils.AlignUp(va, alignment);
ulong endAddressAligned = unmappedAfter ? BitUtils.AlignUp(endAddress, alignment) : BitUtils.AlignDown(endAddress, alignment);
if (endAddressAligned <= vaAligned)
{
return;
}
PrivateMapping map = _privateTree.GetNode(vaAligned);
for (; map != null; map = map.Successor)
{
if (map.PrivateAllocation.IsValid)
{
if (map.Address < vaAligned)
{
_privateTree.Add(map.Split(vaAligned));
}
if (map.EndAddress > endAddressAligned)
{
PrivateMapping newMap = map.Split(endAddressAligned);
_privateTree.Add(newMap);
map = newMap;
}
map.Unmap(_baseMemory, Address);
map = TryCoalesce(map);
}
if (map.EndAddress >= endAddressAligned)
{
break;
}
}
}
private PrivateMapping TryCoalesce(PrivateMapping map)
{
PrivateMapping previousMap = map.Predecessor;
PrivateMapping nextMap = map.Successor;
if (previousMap != null && CanCoalesce(previousMap, map))
{
previousMap.Extend(map.Size);
_privateTree.Remove(map);
map = previousMap;
}
if (nextMap != null && CanCoalesce(map, nextMap))
{
map.Extend(nextMap.Size);
_privateTree.Remove(nextMap);
}
return map;
}
private static bool CanCoalesce(PrivateMapping left, PrivateMapping right)
{
return !left.PrivateAllocation.IsValid && !right.PrivateAllocation.IsValid;
}
private void LateMap()
{
// Map all existing private allocations.
// This is necessary to ensure mirrors that are lazily created have the same mappings as the main one.
PrivateMapping map = _privateTree.GetNode(Address);
for (; map != null; map = map.Successor)
{
if (map.PrivateAllocation.IsValid)
{
_baseMemory.LateMapView(map.PrivateAllocation.Memory, map.PrivateAllocation.Offset, map.Address - Address, map.Size);
}
}
MemoryBlock firstPageMemory = _firstPageMemoryForUnmap;
ulong firstPageOffset = _firstPageOffsetForLateMap;
if (firstPageMemory != null)
{
_baseMemory.LateMapView(firstPageMemory, firstPageOffset, Size, _hostPageSize);
}
}
public PrivateRange GetFirstPrivateAllocation(ulong va, ulong size, out ulong nextVa)
{
_treeLock.EnterReadLock();
try
{
PrivateMapping map = _privateTree.GetNode(va);
nextVa = map.EndAddress;
if (map != null && map.PrivateAllocation.IsValid)
{
ulong startOffset = va - map.Address;
return new(
map.PrivateAllocation.Memory,
map.PrivateAllocation.Offset + startOffset,
Math.Min(map.PrivateAllocation.Size - startOffset, size));
}
}
finally
{
_treeLock.ExitReadLock();
}
return PrivateRange.Empty;
}
public bool HasPrivateAllocation(ulong va, ulong size, ulong startVa, ulong startSize, ref PrivateRange range)
{
ulong endVa = va + size;
_treeLock.EnterReadLock();
try
{
for (PrivateMapping map = _privateTree.GetNode(va); map != null && map.Address < endVa; map = map.Successor)
{
if (map.PrivateAllocation.IsValid)
{
if (map.Address <= startVa && map.EndAddress >= startVa + startSize)
{
ulong startOffset = startVa - map.Address;
range = new(
map.PrivateAllocation.Memory,
map.PrivateAllocation.Offset + startOffset,
Math.Min(map.PrivateAllocation.Size - startOffset, startSize));
}
return true;
}
}
}
finally
{
_treeLock.ExitReadLock();
}
return false;
}
public void Dispose()
{
GC.SuppressFinalize(this);
_privateMemoryAllocator.Dispose();
_baseMemory.Dispose();
}
}
}


@@ -0,0 +1,202 @@
using Ryujinx.Common;
using Ryujinx.Common.Collections;
using Ryujinx.Memory;
using Ryujinx.Memory.Tracking;
using System;
namespace Ryujinx.Cpu.Jit.HostTracked
{
readonly struct AddressSpacePartitionAllocation : IDisposable
{
private readonly AddressSpacePartitionAllocator _owner;
private readonly PrivateMemoryAllocatorImpl<AddressSpacePartitionAllocator.Block>.Allocation _allocation;
public IntPtr Pointer => (IntPtr)((ulong)_allocation.Block.Memory.Pointer + _allocation.Offset);
public bool IsValid => _owner != null;
public AddressSpacePartitionAllocation(
AddressSpacePartitionAllocator owner,
PrivateMemoryAllocatorImpl<AddressSpacePartitionAllocator.Block>.Allocation allocation)
{
_owner = owner;
_allocation = allocation;
}
public void RegisterMapping(ulong va, ulong endVa)
{
_allocation.Block.AddMapping(_allocation.Offset, _allocation.Size, va, endVa);
}
public void MapView(MemoryBlock srcBlock, ulong srcOffset, ulong dstOffset, ulong size)
{
_allocation.Block.Memory.MapView(srcBlock, srcOffset, _allocation.Offset + dstOffset, size);
}
public void UnmapView(MemoryBlock srcBlock, ulong offset, ulong size)
{
_allocation.Block.Memory.UnmapView(srcBlock, _allocation.Offset + offset, size);
}
public void Reprotect(ulong offset, ulong size, MemoryPermission permission, bool throwOnFail)
{
_allocation.Block.Memory.Reprotect(_allocation.Offset + offset, size, permission, throwOnFail);
}
public IntPtr GetPointer(ulong offset, ulong size)
{
return _allocation.Block.Memory.GetPointer(_allocation.Offset + offset, size);
}
public void Dispose()
{
_allocation.Block.RemoveMapping(_allocation.Offset, _allocation.Size);
_owner.Free(_allocation.Block, _allocation.Offset, _allocation.Size);
}
}
class AddressSpacePartitionAllocator : PrivateMemoryAllocatorImpl<AddressSpacePartitionAllocator.Block>
{
private const ulong DefaultBlockAlignment = 1UL << 32; // 4GB
public class Block : PrivateMemoryAllocator.Block
{
private readonly MemoryTracking _tracking;
private readonly Func<ulong, ulong> _readPtCallback;
private readonly MemoryEhMeilleure _memoryEh;
private class Mapping : IntrusiveRedBlackTreeNode<Mapping>, IComparable<Mapping>, IComparable<ulong>
{
public ulong Address { get; }
public ulong Size { get; }
public ulong EndAddress => Address + Size;
public ulong Va { get; }
public ulong EndVa { get; }
public Mapping(ulong address, ulong size, ulong va, ulong endVa)
{
Address = address;
Size = size;
Va = va;
EndVa = endVa;
}
public int CompareTo(Mapping other)
{
if (Address < other.Address)
{
return -1;
}
else if (Address <= other.EndAddress - 1UL)
{
return 0;
}
else
{
return 1;
}
}
public int CompareTo(ulong address)
{
if (address < Address)
{
return -1;
}
else if (address <= EndAddress - 1UL)
{
return 0;
}
else
{
return 1;
}
}
}
private readonly AddressIntrusiveRedBlackTree<Mapping> _mappingTree;
private readonly object _lock;
public Block(MemoryTracking tracking, Func<ulong, ulong> readPtCallback, MemoryBlock memory, ulong size, object locker) : base(memory, size)
{
_tracking = tracking;
_readPtCallback = readPtCallback;
_memoryEh = new(memory, null, tracking, VirtualMemoryEvent);
_mappingTree = new();
_lock = locker;
}
public void AddMapping(ulong offset, ulong size, ulong va, ulong endVa)
{
_mappingTree.Add(new(offset, size, va, endVa));
}
public void RemoveMapping(ulong offset, ulong size)
{
_mappingTree.Remove(_mappingTree.GetNode(offset));
}
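// Fault handler for this block: finds the guest mapping that contains the faulting offset,
// forwards a guest-page-aligned tracking event for the corresponding virtual range, and returns
// the current host address read from the native page table (via _readPtCallback) so the faulting
// access can be retried; returns 0 if the event was not handled.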
private ulong VirtualMemoryEvent(ulong address, ulong size, bool write)
{
Mapping map;
lock (_lock)
{
map = _mappingTree.GetNode(address);
}
if (map == null)
{
return 0;
}
address -= map.Address;
ulong addressAligned = BitUtils.AlignDown(address, AddressSpacePartition.GuestPageSize);
ulong endAddressAligned = BitUtils.AlignUp(address + size, AddressSpacePartition.GuestPageSize);
ulong sizeAligned = endAddressAligned - addressAligned;
if (!_tracking.VirtualMemoryEvent(map.Va + addressAligned, sizeAligned, write))
{
return 0;
}
return _readPtCallback(map.Va + address);
}
public override void Destroy()
{
_memoryEh.Dispose();
base.Destroy();
}
}
private readonly MemoryTracking _tracking;
private readonly Func<ulong, ulong> _readPtCallback;
private readonly object _lock;
public AddressSpacePartitionAllocator(
MemoryTracking tracking,
Func<ulong, ulong> readPtCallback,
object locker) : base(DefaultBlockAlignment, MemoryAllocationFlags.Reserve | MemoryAllocationFlags.ViewCompatible)
{
_tracking = tracking;
_readPtCallback = readPtCallback;
_lock = locker;
}
public AddressSpacePartitionAllocation Allocate(ulong va, ulong size)
{
AddressSpacePartitionAllocation allocation = new(this, Allocate(size, MemoryBlock.GetPageSize(), CreateBlock));
allocation.RegisterMapping(va, va + size);
return allocation;
}
private Block CreateBlock(MemoryBlock memory, ulong size)
{
return new Block(_tracking, _readPtCallback, memory, size, _lock);
}
}
}


@@ -0,0 +1,101 @@
using Ryujinx.Memory;
using System;
using System.Diagnostics;
namespace Ryujinx.Cpu.Jit.HostTracked
{
class AddressSpacePartitionMultiAllocation : IDisposable
{
private readonly AddressSpacePartitionAllocation _baseMemory;
private AddressSpacePartitionAllocation _baseMemoryRo;
private AddressSpacePartitionAllocation _baseMemoryNone;
public AddressSpacePartitionMultiAllocation(AddressSpacePartitionAllocation baseMemory)
{
_baseMemory = baseMemory;
}
public void MapView(MemoryBlock srcBlock, ulong srcOffset, ulong dstOffset, ulong size)
{
_baseMemory.MapView(srcBlock, srcOffset, dstOffset, size);
if (_baseMemoryRo.IsValid)
{
_baseMemoryRo.MapView(srcBlock, srcOffset, dstOffset, size);
_baseMemoryRo.Reprotect(dstOffset, size, MemoryPermission.Read, false);
}
}
public void LateMapView(MemoryBlock srcBlock, ulong srcOffset, ulong dstOffset, ulong size)
{
_baseMemoryRo.MapView(srcBlock, srcOffset, dstOffset, size);
_baseMemoryRo.Reprotect(dstOffset, size, MemoryPermission.Read, false);
}
public void UnmapView(MemoryBlock srcBlock, ulong offset, ulong size)
{
_baseMemory.UnmapView(srcBlock, offset, size);
if (_baseMemoryRo.IsValid)
{
_baseMemoryRo.UnmapView(srcBlock, offset, size);
}
}
public void Reprotect(ulong offset, ulong size, MemoryPermission permission, bool throwOnFail)
{
_baseMemory.Reprotect(offset, size, permission, throwOnFail);
}
public IntPtr GetPointer(ulong offset, ulong size)
{
return _baseMemory.GetPointer(offset, size);
}
public bool LazyInitMirrorForProtection(AddressSpacePartitioned addressSpace, ulong blockAddress, ulong blockSize, MemoryPermission permission)
{
if (permission == MemoryPermission.None && !_baseMemoryNone.IsValid)
{
_baseMemoryNone = addressSpace.CreateAsPartitionAllocation(blockAddress, blockSize);
}
else if (permission == MemoryPermission.Read && !_baseMemoryRo.IsValid)
{
_baseMemoryRo = addressSpace.CreateAsPartitionAllocation(blockAddress, blockSize);
return true;
}
return false;
}
public IntPtr GetPointerForProtection(ulong offset, ulong size, MemoryPermission permission)
{
AddressSpacePartitionAllocation allocation = permission switch
{
MemoryPermission.ReadAndWrite => _baseMemory,
MemoryPermission.Read => _baseMemoryRo,
MemoryPermission.None => _baseMemoryNone,
_ => throw new ArgumentException($"Invalid protection \"{permission}\"."),
};
Debug.Assert(allocation.IsValid);
return allocation.GetPointer(offset, size);
}
public void Dispose()
{
_baseMemory.Dispose();
if (_baseMemoryRo.IsValid)
{
_baseMemoryRo.Dispose();
}
if (_baseMemoryNone.IsValid)
{
_baseMemoryNone.Dispose();
}
}
}
}


@@ -0,0 +1,407 @@
using Ryujinx.Common;
using Ryujinx.Memory;
using Ryujinx.Memory.Tracking;
using System;
using System.Collections.Generic;
using System.Diagnostics;
namespace Ryujinx.Cpu.Jit.HostTracked
{
class AddressSpacePartitioned : IDisposable
{
private const int PartitionBits = 25;
private const ulong PartitionSize = 1UL << PartitionBits;
private readonly MemoryBlock _backingMemory;
private readonly List<AddressSpacePartition> _partitions;
private readonly AddressSpacePartitionAllocator _asAllocator;
private readonly Action<ulong, IntPtr, ulong> _updatePtCallback;
private readonly bool _useProtectionMirrors;
public AddressSpacePartitioned(MemoryTracking tracking, MemoryBlock backingMemory, NativePageTable nativePageTable, bool useProtectionMirrors)
{
_backingMemory = backingMemory;
_partitions = new();
_asAllocator = new(tracking, nativePageTable.Read, _partitions);
_updatePtCallback = nativePageTable.Update;
_useProtectionMirrors = useProtectionMirrors;
}
public void Map(ulong va, ulong pa, ulong size)
{
ulong endVa = va + size;
lock (_partitions)
{
EnsurePartitionsLocked(va, size);
while (va < endVa)
{
int partitionIndex = FindPartitionIndexLocked(va);
AddressSpacePartition partition = _partitions[partitionIndex];
(ulong clampedVa, ulong clampedEndVa) = ClampRange(partition, va, endVa);
partition.Map(clampedVa, pa, clampedEndVa - clampedVa);
ulong currentSize = clampedEndVa - clampedVa;
va += currentSize;
pa += currentSize;
InsertOrRemoveBridgeIfNeeded(partitionIndex);
}
}
}
public void Unmap(ulong va, ulong size)
{
ulong endVa = va + size;
while (va < endVa)
{
AddressSpacePartition partition;
lock (_partitions)
{
int partitionIndex = FindPartitionIndexLocked(va);
if (partitionIndex < 0)
{
va += PartitionSize - (va & (PartitionSize - 1));
continue;
}
partition = _partitions[partitionIndex];
(ulong clampedVa, ulong clampedEndVa) = ClampRange(partition, va, endVa);
partition.Unmap(clampedVa, clampedEndVa - clampedVa);
va += clampedEndVa - clampedVa;
InsertOrRemoveBridgeIfNeeded(partitionIndex);
if (partition.IsEmpty())
{
_partitions.Remove(partition);
partition.Dispose();
}
}
}
}
public void Reprotect(ulong va, ulong size, MemoryPermission protection)
{
ulong endVa = va + size;
lock (_partitions)
{
while (va < endVa)
{
AddressSpacePartition partition = FindPartitionWithIndex(va, out int partitionIndex);
if (partition == null)
{
va += PartitionSize - (va & (PartitionSize - 1));
continue;
}
(ulong clampedVa, ulong clampedEndVa) = ClampRange(partition, va, endVa);
if (_useProtectionMirrors)
{
partition.Reprotect(clampedVa, clampedEndVa - clampedVa, protection, this, _updatePtCallback);
}
else
{
partition.ReprotectAligned(clampedVa, clampedEndVa - clampedVa, protection);
if (clampedVa == partition.Address &&
partitionIndex > 0 &&
_partitions[partitionIndex - 1].EndAddress == partition.Address)
{
_partitions[partitionIndex - 1].ReprotectBridge(protection);
}
}
va += clampedEndVa - clampedVa;
}
}
}
public PrivateRange GetPrivateAllocation(ulong va)
{
AddressSpacePartition partition = FindPartition(va);
if (partition == null)
{
return PrivateRange.Empty;
}
return partition.GetPrivateAllocation(va);
}
public PrivateRange GetFirstPrivateAllocation(ulong va, ulong size, out ulong nextVa)
{
AddressSpacePartition partition = FindPartition(va);
if (partition == null)
{
nextVa = (va & ~(PartitionSize - 1)) + PartitionSize;
return PrivateRange.Empty;
}
return partition.GetFirstPrivateAllocation(va, size, out nextVa);
}
public bool HasAnyPrivateAllocation(ulong va, ulong size, out PrivateRange range)
{
range = PrivateRange.Empty;
ulong startVa = va;
ulong endVa = va + size;
while (va < endVa)
{
AddressSpacePartition partition = FindPartition(va);
if (partition == null)
{
va += PartitionSize - (va & (PartitionSize - 1));
continue;
}
(ulong clampedVa, ulong clampedEndVa) = ClampRange(partition, va, endVa);
if (partition.HasPrivateAllocation(clampedVa, clampedEndVa - clampedVa, startVa, size, ref range))
{
return true;
}
va += clampedEndVa - clampedVa;
}
return false;
}
private void InsertOrRemoveBridgeIfNeeded(int partitionIndex)
{
if (partitionIndex > 0)
{
if (_partitions[partitionIndex - 1].EndAddress == _partitions[partitionIndex].Address)
{
_partitions[partitionIndex - 1].InsertBridgeAtEnd(_partitions[partitionIndex], _useProtectionMirrors);
}
else
{
_partitions[partitionIndex - 1].InsertBridgeAtEnd(null, _useProtectionMirrors);
}
}
if (partitionIndex + 1 < _partitions.Count && _partitions[partitionIndex].EndAddress == _partitions[partitionIndex + 1].Address)
{
_partitions[partitionIndex].InsertBridgeAtEnd(_partitions[partitionIndex + 1], _useProtectionMirrors);
}
else
{
_partitions[partitionIndex].InsertBridgeAtEnd(null, _useProtectionMirrors);
}
}
public IntPtr GetPointer(ulong va, ulong size)
{
AddressSpacePartition partition = FindPartition(va);
return partition.GetPointer(va, size);
}
private static (ulong, ulong) ClampRange(AddressSpacePartition partition, ulong va, ulong endVa)
{
if (va < partition.Address)
{
va = partition.Address;
}
if (endVa > partition.EndAddress)
{
endVa = partition.EndAddress;
}
return (va, endVa);
}
private AddressSpacePartition FindPartition(ulong va)
{
lock (_partitions)
{
int index = FindPartitionIndexLocked(va);
if (index >= 0)
{
return _partitions[index];
}
}
return null;
}
private AddressSpacePartition FindPartitionWithIndex(ulong va, out int index)
{
lock (_partitions)
{
index = FindPartitionIndexLocked(va);
if (index >= 0)
{
return _partitions[index];
}
}
return null;
}
private int FindPartitionIndexLocked(ulong va)
{
int left = 0;
int middle;
int right = _partitions.Count - 1;
while (left <= right)
{
middle = left + ((right - left) >> 1);
AddressSpacePartition partition = _partitions[middle];
if (partition.Address <= va && partition.EndAddress > va)
{
return middle;
}
if (partition.Address >= va)
{
right = middle - 1;
}
else
{
left = middle + 1;
}
}
return -1;
}
private void EnsurePartitionsLocked(ulong va, ulong size)
{
ulong endVa = BitUtils.AlignUp(va + size, PartitionSize);
va = BitUtils.AlignDown(va, PartitionSize);
for (int i = 0; i < _partitions.Count && va < endVa; i++)
{
AddressSpacePartition partition = _partitions[i];
if (partition.Address <= va && partition.EndAddress > va)
{
if (partition.EndAddress >= endVa)
{
// Fully mapped already.
va = endVa;
break;
}
ulong gapSize;
if (i + 1 < _partitions.Count)
{
AddressSpacePartition nextPartition = _partitions[i + 1];
if (partition.EndAddress == nextPartition.Address)
{
va = partition.EndAddress;
continue;
}
gapSize = Math.Min(endVa, nextPartition.Address) - partition.EndAddress;
}
else
{
gapSize = endVa - partition.EndAddress;
}
_partitions.Insert(i + 1, CreateAsPartition(partition.EndAddress, gapSize));
va = partition.EndAddress + gapSize;
i++;
}
else if (partition.EndAddress > va)
{
Debug.Assert(partition.Address > va);
ulong gapSize;
if (partition.Address < endVa)
{
gapSize = partition.Address - va;
}
else
{
gapSize = endVa - va;
}
_partitions.Insert(i, CreateAsPartition(va, gapSize));
va = Math.Min(partition.EndAddress, endVa);
i++;
}
}
if (va < endVa)
{
_partitions.Add(CreateAsPartition(va, endVa - va));
}
ValidatePartitionList();
}
[Conditional("DEBUG")]
private void ValidatePartitionList()
{
for (int i = 1; i < _partitions.Count; i++)
{
Debug.Assert(_partitions[i].Address > _partitions[i - 1].Address);
Debug.Assert(_partitions[i].EndAddress > _partitions[i - 1].EndAddress);
}
}
private AddressSpacePartition CreateAsPartition(ulong va, ulong size)
{
return new(CreateAsPartitionAllocation(va, size), _backingMemory, va, size);
}
public AddressSpacePartitionAllocation CreateAsPartitionAllocation(ulong va, ulong size)
{
return _asAllocator.Allocate(va, size + MemoryBlock.GetPageSize());
}
protected virtual void Dispose(bool disposing)
{
if (disposing)
{
foreach (AddressSpacePartition partition in _partitions)
{
partition.Dispose();
}
_partitions.Clear();
_asAllocator.Dispose();
}
}
public void Dispose()
{
Dispose(disposing: true);
GC.SuppressFinalize(this);
}
}
}


@@ -0,0 +1,223 @@
using Ryujinx.Cpu.Signal;
using Ryujinx.Memory;
using System;
using System.Diagnostics;
using System.Numerics;
using System.Runtime.InteropServices;
namespace Ryujinx.Cpu.Jit.HostTracked
{
sealed class NativePageTable : IDisposable
{
private delegate ulong TrackingEventDelegate(ulong address, ulong size, bool write);
private const int PageBits = 12;
private const int PageSize = 1 << PageBits;
private const int PageMask = PageSize - 1;
private const int PteSize = 8;
private readonly int _bitsPerPtPage;
private readonly int _entriesPerPtPage;
private readonly int _pageCommitmentBits;
private readonly PageTable<ulong> _pageTable;
private readonly MemoryBlock _nativePageTable;
private readonly ulong[] _pageCommitmentBitmap;
private readonly ulong _hostPageSize;
private readonly TrackingEventDelegate _trackingEvent;
private bool _disposed;
public IntPtr PageTablePointer => _nativePageTable.Pointer;
public NativePageTable(ulong asSize)
{
ulong hostPageSize = MemoryBlock.GetPageSize();
_entriesPerPtPage = (int)(hostPageSize / sizeof(ulong));
_bitsPerPtPage = BitOperations.Log2((uint)_entriesPerPtPage);
_pageCommitmentBits = PageBits + _bitsPerPtPage;
_hostPageSize = hostPageSize;
_pageTable = new PageTable<ulong>();
_nativePageTable = new MemoryBlock((asSize / PageSize) * PteSize + _hostPageSize, MemoryAllocationFlags.Reserve);
_pageCommitmentBitmap = new ulong[(asSize >> _pageCommitmentBits) / (sizeof(ulong) * 8)];
ulong ptStart = (ulong)_nativePageTable.Pointer;
ulong ptEnd = ptStart + _nativePageTable.Size;
_trackingEvent = VirtualMemoryEvent;
bool added = NativeSignalHandler.AddTrackedRegion((nuint)ptStart, (nuint)ptEnd, Marshal.GetFunctionPointerForDelegate(_trackingEvent));
if (!added)
{
throw new InvalidOperationException("Number of allowed tracked regions exceeded.");
}
}
public void Map(ulong va, ulong pa, ulong size, AddressSpacePartitioned addressSpace, MemoryBlock backingMemory, bool privateMap)
{
while (size != 0)
{
_pageTable.Map(va, pa);
EnsureCommitment(va);
if (privateMap)
{
_nativePageTable.Write((va / PageSize) * PteSize, GetPte(va, addressSpace.GetPointer(va, PageSize)));
}
else
{
_nativePageTable.Write((va / PageSize) * PteSize, GetPte(va, backingMemory.GetPointer(pa, PageSize)));
}
va += PageSize;
pa += PageSize;
size -= PageSize;
}
}
public void Unmap(ulong va, ulong size)
{
IntPtr guardPagePtr = GetGuardPagePointer();
while (size != 0)
{
_pageTable.Unmap(va);
_nativePageTable.Write((va / PageSize) * PteSize, GetPte(va, guardPagePtr));
va += PageSize;
size -= PageSize;
}
}
public ulong Read(ulong va)
{
ulong pte = _nativePageTable.Read<ulong>((va / PageSize) * PteSize);
pte += va & ~(ulong)PageMask;
return pte + (va & PageMask);
}
public void Update(ulong va, IntPtr ptr, ulong size)
{
ulong remainingSize = size;
while (remainingSize != 0)
{
EnsureCommitment(va);
_nativePageTable.Write((va / PageSize) * PteSize, GetPte(va, ptr));
va += PageSize;
ptr += PageSize;
remainingSize -= PageSize;
}
}
private void EnsureCommitment(ulong va)
{
ulong bit = va >> _pageCommitmentBits;
int index = (int)(bit / (sizeof(ulong) * 8));
int shift = (int)(bit % (sizeof(ulong) * 8));
ulong mask = 1UL << shift;
ulong oldMask = _pageCommitmentBitmap[index];
if ((oldMask & mask) == 0)
{
lock (_pageCommitmentBitmap)
{
oldMask = _pageCommitmentBitmap[index];
if ((oldMask & mask) != 0)
{
return;
}
_nativePageTable.Commit(bit * _hostPageSize, _hostPageSize);
Span<ulong> pageSpan = MemoryMarshal.Cast<byte, ulong>(_nativePageTable.GetSpan(bit * _hostPageSize, (int)_hostPageSize));
Debug.Assert(pageSpan.Length == _entriesPerPtPage);
IntPtr guardPagePtr = GetGuardPagePointer();
for (int i = 0; i < pageSpan.Length; i++)
{
pageSpan[i] = GetPte((bit << _pageCommitmentBits) | ((ulong)i * PageSize), guardPagePtr);
}
_pageCommitmentBitmap[index] = oldMask | mask;
}
}
}
private IntPtr GetGuardPagePointer()
{
return _nativePageTable.GetPointer(_nativePageTable.Size - _hostPageSize, _hostPageSize);
}
private static ulong GetPte(ulong va, IntPtr ptr)
{
Debug.Assert((va & PageMask) == 0);
return (ulong)ptr - va;
}
public ulong GetPhysicalAddress(ulong va)
{
return _pageTable.Read(va) + (va & PageMask);
}
private ulong VirtualMemoryEvent(ulong address, ulong size, bool write)
{
if (address < _nativePageTable.Size - _hostPageSize)
{
// Some prefetch instructions do not cause faults with invalid addresses.
// Retry if we are hitting a case where the page table is unmapped, the next
// run will execute the actual instruction.
// The address loaded from the page table will be invalid, and it should hit the else case
// if the instruction faults on unmapped or protected memory.
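// Here "address" is the offset of the faulting access within the native page table block;
// each 8-byte PTE covers one 4KB guest page, so the guest VA is offset * (PageSize / PteSize).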
ulong va = address * (PageSize / sizeof(ulong));
EnsureCommitment(va);
return (ulong)_nativePageTable.Pointer + address;
}
else
{
throw new InvalidMemoryRegionException();
}
}
private void Dispose(bool disposing)
{
if (!_disposed)
{
if (disposing)
{
NativeSignalHandler.RemoveTrackedRegion((nuint)_nativePageTable.Pointer);
_nativePageTable.Dispose();
}
_disposed = true;
}
}
public void Dispose()
{
Dispose(disposing: true);
GC.SuppressFinalize(this);
}
}
}


@@ -15,9 +15,9 @@ namespace Ryujinx.Cpu.Jit
_tickSource = tickSource;
_translator = new Translator(new JitMemoryAllocator(forJit: true), memory, for64Bit);
-if (memory.Type.IsHostMapped())
+if (memory.Type.IsHostMappedOrTracked())
{
-NativeSignalHandler.InitializeSignalHandler(MemoryBlock.GetPageSize());
+NativeSignalHandler.InitializeSignalHandler();
}
memory.UnmapEvent += UnmapHandler;


@@ -0,0 +1,627 @@
using ARMeilleure.Memory;
using Ryujinx.Cpu.Jit.HostTracked;
using Ryujinx.Cpu.Signal;
using Ryujinx.Memory;
using Ryujinx.Memory.Range;
using Ryujinx.Memory.Tracking;
using System;
using System.Collections.Generic;
using System.Linq;
using System.Runtime.CompilerServices;
using System.Runtime.InteropServices;
namespace Ryujinx.Cpu.Jit
{
/// <summary>
/// Represents a CPU memory manager which maps guest virtual memory directly onto a host virtual region.
/// </summary>
public sealed class MemoryManagerHostTracked : VirtualMemoryManagerRefCountedBase<ulong, ulong>, IWritableBlock, IMemoryManager, IVirtualMemoryManagerTracked
{
private readonly InvalidAccessHandler _invalidAccessHandler;
private readonly bool _unsafeMode;
private readonly MemoryBlock _backingMemory;
public int AddressSpaceBits { get; }
public MemoryTracking Tracking { get; }
private readonly NativePageTable _nativePageTable;
private readonly AddressSpacePartitioned _addressSpace;
private readonly ManagedPageFlags _pages;
protected override ulong AddressSpaceSize { get; }
/// <inheritdoc/>
public bool Supports4KBPages => false;
public IntPtr PageTablePointer => _nativePageTable.PageTablePointer;
public MemoryManagerType Type => _unsafeMode ? MemoryManagerType.HostTrackedUnsafe : MemoryManagerType.HostTracked;
public event Action<ulong, ulong> UnmapEvent;
/// <summary>
/// Creates a new instance of the host tracked memory manager.
/// </summary>
/// <param name="backingMemory">Physical backing memory where virtual memory will be mapped to</param>
/// <param name="addressSpaceSize">Size of the address space</param>
/// <param name="unsafeMode">True if unmanaged access should not be masked (unsafe), false otherwise.</param>
/// <param name="invalidAccessHandler">Optional function to handle invalid memory accesses</param>
public MemoryManagerHostTracked(MemoryBlock backingMemory, ulong addressSpaceSize, bool unsafeMode, InvalidAccessHandler invalidAccessHandler)
{
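// Protection mirrors are needed when the host page size is larger than the 4KB guest page size
// (e.g. 16KB host pages on Apple Silicon), since the main mapping can then no longer be
// reprotected at guest page granularity.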
bool useProtectionMirrors = MemoryBlock.GetPageSize() > PageSize;
Tracking = new MemoryTracking(this, PageSize, invalidAccessHandler, useProtectionMirrors);
_backingMemory = backingMemory;
_invalidAccessHandler = invalidAccessHandler;
_unsafeMode = unsafeMode;
AddressSpaceSize = addressSpaceSize;
ulong asSize = PageSize;
int asBits = PageBits;
while (asSize < AddressSpaceSize)
{
asSize <<= 1;
asBits++;
}
AddressSpaceBits = asBits;
if (useProtectionMirrors && !NativeSignalHandler.SupportsFaultAddressPatching())
{
// Currently we require being able to change the fault address to something else
// in order to "emulate" 4KB granularity protection on systems with larger page size.
throw new PlatformNotSupportedException();
}
_pages = new ManagedPageFlags(asBits);
_nativePageTable = new(asSize);
_addressSpace = new(Tracking, backingMemory, _nativePageTable, useProtectionMirrors);
}
/// <inheritdoc/>
public void Map(ulong va, ulong pa, ulong size, MemoryMapFlags flags)
{
AssertValidAddressAndSize(va, size);
if (flags.HasFlag(MemoryMapFlags.Private))
{
_addressSpace.Map(va, pa, size);
}
_pages.AddMapping(va, size);
_nativePageTable.Map(va, pa, size, _addressSpace, _backingMemory, flags.HasFlag(MemoryMapFlags.Private));
Tracking.Map(va, size);
}
/// <inheritdoc/>
public void MapForeign(ulong va, nuint hostPointer, ulong size)
{
throw new NotSupportedException();
}
/// <inheritdoc/>
public void Unmap(ulong va, ulong size)
{
AssertValidAddressAndSize(va, size);
_addressSpace.Unmap(va, size);
UnmapEvent?.Invoke(va, size);
Tracking.Unmap(va, size);
_pages.RemoveMapping(va, size);
_nativePageTable.Unmap(va, size);
}
public T Read<T>(ulong va) where T : unmanaged
{
return MemoryMarshal.Cast<byte, T>(GetSpan(va, Unsafe.SizeOf<T>()))[0];
}
public T ReadTracked<T>(ulong va) where T : unmanaged
{
try
{
SignalMemoryTracking(va, (ulong)Unsafe.SizeOf<T>(), false);
return Read<T>(va);
}
catch (InvalidMemoryRegionException)
{
if (_invalidAccessHandler == null || !_invalidAccessHandler(va))
{
throw;
}
return default;
}
}
public override void Read(ulong va, Span<byte> data)
{
ReadImpl(va, data);
}
public void Write<T>(ulong va, T value) where T : unmanaged
{
Write(va, MemoryMarshal.Cast<T, byte>(MemoryMarshal.CreateSpan(ref value, 1)));
}
public void Write(ulong va, ReadOnlySpan<byte> data)
{
if (data.Length == 0)
{
return;
}
SignalMemoryTracking(va, (ulong)data.Length, true);
WriteImpl(va, data);
}
public void WriteUntracked(ulong va, ReadOnlySpan<byte> data)
{
if (data.Length == 0)
{
return;
}
WriteImpl(va, data);
}
public bool WriteWithRedundancyCheck(ulong va, ReadOnlySpan<byte> data)
{
if (data.Length == 0)
{
return false;
}
SignalMemoryTracking(va, (ulong)data.Length, false);
if (TryGetVirtualContiguous(va, data.Length, out MemoryBlock memoryBlock, out ulong offset))
{
var target = memoryBlock.GetSpan(offset, data.Length);
bool changed = !data.SequenceEqual(target);
if (changed)
{
data.CopyTo(target);
}
return changed;
}
else
{
WriteImpl(va, data);
return true;
}
}
private void WriteImpl(ulong va, ReadOnlySpan<byte> data)
{
try
{
AssertValidAddressAndSize(va, (ulong)data.Length);
ulong endVa = va + (ulong)data.Length;
int offset = 0;
while (va < endVa)
{
(MemoryBlock memory, ulong rangeOffset, ulong copySize) = GetMemoryOffsetAndSize(va, (ulong)(data.Length - offset));
data.Slice(offset, (int)copySize).CopyTo(memory.GetSpan(rangeOffset, (int)copySize));
va += copySize;
offset += (int)copySize;
}
}
catch (InvalidMemoryRegionException)
{
if (_invalidAccessHandler == null || !_invalidAccessHandler(va))
{
throw;
}
}
}
public ReadOnlySpan<byte> GetSpan(ulong va, int size, bool tracked = false)
{
if (size == 0)
{
return ReadOnlySpan<byte>.Empty;
}
if (tracked)
{
SignalMemoryTracking(va, (ulong)size, false);
}
if (TryGetVirtualContiguous(va, size, out MemoryBlock memoryBlock, out ulong offset))
{
return memoryBlock.GetSpan(offset, size);
}
else
{
Span<byte> data = new byte[size];
ReadImpl(va, data);
return data;
}
}
public WritableRegion GetWritableRegion(ulong va, int size, bool tracked = false)
{
if (size == 0)
{
return new WritableRegion(null, va, Memory<byte>.Empty);
}
if (tracked)
{
SignalMemoryTracking(va, (ulong)size, true);
}
if (TryGetVirtualContiguous(va, size, out MemoryBlock memoryBlock, out ulong offset))
{
return new WritableRegion(null, va, memoryBlock.GetMemory(offset, size));
}
else
{
Memory<byte> memory = new byte[size];
ReadImpl(va, memory.Span);
return new WritableRegion(this, va, memory);
}
}
public ref T GetRef<T>(ulong va) where T : unmanaged
{
if (!TryGetVirtualContiguous(va, Unsafe.SizeOf<T>(), out MemoryBlock memory, out ulong offset))
{
ThrowMemoryNotContiguous();
}
SignalMemoryTracking(va, (ulong)Unsafe.SizeOf<T>(), true);
return ref memory.GetRef<T>(offset);
}
[MethodImpl(MethodImplOptions.AggressiveInlining)]
public bool IsMapped(ulong va)
{
return ValidateAddress(va) && _pages.IsMapped(va);
}
public bool IsRangeMapped(ulong va, ulong size)
{
AssertValidAddressAndSize(va, size);
return _pages.IsRangeMapped(va, size);
}
private static void ThrowMemoryNotContiguous() => throw new MemoryNotContiguousException();
private bool TryGetVirtualContiguous(ulong va, int size, out MemoryBlock memory, out ulong offset)
{
if (_addressSpace.HasAnyPrivateAllocation(va, (ulong)size, out PrivateRange range))
{
// If we have a private allocation overlapping the range,
// then the access is only considered contiguous if it covers the entire range.
if (range.Memory != null)
{
memory = range.Memory;
offset = range.Offset;
return true;
}
memory = null;
offset = 0;
return false;
}
memory = _backingMemory;
offset = GetPhysicalAddressInternal(va);
return IsPhysicalContiguous(va, size);
}
[MethodImpl(MethodImplOptions.AggressiveInlining)]
private bool IsPhysicalContiguous(ulong va, int size)
{
if (!ValidateAddress(va) || !ValidateAddressAndSize(va, (ulong)size))
{
return false;
}
int pages = GetPagesCount(va, (uint)size, out va);
for (int page = 0; page < pages - 1; page++)
{
if (!ValidateAddress(va + PageSize))
{
return false;
}
if (GetPhysicalAddressInternal(va) + PageSize != GetPhysicalAddressInternal(va + PageSize))
{
return false;
}
va += PageSize;
}
return true;
}
[MethodImpl(MethodImplOptions.AggressiveInlining)]
private ulong GetContiguousSize(ulong va, ulong size)
{
ulong contiguousSize = PageSize - (va & PageMask);
if (!ValidateAddress(va) || !ValidateAddressAndSize(va, size))
{
return contiguousSize;
}
int pages = GetPagesCount(va, size, out va);
for (int page = 0; page < pages - 1; page++)
{
if (!ValidateAddress(va + PageSize))
{
return contiguousSize;
}
if (GetPhysicalAddressInternal(va) + PageSize != GetPhysicalAddressInternal(va + PageSize))
{
return contiguousSize;
}
va += PageSize;
contiguousSize += PageSize;
}
return Math.Min(contiguousSize, size);
}
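/// <summary>
/// Gets the host memory block, offset and size of the first contiguous chunk of memory starting at the given virtual address.
/// </summary>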
private (MemoryBlock, ulong, ulong) GetMemoryOffsetAndSize(ulong va, ulong size)
{
PrivateRange privateRange = _addressSpace.GetFirstPrivateAllocation(va, size, out ulong nextVa);
if (privateRange.Memory != null)
{
return (privateRange.Memory, privateRange.Offset, privateRange.Size);
}
ulong physSize = GetContiguousSize(va, Math.Min(size, nextVa - va));
return (_backingMemory, GetPhysicalAddressChecked(va), physSize);
}
public IEnumerable<HostMemoryRange> GetHostRegions(ulong va, ulong size)
{
if (!ValidateAddressAndSize(va, size))
{
return null;
}
var regions = new List<HostMemoryRange>();
ulong endVa = va + size;
try
{
while (va < endVa)
{
(MemoryBlock memory, ulong rangeOffset, ulong rangeSize) = GetMemoryOffsetAndSize(va, endVa - va);
regions.Add(new((UIntPtr)memory.GetPointer(rangeOffset, rangeSize), rangeSize));
va += rangeSize;
}
}
catch (InvalidMemoryRegionException)
{
return null;
}
return regions;
}
public IEnumerable<MemoryRange> GetPhysicalRegions(ulong va, ulong size)
{
if (size == 0)
{
return Enumerable.Empty<MemoryRange>();
}
return GetPhysicalRegionsImpl(va, size);
}
private List<MemoryRange> GetPhysicalRegionsImpl(ulong va, ulong size)
{
if (!ValidateAddress(va) || !ValidateAddressAndSize(va, size))
{
return null;
}
int pages = GetPagesCount(va, (uint)size, out va);
var regions = new List<MemoryRange>();
ulong regionStart = GetPhysicalAddressInternal(va);
ulong regionSize = PageSize;
for (int page = 0; page < pages - 1; page++)
{
if (!ValidateAddress(va + PageSize))
{
return null;
}
ulong newPa = GetPhysicalAddressInternal(va + PageSize);
if (GetPhysicalAddressInternal(va) + PageSize != newPa)
{
regions.Add(new MemoryRange(regionStart, regionSize));
regionStart = newPa;
regionSize = 0;
}
va += PageSize;
regionSize += PageSize;
}
regions.Add(new MemoryRange(regionStart, regionSize));
return regions;
}
private void ReadImpl(ulong va, Span<byte> data)
{
if (data.Length == 0)
{
return;
}
try
{
AssertValidAddressAndSize(va, (ulong)data.Length);
ulong endVa = va + (ulong)data.Length;
int offset = 0;
while (va < endVa)
{
(MemoryBlock memory, ulong rangeOffset, ulong copySize) = GetMemoryOffsetAndSize(va, (ulong)(data.Length - offset));
memory.GetSpan(rangeOffset, (int)copySize).CopyTo(data.Slice(offset, (int)copySize));
va += copySize;
offset += (int)copySize;
}
}
catch (InvalidMemoryRegionException)
{
if (_invalidAccessHandler == null || !_invalidAccessHandler(va))
{
throw;
}
}
}
/// <inheritdoc/>
/// <remarks>
/// This function also validates that the given range is both valid and mapped, and will throw if it is not.
/// </remarks>
public void SignalMemoryTracking(ulong va, ulong size, bool write, bool precise = false, int? exemptId = null)
{
AssertValidAddressAndSize(va, size);
if (precise)
{
Tracking.VirtualMemoryEvent(va, size, write, precise: true, exemptId);
return;
}
// Software table, used for managed memory tracking.
_pages.SignalMemoryTracking(Tracking, va, size, write, exemptId);
}
/// <summary>
/// Computes the number of pages in a virtual address range.
/// </summary>
/// <param name="va">Virtual address of the range</param>
/// <param name="size">Size of the range</param>
/// <param name="startVa">The virtual address of the beginning of the first page</param>
/// <remarks>This function does not differentiate between allocated and unallocated pages.</remarks>
[MethodImpl(MethodImplOptions.AggressiveInlining)]
private int GetPagesCount(ulong va, ulong size, out ulong startVa)
{
// WARNING: Always check if ulong does not overflow during the operations.
startVa = va & ~(ulong)PageMask;
ulong vaSpan = (va - startVa + size + PageMask) & ~(ulong)PageMask;
return (int)(vaSpan / PageSize);
}
public RegionHandle BeginTracking(ulong address, ulong size, int id, RegionFlags flags = RegionFlags.None)
{
return Tracking.BeginTracking(address, size, id, flags);
}
public MultiRegionHandle BeginGranularTracking(ulong address, ulong size, IEnumerable<IRegionHandle> handles, ulong granularity, int id, RegionFlags flags = RegionFlags.None)
{
return Tracking.BeginGranularTracking(address, size, handles, granularity, id, flags);
}
public SmartMultiRegionHandle BeginSmartGranularTracking(ulong address, ulong size, ulong granularity, int id)
{
return Tracking.BeginSmartGranularTracking(address, size, granularity, id);
}
private ulong GetPhysicalAddressChecked(ulong va)
{
if (!IsMapped(va))
{
ThrowInvalidMemoryRegionException($"Not mapped: va=0x{va:X16}");
}
return GetPhysicalAddressInternal(va);
}
private ulong GetPhysicalAddressInternal(ulong va)
{
return _nativePageTable.GetPhysicalAddress(va);
}
/// <inheritdoc/>
public void Reprotect(ulong va, ulong size, MemoryPermission protection)
{
// TODO
}
/// <inheritdoc/>
public void TrackingReprotect(ulong va, ulong size, MemoryPermission protection, bool guest)
{
if (guest)
{
_addressSpace.Reprotect(va, size, protection);
}
else
{
_pages.TrackingReprotect(va, size, protection);
}
}
/// <summary>
/// Disposes of resources used by the memory manager.
/// </summary>
protected override void Destroy()
{
_addressSpace.Dispose();
_nativePageTable.Dispose();
}
protected override Span<byte> GetPhysicalAddressSpan(ulong pa, int size)
=> _backingMemory.GetSpan(pa, size);
protected override ulong TranslateVirtualAddressForRead(ulong va)
=> GetPhysicalAddressInternal(va);
}
}


@@ -1126,11 +1126,23 @@ namespace Ryujinx.Cpu.LightningJit.Arm32.Target.Arm64
             Operand destination64 = new(destination.Kind, OperandType.I64, destination.Value);
             Operand basePointer = new(regAlloc.FixedPageTableRegister, RegisterType.Integer, OperandType.I64);
 
-            if (mmType == MemoryManagerType.HostMapped || mmType == MemoryManagerType.HostMappedUnsafe)
+            // We don't need to mask the address for the safe mode, since it is already naturally limited to 32-bit
+            // and can never reach out of the guest address space.
+            if (mmType.IsHostTracked())
+            {
+                int tempRegister = regAlloc.AllocateTempGprRegister();
+
+                Operand pte = new(tempRegister, RegisterType.Integer, OperandType.I64);
+
+                asm.Lsr(pte, guestAddress, new Operand(OperandKind.Constant, OperandType.I32, 12));
+                asm.LdrRr(pte, basePointer, pte, ArmExtensionType.Uxtx, true);
+                asm.Add(destination64, pte, guestAddress);
+
+                regAlloc.FreeTempGprRegister(tempRegister);
+            }
+            else if (mmType.IsHostMapped())
             {
-                // We don't need to mask the address for the safe mode, since it is already naturally limited to 32-bit
-                // and can never reach out of the guest address space.
                 asm.Add(destination64, basePointer, guestAddress);
             }
             else


@@ -1131,5 +1131,37 @@ namespace Ryujinx.Cpu.LightningJit.Arm64
 
             return false;
         }
+
+        public static bool IsPartialRegisterUpdateMemory(this InstName name)
+        {
+            switch (name)
+            {
+                case InstName.Ld1AdvsimdSnglAsNoPostIndex:
+                case InstName.Ld1AdvsimdSnglAsPostIndex:
+                case InstName.Ld2AdvsimdSnglAsNoPostIndex:
+                case InstName.Ld2AdvsimdSnglAsPostIndex:
+                case InstName.Ld3AdvsimdSnglAsNoPostIndex:
+                case InstName.Ld3AdvsimdSnglAsPostIndex:
+                case InstName.Ld4AdvsimdSnglAsNoPostIndex:
+                case InstName.Ld4AdvsimdSnglAsPostIndex:
+                    return true;
+            }
+
+            return false;
+        }
+
+        public static bool IsPrefetchMemory(this InstName name)
+        {
+            switch (name)
+            {
+                case InstName.PrfmImm:
+                case InstName.PrfmLit:
+                case InstName.PrfmReg:
+                case InstName.Prfum:
+                    return true;
+            }
+
+            return false;
+        }
     }
 }


@@ -1,15 +1,12 @@
+using ARMeilleure.Memory;
 using Ryujinx.Cpu.LightningJit.CodeGen.Arm64;
 using System;
-using System.Diagnostics;
 using System.Numerics;
 
 namespace Ryujinx.Cpu.LightningJit.Arm64
 {
     class RegisterAllocator
     {
-        public const int MaxTemps = 1;
-        public const int MaxTempsInclFixed = MaxTemps + 2;
-
         private uint _gprMask;
         private readonly uint _fpSimdMask;
         private readonly uint _pStateMask;
@@ -25,7 +22,7 @@ namespace Ryujinx.Cpu.LightningJit.Arm64
         public uint AllFpSimdMask => _fpSimdMask;
         public uint AllPStateMask => _pStateMask;
 
-        public RegisterAllocator(uint gprMask, uint fpSimdMask, uint pStateMask, bool hasHostCall)
+        public RegisterAllocator(MemoryManagerType mmType, uint gprMask, uint fpSimdMask, uint pStateMask, bool hasHostCall)
         {
             _gprMask = gprMask;
             _fpSimdMask = fpSimdMask;
@@ -56,7 +53,7 @@ namespace Ryujinx.Cpu.LightningJit.Arm64
 
             BuildRegisterMap(_registerMap);
 
-            Span<int> tempRegisters = stackalloc int[MaxTemps];
+            Span<int> tempRegisters = stackalloc int[CalculateMaxTemps(mmType)];
 
             for (int index = 0; index < tempRegisters.Length; index++)
             {
@@ -150,5 +147,15 @@ namespace Ryujinx.Cpu.LightningJit.Arm64
         {
             mask &= ~(1u << index);
         }
+
+        public static int CalculateMaxTemps(MemoryManagerType mmType)
+        {
+            return mmType.IsHostMapped() ? 1 : 2;
+        }
+
+        public static int CalculateMaxTempsInclFixed(MemoryManagerType mmType)
+        {
+            return CalculateMaxTemps(mmType) + 2;
+        }
     }
 }


@@ -247,7 +247,7 @@ namespace Ryujinx.Cpu.LightningJit.Arm64
                 }
             }
 
-            if (!flags.HasFlag(InstFlags.ReadRt))
+            if (!flags.HasFlag(InstFlags.ReadRt) || name.IsPartialRegisterUpdateMemory())
             {
                 if (flags.HasFlag(InstFlags.Rt))
                 {
@@ -281,7 +281,7 @@ namespace Ryujinx.Cpu.LightningJit.Arm64
                 gprMask |= MaskFromIndex(ExtractRd(flags, encoding));
             }
 
-            if (!flags.HasFlag(InstFlags.ReadRt))
+            if (!flags.HasFlag(InstFlags.ReadRt) || name.IsPartialRegisterUpdateMemory())
             {
                 if (flags.HasFlag(InstFlags.Rt))
                 {


@@ -316,7 +316,7 @@ namespace Ryujinx.Cpu.LightningJit.Arm64.Target.Arm64
             uint pStateUseMask = multiBlock.GlobalUseMask.PStateMask;
 
             CodeWriter writer = new();
-            RegisterAllocator regAlloc = new(gprUseMask, fpSimdUseMask, pStateUseMask, multiBlock.HasHostCall);
+            RegisterAllocator regAlloc = new(memoryManager.Type, gprUseMask, fpSimdUseMask, pStateUseMask, multiBlock.HasHostCall);
             RegisterSaveRestore rsr = new(
                 regAlloc.AllGprMask & AbiConstants.GprCalleeSavedRegsMask,
                 regAlloc.AllFpSimdMask & AbiConstants.FpSimdCalleeSavedRegsMask,


@@ -274,7 +274,8 @@ namespace Ryujinx.Cpu.LightningJit.Arm64.Target.Arm64
 
                 uint tempGprUseMask = gprUseMask | instGprReadMask | instGprWriteMask;
 
-                if (CalculateAvailableTemps(tempGprUseMask) < CalculateRequiredGprTemps(tempGprUseMask) || totalInsts++ >= MaxInstructionsPerFunction)
+                if (CalculateAvailableTemps(tempGprUseMask) < CalculateRequiredGprTemps(memoryManager.Type, tempGprUseMask) ||
+                    totalInsts++ >= MaxInstructionsPerFunction)
                 {
                     isTruncated = true;
                     address -= 4UL;
@@ -378,9 +379,9 @@ namespace Ryujinx.Cpu.LightningJit.Arm64.Target.Arm64
             return false;
         }
 
-        private static int CalculateRequiredGprTemps(uint gprUseMask)
+        private static int CalculateRequiredGprTemps(MemoryManagerType mmType, uint gprUseMask)
         {
-            return BitOperations.PopCount(gprUseMask & RegisterUtils.ReservedRegsMask) + RegisterAllocator.MaxTempsInclFixed;
+            return BitOperations.PopCount(gprUseMask & RegisterUtils.ReservedRegsMask) + RegisterAllocator.CalculateMaxTempsInclFixed(mmType);
         }
 
         private static int CalculateAvailableTemps(uint gprUseMask)


@@ -55,6 +55,16 @@ namespace Ryujinx.Cpu.LightningJit.Arm64.Target.Arm64
             ulong pc,
             uint encoding)
         {
+            if (name.IsPrefetchMemory() && mmType == MemoryManagerType.HostTrackedUnsafe)
+            {
+                // Prefetch to invalid addresses do not cause faults, so for memory manager
+                // types where we need to access the page table before doing the prefetch,
+                // we should make sure we won't try to access an out of bounds page table region.
+                // To do this, we force the masked memory manager variant to be used.
+                mmType = MemoryManagerType.HostTracked;
+            }
+
             switch (addressForm)
             {
                 case AddressForm.OffsetReg:
@@ -511,18 +521,48 @@ namespace Ryujinx.Cpu.LightningJit.Arm64.Target.Arm64
             WriteAddressTranslation(asBits, mmType, regAlloc, ref asm, destination, guestAddress);
         }
 
-        private static void WriteAddressTranslation(int asBits, MemoryManagerType mmType, RegisterAllocator regAlloc, ref Assembler asm, Operand destination, ulong guestAddress)
+        private static void WriteAddressTranslation(
+            int asBits,
+            MemoryManagerType mmType,
+            RegisterAllocator regAlloc,
+            ref Assembler asm,
+            Operand destination,
+            ulong guestAddress)
         {
             asm.Mov(destination, guestAddress);
 
             WriteAddressTranslation(asBits, mmType, regAlloc, ref asm, destination, destination);
         }
 
-        private static void WriteAddressTranslation(int asBits, MemoryManagerType mmType, RegisterAllocator regAlloc, ref Assembler asm, Operand destination, Operand guestAddress)
+        private static void WriteAddressTranslation(
+            int asBits,
+            MemoryManagerType mmType,
+            RegisterAllocator regAlloc,
+            ref Assembler asm,
+            Operand destination,
+            Operand guestAddress)
         {
             Operand basePointer = new(regAlloc.FixedPageTableRegister, RegisterType.Integer, OperandType.I64);
 
-            if (mmType == MemoryManagerType.HostMapped || mmType == MemoryManagerType.HostMappedUnsafe)
+            if (mmType.IsHostTracked())
+            {
+                int tempRegister = regAlloc.AllocateTempGprRegister();
+
+                Operand pte = new(tempRegister, RegisterType.Integer, OperandType.I64);
+
+                asm.Lsr(pte, guestAddress, new Operand(OperandKind.Constant, OperandType.I32, 12));
+
+                if (mmType == MemoryManagerType.HostTracked)
+                {
+                    asm.And(pte, pte, new Operand(OperandKind.Constant, OperandType.I64, ulong.MaxValue >> (64 - (asBits - 12))));
+                }
+
+                asm.LdrRr(pte, basePointer, pte, ArmExtensionType.Uxtx, true);
+                asm.Add(destination, pte, guestAddress);
+
+                regAlloc.FreeTempGprRegister(tempRegister);
+            }
+            else if (mmType.IsHostMapped())
             {
                 if (mmType == MemoryManagerType.HostMapped)
                 {
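For readers unfamiliar with the new mode, the host tracked path emitted above replaces the fixed host base addition with a software page table lookup. The following is a minimal managed sketch of the equivalent lookup, illustrative only: the names Translate, pageTable, addressSpaceBits and masked are not from the commit, and the real emitted code applies the mask to the page index rather than to the address.

    // Illustrative sketch of the host tracked lookup, not code from this commit.
    // Adding the full guest address (page offset included) to the loaded page table
    // entry yields the host address, so each entry effectively stores
    // (host page address) - (guest page address).
    static ulong Translate(ulong guestAddress, ulong[] pageTable, int addressSpaceBits, bool masked)
    {
        const int PageBits = 12;

        if (masked)
        {
            // The plain HostTracked mode masks the address so the page table index can
            // never go out of bounds; HostTrackedUnsafe skips this step.
            guestAddress &= ulong.MaxValue >> (64 - addressSpaceBits);
        }

        ulong pte = pageTable[guestAddress >> PageBits];

        return pte + guestAddress;
    }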


@@ -68,9 +68,9 @@ namespace Ryujinx.Cpu.LightningJit
 
             FunctionTable.Fill = (ulong)Stubs.SlowDispatchStub;
 
-            if (memory.Type.IsHostMapped())
+            if (memory.Type.IsHostMappedOrTracked())
             {
-                NativeSignalHandler.InitializeSignalHandler(MemoryBlock.GetPageSize());
+                NativeSignalHandler.InitializeSignalHandler();
             }
         }


@@ -1,3 +1,4 @@
+using Ryujinx.Common;
 using Ryujinx.Cpu.Signal;
 using Ryujinx.Memory;
 using Ryujinx.Memory.Tracking;
@@ -8,19 +9,27 @@ namespace Ryujinx.Cpu
 {
     public class MemoryEhMeilleure : IDisposable
     {
-        private delegate bool TrackingEventDelegate(ulong address, ulong size, bool write);
+        public delegate ulong TrackingEventDelegate(ulong address, ulong size, bool write);
 
+        private readonly MemoryTracking _tracking;
         private readonly TrackingEventDelegate _trackingEvent;
 
+        private readonly ulong _pageSize;
+
         private readonly ulong _baseAddress;
         private readonly ulong _mirrorAddress;
 
-        public MemoryEhMeilleure(MemoryBlock addressSpace, MemoryBlock addressSpaceMirror, MemoryTracking tracking)
+        public MemoryEhMeilleure(MemoryBlock addressSpace, MemoryBlock addressSpaceMirror, MemoryTracking tracking, TrackingEventDelegate trackingEvent = null)
         {
             _baseAddress = (ulong)addressSpace.Pointer;
             ulong endAddress = _baseAddress + addressSpace.Size;
 
-            _trackingEvent = tracking.VirtualMemoryEvent;
+            _tracking = tracking;
+            _trackingEvent = trackingEvent ?? VirtualMemoryEvent;
+
+            _pageSize = MemoryBlock.GetPageSize();
+
             bool added = NativeSignalHandler.AddTrackedRegion((nuint)_baseAddress, (nuint)endAddress, Marshal.GetFunctionPointerForDelegate(_trackingEvent));
 
             if (!added)
@@ -28,7 +37,7 @@ namespace Ryujinx.Cpu
                 throw new InvalidOperationException("Number of allowed tracked regions exceeded.");
             }
 
-            if (OperatingSystem.IsWindows())
+            if (OperatingSystem.IsWindows() && addressSpaceMirror != null)
             {
                 // Add a tracking event with no signal handler for the mirror on Windows.
                 // The native handler has its own code to check for the partial overlap race when regions are protected by accident,
@@ -46,6 +55,21 @@ namespace Ryujinx.Cpu
             }
         }
 
+        private ulong VirtualMemoryEvent(ulong address, ulong size, bool write)
+        {
+            ulong pageSize = _pageSize;
+            ulong addressAligned = BitUtils.AlignDown(address, pageSize);
+            ulong endAddressAligned = BitUtils.AlignUp(address + size, pageSize);
+            ulong sizeAligned = endAddressAligned - addressAligned;
+
+            if (_tracking.VirtualMemoryEvent(addressAligned, sizeAligned, write))
+            {
+                return _baseAddress + address;
+            }
+
+            return 0;
+        }
+
         public void Dispose()
         {
             GC.SuppressFinalize(this);
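The new VirtualMemoryEvent wrapper above expands the faulting access to whole host pages before forwarding it to the tracking, and returns the corresponding host address when the event is handled (0 otherwise). A small worked example of the alignment, with illustrative values only and a 16 KiB host page size assumed:

    // Illustrative only: values chosen to show the alignment, not taken from the commit.
    ulong pageSize = 0x4000;          // 16 KiB host pages
    ulong address = 0x12345;          // fault offset inside the tracked address space
    ulong size = 0x10;

    ulong addressAligned = address & ~(pageSize - 1);                             // 0x10000
    ulong endAddressAligned = (address + size + pageSize - 1) & ~(pageSize - 1);  // 0x14000
    ulong sizeAligned = endAddressAligned - addressAligned;                       // 0x4000 (one full page)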


@@ -143,7 +143,7 @@ namespace Ryujinx.Cpu
             }
         }
 
-        public PrivateMemoryAllocator(int blockAlignment, MemoryAllocationFlags allocationFlags) : base(blockAlignment, allocationFlags)
+        public PrivateMemoryAllocator(ulong blockAlignment, MemoryAllocationFlags allocationFlags) : base(blockAlignment, allocationFlags)
         {
         }
@@ -180,10 +180,10 @@ namespace Ryujinx.Cpu
         private readonly List<T> _blocks;
 
-        private readonly int _blockAlignment;
+        private readonly ulong _blockAlignment;
         private readonly MemoryAllocationFlags _allocationFlags;
 
-        public PrivateMemoryAllocatorImpl(int blockAlignment, MemoryAllocationFlags allocationFlags)
+        public PrivateMemoryAllocatorImpl(ulong blockAlignment, MemoryAllocationFlags allocationFlags)
         {
             _blocks = new List<T>();
             _blockAlignment = blockAlignment;
@@ -212,7 +212,7 @@ namespace Ryujinx.Cpu
                 }
             }
 
-            ulong blockAlignedSize = BitUtils.AlignUp(size, (ulong)_blockAlignment);
+            ulong blockAlignedSize = BitUtils.AlignUp(size, _blockAlignment);
 
             var memory = new MemoryBlock(blockAlignedSize, _allocationFlags);
             var newBlock = createBlock(memory, blockAlignedSize);


@@ -70,7 +70,7 @@ namespace Ryujinx.Cpu.Signal
             config = new SignalHandlerConfig();
         }
 
-        public static void InitializeSignalHandler(ulong pageSize, Func<IntPtr, IntPtr, IntPtr> customSignalHandlerFactory = null)
+        public static void InitializeSignalHandler(Func<IntPtr, IntPtr, IntPtr> customSignalHandlerFactory = null)
         {
             if (_initialized)
             {
@@ -90,7 +90,7 @@ namespace Ryujinx.Cpu.Signal
 
             if (OperatingSystem.IsLinux() || OperatingSystem.IsMacOS())
             {
-                _signalHandlerPtr = MapCode(NativeSignalHandlerGenerator.GenerateUnixSignalHandler(_handlerConfig, rangeStructSize, pageSize));
+                _signalHandlerPtr = MapCode(NativeSignalHandlerGenerator.GenerateUnixSignalHandler(_handlerConfig, rangeStructSize));
 
                 if (customSignalHandlerFactory != null)
                 {
@@ -107,7 +107,7 @@ namespace Ryujinx.Cpu.Signal
                 config.StructAddressOffset = 40; // ExceptionInformation1
                 config.StructWriteOffset = 32; // ExceptionInformation0
 
-                _signalHandlerPtr = MapCode(NativeSignalHandlerGenerator.GenerateWindowsSignalHandler(_handlerConfig, rangeStructSize, pageSize));
+                _signalHandlerPtr = MapCode(NativeSignalHandlerGenerator.GenerateWindowsSignalHandler(_handlerConfig, rangeStructSize));
 
                 if (customSignalHandlerFactory != null)
                 {
@@ -175,5 +175,10 @@ namespace Ryujinx.Cpu.Signal
 
             return false;
         }
+
+        public static bool SupportsFaultAddressPatching()
+        {
+            return NativeSignalHandlerGenerator.SupportsFaultAddressPatchingForHost();
+        }
     }
 }


@@ -1622,14 +1622,6 @@ namespace Ryujinx.Graphics.Gpu.Image
         /// <param name="size">The size of the flushing memory access</param>
         public void FlushAction(TextureGroupHandle handle, ulong address, ulong size)
         {
-            // If the page size is larger than 4KB, we will have a lot of false positives for flushing.
-            // Let's avoid flushing textures that are unlikely to be read from CPU to improve performance
-            // on those platforms.
-            if (!_physicalMemory.Supports4KBPages && !Storage.Info.IsLinear && !_context.IsGpuThread())
-            {
-                return;
-            }
-
             // There is a small gap here where the action is removed but _actionRegistered is still 1.
             // In this case it will skip registering the action, but here we are already handling it,
             // so there shouldn't be any issue as it's the same handler for all actions.


@@ -23,11 +23,6 @@ namespace Ryujinx.Graphics.Gpu.Memory
         private readonly IVirtualMemoryManagerTracked _cpuMemory;
         private int _referenceCount;
 
-        /// <summary>
-        /// Indicates whenever the memory manager supports 4KB pages.
-        /// </summary>
-        public bool Supports4KBPages => _cpuMemory.Supports4KBPages;
-
         /// <summary>
         /// In-memory shader cache.
         /// </summary>


@@ -72,7 +72,8 @@ namespace Ryujinx.HLE.HOS
 
             AddressSpace addressSpace = null;
 
-            if (mode == MemoryManagerMode.HostMapped || mode == MemoryManagerMode.HostMappedUnsafe)
+            // We want to use host tracked mode if the host page size is > 4KB.
+            if ((mode == MemoryManagerMode.HostMapped || mode == MemoryManagerMode.HostMappedUnsafe) && MemoryBlock.GetPageSize() <= 0x1000)
             {
                 if (!AddressSpace.TryCreate(context.Memory, addressSpaceSize, MemoryBlock.GetPageSize() == MemoryManagerHostMapped.PageSize, out addressSpace))
                 {
@@ -91,13 +92,21 @@ namespace Ryujinx.HLE.HOS
                 case MemoryManagerMode.HostMapped:
                 case MemoryManagerMode.HostMappedUnsafe:
-                    if (addressSpaceSize != addressSpace.AddressSpaceSize)
+                    if (addressSpace == null)
                     {
-                        Logger.Warning?.Print(LogClass.Emulation, $"Allocated address space (0x{addressSpace.AddressSpaceSize:X}) is smaller than guest application requirements (0x{addressSpaceSize:X})");
+                        var memoryManagerHostTracked = new MemoryManagerHostTracked(context.Memory, addressSpaceSize, mode == MemoryManagerMode.HostMappedUnsafe, invalidAccessHandler);
+                        processContext = new ArmProcessContext<MemoryManagerHostTracked>(pid, cpuEngine, _gpu, memoryManagerHostTracked, addressSpaceSize, for64Bit);
                     }
+                    else
+                    {
+                        if (addressSpaceSize != addressSpace.AddressSpaceSize)
+                        {
+                            Logger.Warning?.Print(LogClass.Emulation, $"Allocated address space (0x{addressSpace.AddressSpaceSize:X}) is smaller than guest application requirements (0x{addressSpaceSize:X})");
+                        }
 
-                    var memoryManagerHostMapped = new MemoryManagerHostMapped(addressSpace, mode == MemoryManagerMode.HostMappedUnsafe, invalidAccessHandler);
-                    processContext = new ArmProcessContext<MemoryManagerHostMapped>(pid, cpuEngine, _gpu, memoryManagerHostMapped, addressSpace.AddressSpaceSize, for64Bit);
+                        var memoryManagerHostMapped = new MemoryManagerHostMapped(addressSpace, mode == MemoryManagerMode.HostMappedUnsafe, invalidAccessHandler);
+                        processContext = new ArmProcessContext<MemoryManagerHostMapped>(pid, cpuEngine, _gpu, memoryManagerHostMapped, addressSpace.AddressSpaceSize, for64Bit);
+                    }
                     break;
                 default:


@@ -165,6 +165,29 @@ namespace Ryujinx.HLE.HOS.Kernel.Memory
         /// <inheritdoc/>
         protected override Result MapForeign(IEnumerable<HostMemoryRange> regions, ulong va, ulong size)
         {
+            ulong backingStart = (ulong)Context.Memory.Pointer;
+            ulong backingEnd = backingStart + Context.Memory.Size;
+
+            KPageList pageList = new();
+
+            foreach (HostMemoryRange region in regions)
+            {
+                // If the range is inside the physical memory, it is shared and we should increment the page count,
+                // otherwise it is private and we don't need to increment the page count.
+                if (region.Address >= backingStart && region.Address < backingEnd)
+                {
+                    pageList.AddRange(region.Address - backingStart + DramMemoryMap.DramBase, region.Size / PageSize);
+                }
+            }
+
+            using var scopedPageList = new KScopedPageList(Context.MemoryManager, pageList);
+
+            foreach (var pageNode in pageList)
+            {
+                Context.CommitMemory(pageNode.Address - DramMemoryMap.DramBase, pageNode.PagesCount * PageSize);
+            }
+
             ulong offset = 0;
 
             foreach (var region in regions)
@@ -174,6 +197,8 @@ namespace Ryujinx.HLE.HOS.Kernel.Memory
                 offset += region.Size;
             }
 
+            scopedPageList.SignalSuccess();
+
             return Result.Success;
         }


@@ -283,9 +283,9 @@ namespace Ryujinx.Memory
             {
                 var hostRegion = hostRegions[i];
 
-                if ((ulong)hostRegion.Address >= backingStart && (ulong)hostRegion.Address < backingEnd)
+                if (hostRegion.Address >= backingStart && hostRegion.Address < backingEnd)
                 {
-                    regions[count++] = new MemoryRange((ulong)hostRegion.Address - backingStart, hostRegion.Size);
+                    regions[count++] = new MemoryRange(hostRegion.Address - backingStart, hostRegion.Size);
                 }
             }


@@ -70,9 +70,12 @@ namespace Ryujinx.Memory.Tracking
             {
                 _lastPermission = MemoryPermission.Invalid;
 
-                foreach (RegionHandle handle in Handles)
+                if (!Guest)
                 {
-                    handle.SignalMappingChanged(mapped);
+                    foreach (RegionHandle handle in Handles)
+                    {
+                        handle.SignalMappingChanged(mapped);
+                    }
                 }
             }