
Delete old 16KB page workarounds (#6584)

* Delete old 16KB page workarounds

* Rename Supports4KBPage to UsesPrivateAllocations

* Format whitespace

* This one should be false too

* Update XML doc
Committed by gdkchan on 2024-04-06 13:51:44 -03:00 (via GitHub)
parent 3be616207d
commit 12b235700c
No known key found for this signature in database
GPG key ID: B5690EEEBB952194
12 changed files with 25 additions and 459 deletions


@@ -1,5 +1,3 @@
using Ryujinx.Common;
using Ryujinx.Common.Collections;
using Ryujinx.Memory;
using System;
@@ -7,175 +5,23 @@ namespace Ryujinx.Cpu
{
public class AddressSpace : IDisposable
{
private const int DefaultBlockAlignment = 1 << 20;
private enum MappingType : byte
{
None,
Private,
Shared,
}
private class Mapping : IntrusiveRedBlackTreeNode<Mapping>, IComparable<Mapping>
{
public ulong Address { get; private set; }
public ulong Size { get; private set; }
public ulong EndAddress => Address + Size;
public MappingType Type { get; private set; }
public Mapping(ulong address, ulong size, MappingType type)
{
Address = address;
Size = size;
Type = type;
}
public Mapping Split(ulong splitAddress)
{
ulong leftSize = splitAddress - Address;
ulong rightSize = EndAddress - splitAddress;
Mapping left = new(Address, leftSize, Type);
Address = splitAddress;
Size = rightSize;
return left;
}
public void UpdateState(MappingType newType)
{
Type = newType;
}
public void Extend(ulong sizeDelta)
{
Size += sizeDelta;
}
public int CompareTo(Mapping other)
{
if (Address < other.Address)
{
return -1;
}
else if (Address <= other.EndAddress - 1UL)
{
return 0;
}
else
{
return 1;
}
}
}
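A note on the comparer above (and the identical one in the PrivateMapping class below): CompareTo reports equality for any overlapping range, so the tree can be probed with a one-byte dummy mapping to locate the node that contains a given address (see the GetNode(new Mapping(va, 1UL, ...)) calls further down). A worked example with illustrative values, not taken from the repository:

    // Node covering [0x10000, 0x14000) and a 1-byte probe at address 0x12345.
    // probe.CompareTo(node):
    //   probe.Address (0x12345) is not < node.Address (0x10000)
    //   probe.Address (0x12345) <= node.EndAddress - 1 (0x13FFF)  => returns 0
    // The probe therefore compares "equal" to the containing node, so
    // IntrusiveRedBlackTree<Mapping>.GetNode returns the mapping that covers 0x12345.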
private class PrivateMapping : IntrusiveRedBlackTreeNode<PrivateMapping>, IComparable<PrivateMapping>
{
public ulong Address { get; private set; }
public ulong Size { get; private set; }
public ulong EndAddress => Address + Size;
public PrivateMemoryAllocation PrivateAllocation { get; private set; }
public PrivateMapping(ulong address, ulong size, PrivateMemoryAllocation privateAllocation)
{
Address = address;
Size = size;
PrivateAllocation = privateAllocation;
}
public PrivateMapping Split(ulong splitAddress)
{
ulong leftSize = splitAddress - Address;
ulong rightSize = EndAddress - splitAddress;
(var leftAllocation, PrivateAllocation) = PrivateAllocation.Split(leftSize);
PrivateMapping left = new(Address, leftSize, leftAllocation);
Address = splitAddress;
Size = rightSize;
return left;
}
public void Map(MemoryBlock baseBlock, MemoryBlock mirrorBlock, PrivateMemoryAllocation newAllocation)
{
baseBlock.MapView(newAllocation.Memory, newAllocation.Offset, Address, Size);
mirrorBlock.MapView(newAllocation.Memory, newAllocation.Offset, Address, Size);
PrivateAllocation = newAllocation;
}
public void Unmap(MemoryBlock baseBlock, MemoryBlock mirrorBlock)
{
if (PrivateAllocation.IsValid)
{
baseBlock.UnmapView(PrivateAllocation.Memory, Address, Size);
mirrorBlock.UnmapView(PrivateAllocation.Memory, Address, Size);
PrivateAllocation.Dispose();
}
PrivateAllocation = default;
}
public void Extend(ulong sizeDelta)
{
Size += sizeDelta;
}
public int CompareTo(PrivateMapping other)
{
if (Address < other.Address)
{
return -1;
}
else if (Address <= other.EndAddress - 1UL)
{
return 0;
}
else
{
return 1;
}
}
}
private readonly MemoryBlock _backingMemory;
private readonly PrivateMemoryAllocator _privateMemoryAllocator;
private readonly IntrusiveRedBlackTree<Mapping> _mappingTree;
private readonly IntrusiveRedBlackTree<PrivateMapping> _privateTree;
private readonly object _treeLock;
private readonly bool _supports4KBPages;
public MemoryBlock Base { get; }
public MemoryBlock Mirror { get; }
public ulong AddressSpaceSize { get; }
-public AddressSpace(MemoryBlock backingMemory, MemoryBlock baseMemory, MemoryBlock mirrorMemory, ulong addressSpaceSize, bool supports4KBPages)
+public AddressSpace(MemoryBlock backingMemory, MemoryBlock baseMemory, MemoryBlock mirrorMemory, ulong addressSpaceSize)
{
if (!supports4KBPages)
{
_privateMemoryAllocator = new PrivateMemoryAllocator(DefaultBlockAlignment, MemoryAllocationFlags.Mirrorable | MemoryAllocationFlags.NoMap);
_mappingTree = new IntrusiveRedBlackTree<Mapping>();
_privateTree = new IntrusiveRedBlackTree<PrivateMapping>();
_treeLock = new object();
_mappingTree.Add(new Mapping(0UL, addressSpaceSize, MappingType.None));
_privateTree.Add(new PrivateMapping(0UL, addressSpaceSize, default));
}
_backingMemory = backingMemory;
_supports4KBPages = supports4KBPages;
Base = baseMemory;
Mirror = mirrorMemory;
AddressSpaceSize = addressSpaceSize;
}
-public static bool TryCreate(MemoryBlock backingMemory, ulong asSize, bool supports4KBPages, out AddressSpace addressSpace)
+public static bool TryCreate(MemoryBlock backingMemory, ulong asSize, out AddressSpace addressSpace)
{
addressSpace = null;
@@ -193,7 +39,7 @@ namespace Ryujinx.Cpu
{
baseMemory = new MemoryBlock(addressSpaceSize, AsFlags);
mirrorMemory = new MemoryBlock(addressSpaceSize, AsFlags);
-addressSpace = new AddressSpace(backingMemory, baseMemory, mirrorMemory, addressSpaceSize, supports4KBPages);
+addressSpace = new AddressSpace(backingMemory, baseMemory, mirrorMemory, addressSpaceSize);
break;
}
@@ -208,290 +54,21 @@
}
public void Map(ulong va, ulong pa, ulong size, MemoryMapFlags flags)
{
if (_supports4KBPages)
{
Base.MapView(_backingMemory, pa, va, size);
Mirror.MapView(_backingMemory, pa, va, size);
return;
}
lock (_treeLock)
{
ulong alignment = MemoryBlock.GetPageSize();
bool isAligned = ((va | pa | size) & (alignment - 1)) == 0;
if (flags.HasFlag(MemoryMapFlags.Private) && !isAligned)
{
Update(va, pa, size, MappingType.Private);
}
else
{
// The update method assumes that shared mappings are already aligned.
if (!flags.HasFlag(MemoryMapFlags.Private))
{
if ((va & (alignment - 1)) != (pa & (alignment - 1)))
{
throw new InvalidMemoryRegionException($"Virtual address 0x{va:X} and physical address 0x{pa:X} are misaligned and can't be aligned.");
}
ulong endAddress = va + size;
va = BitUtils.AlignDown(va, alignment);
pa = BitUtils.AlignDown(pa, alignment);
size = BitUtils.AlignUp(endAddress, alignment) - va;
}
Update(va, pa, size, MappingType.Shared);
}
}
}
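For context on the shared-mapping workaround deleted above: before mapping, the requested range was expanded outward to host-page granularity, which only works when the virtual and physical addresses share the same offset within a page. A worked example of that arithmetic, assuming a 16 KB (0x4000) host page and illustrative addresses:

    // Illustrative values (not from the repository); alignment = 0x4000 (16 KB page).
    // va = 0x123A000, pa = 0x5676000, size = 0x3000
    // (va & 0x3FFF) == (pa & 0x3FFF) == 0x2000  => the two addresses can be aligned together.
    // endAddress = va + size                  = 0x123D000
    // va   = AlignDown(va, 0x4000)            = 0x1238000
    // pa   = AlignDown(pa, 0x4000)            = 0x5674000
    // size = AlignUp(endAddress, 0x4000) - va = 0x1240000 - 0x1238000 = 0x8000
    // The mapping then covers every host page that touches the requested 4 KB-granular range.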
public void Unmap(ulong va, ulong size)
{
if (_supports4KBPages)
{
Base.UnmapView(_backingMemory, va, size);
Mirror.UnmapView(_backingMemory, va, size);
return;
}
lock (_treeLock)
{
Update(va, 0UL, size, MappingType.None);
}
}
private void Update(ulong va, ulong pa, ulong size, MappingType type)
{
Mapping map = _mappingTree.GetNode(new Mapping(va, 1UL, MappingType.None));
Update(map, va, pa, size, type);
}
private Mapping Update(Mapping map, ulong va, ulong pa, ulong size, MappingType type)
{
ulong endAddress = va + size;
for (; map != null; map = map.Successor)
{
if (map.Address < va)
{
_mappingTree.Add(map.Split(va));
}
if (map.EndAddress > endAddress)
{
Mapping newMap = map.Split(endAddress);
_mappingTree.Add(newMap);
map = newMap;
}
switch (type)
{
case MappingType.None:
if (map.Type == MappingType.Shared)
{
ulong startOffset = map.Address - va;
ulong mapVa = va + startOffset;
ulong mapSize = Math.Min(size - startOffset, map.Size);
ulong mapEndAddress = mapVa + mapSize;
ulong alignment = MemoryBlock.GetPageSize();
mapVa = BitUtils.AlignDown(mapVa, alignment);
mapEndAddress = BitUtils.AlignUp(mapEndAddress, alignment);
mapSize = mapEndAddress - mapVa;
Base.UnmapView(_backingMemory, mapVa, mapSize);
Mirror.UnmapView(_backingMemory, mapVa, mapSize);
}
else
{
UnmapPrivate(va, size);
}
break;
case MappingType.Private:
if (map.Type == MappingType.Shared)
{
throw new InvalidMemoryRegionException($"Private mapping request at 0x{va:X} with size 0x{size:X} overlaps shared mapping at 0x{map.Address:X} with size 0x{map.Size:X}.");
}
else
{
MapPrivate(va, size);
}
break;
case MappingType.Shared:
if (map.Type != MappingType.None)
{
throw new InvalidMemoryRegionException($"Shared mapping request at 0x{va:X} with size 0x{size:X} overlaps mapping at 0x{map.Address:X} with size 0x{map.Size:X}.");
}
else
{
ulong startOffset = map.Address - va;
ulong mapPa = pa + startOffset;
ulong mapVa = va + startOffset;
ulong mapSize = Math.Min(size - startOffset, map.Size);
Base.MapView(_backingMemory, mapPa, mapVa, mapSize);
Mirror.MapView(_backingMemory, mapPa, mapVa, mapSize);
}
break;
}
map.UpdateState(type);
map = TryCoalesce(map);
if (map.EndAddress >= endAddress)
{
break;
}
}
return map;
}
private Mapping TryCoalesce(Mapping map)
{
Mapping previousMap = map.Predecessor;
Mapping nextMap = map.Successor;
if (previousMap != null && CanCoalesce(previousMap, map))
{
previousMap.Extend(map.Size);
_mappingTree.Remove(map);
map = previousMap;
}
if (nextMap != null && CanCoalesce(map, nextMap))
{
map.Extend(nextMap.Size);
_mappingTree.Remove(nextMap);
}
return map;
}
private static bool CanCoalesce(Mapping left, Mapping right)
{
return left.Type == right.Type;
}
private void MapPrivate(ulong va, ulong size)
{
ulong endAddress = va + size;
ulong alignment = MemoryBlock.GetPageSize();
// Expand the range outwards based on page size to ensure that at least the requested region is mapped.
ulong vaAligned = BitUtils.AlignDown(va, alignment);
ulong endAddressAligned = BitUtils.AlignUp(endAddress, alignment);
PrivateMapping map = _privateTree.GetNode(new PrivateMapping(va, 1UL, default));
for (; map != null; map = map.Successor)
{
if (!map.PrivateAllocation.IsValid)
{
if (map.Address < vaAligned)
{
_privateTree.Add(map.Split(vaAligned));
}
if (map.EndAddress > endAddressAligned)
{
PrivateMapping newMap = map.Split(endAddressAligned);
_privateTree.Add(newMap);
map = newMap;
}
map.Map(Base, Mirror, _privateMemoryAllocator.Allocate(map.Size, MemoryBlock.GetPageSize()));
}
if (map.EndAddress >= endAddressAligned)
{
break;
}
}
}
private void UnmapPrivate(ulong va, ulong size)
{
ulong endAddress = va + size;
ulong alignment = MemoryBlock.GetPageSize();
// Shrink the range inwards based on page size to ensure we won't unmap memory that might be still in use.
ulong vaAligned = BitUtils.AlignUp(va, alignment);
ulong endAddressAligned = BitUtils.AlignDown(endAddress, alignment);
if (endAddressAligned <= vaAligned)
{
return;
}
PrivateMapping map = _privateTree.GetNode(new PrivateMapping(va, 1UL, default));
for (; map != null; map = map.Successor)
{
if (map.PrivateAllocation.IsValid)
{
if (map.Address < vaAligned)
{
_privateTree.Add(map.Split(vaAligned));
}
if (map.EndAddress > endAddressAligned)
{
PrivateMapping newMap = map.Split(endAddressAligned);
_privateTree.Add(newMap);
map = newMap;
}
map.Unmap(Base, Mirror);
map = TryCoalesce(map);
}
if (map.EndAddress >= endAddressAligned)
{
break;
}
}
}
private PrivateMapping TryCoalesce(PrivateMapping map)
{
PrivateMapping previousMap = map.Predecessor;
PrivateMapping nextMap = map.Successor;
if (previousMap != null && CanCoalesce(previousMap, map))
{
previousMap.Extend(map.Size);
_privateTree.Remove(map);
map = previousMap;
}
if (nextMap != null && CanCoalesce(map, nextMap))
{
map.Extend(nextMap.Size);
_privateTree.Remove(nextMap);
}
return map;
}
private static bool CanCoalesce(PrivateMapping left, PrivateMapping right)
{
return !left.PrivateAllocation.IsValid && !right.PrivateAllocation.IsValid;
}
public void Dispose()
{
GC.SuppressFinalize(this);
_privateMemoryAllocator?.Dispose();
Base.Dispose();
Mirror.Dispose();
}
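With the mapping trees and the private allocator gone, the surviving Map and Unmap bodies reduce to plain view mappings over the backing memory. A minimal sketch assembled from the context lines kept in this hunk (the full new file is not reproduced here):

    public void Map(ulong va, ulong pa, ulong size, MemoryMapFlags flags)
    {
        // Map the backing memory region into both the base and mirror address spaces.
        Base.MapView(_backingMemory, pa, va, size);
        Mirror.MapView(_backingMemory, pa, va, size);
    }

    public void Unmap(ulong va, ulong size)
    {
        // Remove the corresponding views from both address spaces.
        Base.UnmapView(_backingMemory, va, size);
        Mirror.UnmapView(_backingMemory, va, size);
    }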


@@ -28,7 +28,7 @@ namespace Ryujinx.Cpu.AppleHv
private readonly ManagedPageFlags _pages;
-public bool Supports4KBPages => true;
+public bool UsesPrivateAllocations => false;
public int AddressSpaceBits { get; }


@@ -25,7 +25,7 @@ namespace Ryujinx.Cpu.Jit
private readonly InvalidAccessHandler _invalidAccessHandler;
/// <inheritdoc/>
-public bool Supports4KBPages => true;
+public bool UsesPrivateAllocations => false;
/// <summary>
/// Address space width in bits.


@@ -27,7 +27,7 @@ namespace Ryujinx.Cpu.Jit
private readonly ManagedPageFlags _pages;
/// <inheritdoc/>
-public bool Supports4KBPages => MemoryBlock.GetPageSize() == PageSize;
+public bool UsesPrivateAllocations => false;
public int AddressSpaceBits { get; }


@@ -33,7 +33,7 @@ namespace Ryujinx.Cpu.Jit
protected override ulong AddressSpaceSize { get; }
/// <inheritdoc/>
-public bool Supports4KBPages => false;
+public bool UsesPrivateAllocations => true;
public IntPtr PageTablePointer => _nativePageTable.PageTablePointer;


@@ -75,7 +75,7 @@ namespace Ryujinx.HLE.HOS
// We want to use host tracked mode if the host page size is > 4KB.
if ((mode == MemoryManagerMode.HostMapped || mode == MemoryManagerMode.HostMappedUnsafe) && MemoryBlock.GetPageSize() <= 0x1000)
{
-if (!AddressSpace.TryCreate(context.Memory, addressSpaceSize, MemoryBlock.GetPageSize() == MemoryManagerHostMapped.PageSize, out addressSpace))
+if (!AddressSpace.TryCreate(context.Memory, addressSpaceSize, out addressSpace))
{
Logger.Warning?.Print(LogClass.Cpu, "Address space creation failed, falling back to software page table");
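The condition above is what makes the deleted workarounds unnecessary: host mapped mode, and with it the AddressSpace path, is only chosen when the host page size is 4 KB. A short sketch of that decision, not a verbatim excerpt (the 16 KB host case is illustrative):

    ulong hostPageSize = MemoryBlock.GetPageSize();
    bool hostMappedAllowed =
        (mode == MemoryManagerMode.HostMapped || mode == MemoryManagerMode.HostMappedUnsafe)
        && hostPageSize <= 0x1000; // 4 KB hosts only
    // On a 16 KB-page host (hostPageSize == 0x4000), hostMappedAllowed is false, the
    // host tracked memory manager is used instead, and AddressSpace never sees non-4 KB pages.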


@@ -11,7 +11,7 @@ namespace Ryujinx.HLE.HOS.Kernel.Memory
{
private readonly IVirtualMemoryManager _cpuMemory;
-protected override bool Supports4KBPages => _cpuMemory.Supports4KBPages;
+protected override bool UsesPrivateAllocations => _cpuMemory.UsesPrivateAllocations;
public KPageTable(KernelContext context, IVirtualMemoryManager cpuMemory, ulong reservedAddressSpaceSize) : base(context, reservedAddressSpaceSize)
{


@@ -32,7 +32,7 @@ namespace Ryujinx.HLE.HOS.Kernel.Memory
private const int MaxBlocksNeededForInsertion = 2;
protected readonly KernelContext Context;
-protected virtual bool Supports4KBPages => true;
+protected virtual bool UsesPrivateAllocations => false;
public ulong AddrSpaceStart { get; private set; }
public ulong AddrSpaceEnd { get; private set; }
@@ -1947,17 +1947,17 @@ namespace Ryujinx.HLE.HOS.Kernel.Memory
Result result;
-if (srcPageTable.Supports4KBPages)
+if (srcPageTable.UsesPrivateAllocations)
+{
+result = MapForeign(srcPageTable.GetHostRegions(addressRounded, alignedSize), currentVa, alignedSize);
+}
+else
{
KPageList pageList = new();
srcPageTable.GetPhysicalRegions(addressRounded, alignedSize, pageList);
result = MapPages(currentVa, pageList, permission, MemoryMapFlags.None);
}
-else
-{
-result = MapForeign(srcPageTable.GetHostRegions(addressRounded, alignedSize), currentVa, alignedSize);
-}
if (result != Result.Success)
{
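Note that the rename flips the property's polarity, which is why the two branch bodies above trade places while the selected mapping path stays the same. In shorthand (based on the property changes elsewhere in this commit; only the host tracked manager now returns true):

    // old: Supports4KBPages == true   <->  new: UsesPrivateAllocations == false
    // old: Supports4KBPages == false  <->  new: UsesPrivateAllocations == true
    // So "if (srcPageTable.UsesPrivateAllocations)" now selects the MapForeign path that the
    // old "else" branch selected, and the KPageList path moves into the new "else".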


@@ -2,7 +2,6 @@ using Ryujinx.Common;
using Ryujinx.HLE.HOS.Kernel.Common;
using Ryujinx.HLE.HOS.Kernel.Process;
using Ryujinx.Horizon.Common;
using Ryujinx.Memory;
namespace Ryujinx.HLE.HOS.Kernel.Memory
{
@@ -49,18 +48,8 @@ namespace Ryujinx.HLE.HOS.Kernel.Memory
return KernelResult.InvalidPermission;
}
// On platforms with page size > 4 KB, this can fail due to the address not being page aligned,
// we can return an error to force the application to retry with a different address.
try
{
return memoryManager.MapPages(address, _pageList, MemoryState.SharedMemory, permission);
}
catch (InvalidMemoryRegionException)
{
return KernelResult.InvalidMemState;
}
}
public Result UnmapFromProcess(KPageTableBase memoryManager, ulong address, ulong size, KProcess process)
{


@@ -13,7 +13,7 @@ namespace Ryujinx.Memory
public sealed class AddressSpaceManager : VirtualMemoryManagerBase, IVirtualMemoryManager
{
/// <inheritdoc/>
-public bool Supports4KBPages => true;
+public bool UsesPrivateAllocations => false;
/// <summary>
/// Address space width in bits.


@@ -8,10 +8,10 @@ namespace Ryujinx.Memory
public interface IVirtualMemoryManager
{
/// <summary>
-/// Indicates whenever the memory manager supports aliasing pages at 4KB granularity.
+/// Indicates whether the memory manager creates private allocations when the <see cref="MemoryMapFlags.Private"/> flag is set on map.
/// </summary>
-/// <returns>True if 4KB pages are supported by the memory manager, false otherwise</returns>
-bool Supports4KBPages { get; }
+/// <returns>True if private mappings might be used, false otherwise</returns>
+bool UsesPrivateAllocations { get; }
/// <summary>
/// Maps a virtual memory range into a physical memory range.
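For callers, only the property name and its documented meaning change. A minimal caller-side sketch, assuming nothing beyond the interface member shown above (the helper itself is hypothetical and not part of this commit):

    using Ryujinx.Memory;

    static class MemoryManagerInfo
    {
        // Hypothetical helper: summarizes how Private-flagged mappings are backed.
        public static string DescribeBacking(IVirtualMemoryManager mm)
        {
            return mm.UsesPrivateAllocations
                ? "MemoryMapFlags.Private mappings are backed by private allocations"
                : "all mappings are views into the shared backing memory";
        }
    }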


@@ -8,7 +8,7 @@ namespace Ryujinx.Tests.Memory
{
public class MockVirtualMemoryManager : IVirtualMemoryManager
{
-public bool Supports4KBPages => true;
+public bool UsesPrivateAllocations => false;
public bool NoMappings = false;