Mirror of https://github.com/ryujinx-mirror/ryujinx.git, synced 2024-12-23 18:45:44 +00:00
Commit 22b2cb39af
* Turn `MemoryOperand` into a struct
* Remove `IntrinsicOperation`
* Remove `PhiNode`
* Remove `Node`
* Turn `Operand` into a struct
* Turn `Operation` into a struct
* Clean up pool management methods
* Add `Arena` allocator
* Move `OperationHelper` to `Operation.Factory`
* Move `OperandHelper` to `Operand.Factory`
* Optimize `Operation` a bit
* Fix `Arena` initialization
* Rename `NativeList<T>` to `ArenaList<T>`
* Reduce `Operand` size from 88 to 56 bytes
* Reduce `Operation` size from 56 to 40 bytes
* Add optimistic interning of Register & Constant operands
* Optimize `RegisterUsage` pass a bit
* Optimize `RemoveUnusedNodes` pass a bit

  Iterating in reverse order allows killing dependency chains in a single pass.

* Fix PPTC symbols
* Optimize `BasicBlock` a bit

  Reduces allocations from `_successor` & `DominanceFrontiers`.

* Fix `Operation` resize
* Make `Arena` expandable

  Change the arena allocator to be expandable by allocating in pages, with some of them being pooled. Currently 32 pages are pooled. An LRU removal mechanism should probably be added to it.

  Apparently MHR can allocate bitmaps large enough to exceed the 16 MB limit for the type.

* Move `Arena` & `ArenaList` to `Common`
* Remove `ThreadStaticPool` & co.
* Add `PhiOperation`
* Reduce `Operand` size from 56 to 48 bytes
* Add linear probing to the `Operand` intern table
* Optimize `HybridAllocator` a bit
* Add `Allocators` class
* Tune `ArenaAllocator` sizes
* Add page removal mechanism to `ArenaAllocator`

  Remove pages which have not been used for more than 5 s after each reset. I am on the fence about whether this would be better done with a Gen2 callback object like the one in `System.Buffers.ArrayPool<T>` to trim the pool, because right now, if a large translation happens, the pages are freed only after a reset. That reset may not happen for a while if no new translation is hit, and the arena base sizes are rather small.

* Fix OOM when allocating larger than the page size in `ArenaAllocator`

  Tweak the resizing mechanism for `Operand.Uses` and `Operand.Assignments`.

* Optimize `Optimizer` a bit
* Optimize `Operand.Add<T>/Remove<T>` a bit
* Clean up `PreAllocator`
* Fix phi insertion order

  Reduces codegen diffs.

* Fix code alignment
* Use new heuristics for degree of parallelism
* Suppress warnings
* Address gdkchan's feedback

  Renamed `GetValue()` to `GetValueUnsafe()` to make it clearer that `Operand.Value` should usually not be modified directly.

* Add fast path to `ArenaAllocator` (see the C# sketch after this list)

  Assembly for `ArenaAllocator.Allocate(ulong)`:

  ```asm
  .L0:
      mov   rax, [rcx+0x18]
      lea   r8,  [rax+rdx]
      cmp   r8,  [rcx+0x10]
      ja    short .L2
  .L1:
      mov   rdx, [rcx+8]
      add   rax, [rdx+8]
      mov   [rcx+0x18], r8
      ret
  .L2:
      jmp   ArenaAllocator.AllocateSlow(UInt64)
  ```

  A few variables/fields had to be changed to `ulong` so that RyuJIT avoids emitting zero-extends.

* Implement a new heuristic to free pooled pages

  If an arena is used often, it is more likely that its pages will be needed, so the pages are kept for longer (e.g. during a PPTC rebuild or bursts of compilations). If it is not used often, then it is more likely that its pages will not be needed (e.g. after a PPTC rebuild or bursts of compilations).

* Address riperiperi's feedback
* Use `EqualityComparer<T>` in `IntrusiveList<T>`

  Avoids a potential GC hole in `Equals(T, T)`.
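The fast path in that assembly listing is a classic bump-pointer bounds check. A minimal C# sketch of the same shape, for illustration only — the `_page`, `_limit`, and `_index` fields and the `AllocateSlow` helper are assumptions made for this sketch, not the actual `ArenaAllocator` internals:

```csharp
using System;

// Minimal bump-pointer sketch of the fast path described above. Field names
// and layout are illustrative assumptions, not the real ArenaAllocator.
unsafe class BumpArena
{
    private byte* _page;   // Base address of the current page.
    private ulong _limit;  // Page size in bytes, checked on every call.
    private ulong _index;  // Current offset into the page.

    public void* Allocate(ulong size)
    {
        ulong endIndex = _index + size;

        // Fast path: the request fits in the current page.
        if (endIndex <= _limit)
        {
            void* result = _page + _index;

            _index = endIndex;

            return result;
        }

        // Slow path: acquire a new (possibly pooled) page, then retry.
        return AllocateSlow(size);
    }

    private void* AllocateSlow(ulong size)
    {
        // Page allocation and pooling are elided from this sketch.
        throw new NotImplementedException();
    }
}
```

Using `ulong` for `_index` and `_limit` mirrors the commit note about avoiding zero-extends in the generated code.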
216 lines · No EOL · 5 KiB · C#
using System;
using System.Collections;
using System.Collections.Generic;
using System.Numerics;

namespace ARMeilleure.Common
{
    unsafe class BitMap : IEnumerable<int>, IDisposable
    {
        private const int IntSize = 64;          // Bits per backing word.
        private const int IntMask = IntSize - 1;

        private int _count;                      // Number of allocated words.
        private long* _masks;                    // Unmanaged backing storage.
        private readonly Allocator _allocator;

        public BitMap(Allocator allocator)
        {
            _allocator = allocator;
        }

        public BitMap(Allocator allocator, int capacity) : this(allocator)
        {
            EnsureCapacity(capacity);
        }

        // Returns true if the bit was newly set, false if it was already set.
        public bool Set(int bit)
        {
            EnsureCapacity(bit + 1);

            int wordIndex = bit / IntSize;
            int wordBit = bit & IntMask;

            long wordMask = 1L << wordBit;

            if ((_masks[wordIndex] & wordMask) != 0)
            {
                return false;
            }

            _masks[wordIndex] |= wordMask;

            return true;
        }

        public void Clear(int bit)
        {
            EnsureCapacity(bit + 1);

            int wordIndex = bit / IntSize;
            int wordBit = bit & IntMask;

            long wordMask = 1L << wordBit;

            _masks[wordIndex] &= ~wordMask;
        }

        public bool IsSet(int bit)
        {
            EnsureCapacity(bit + 1);

            int wordIndex = bit / IntSize;
            int wordBit = bit & IntMask;

            return (_masks[wordIndex] & (1L << wordBit)) != 0;
        }

        // Returns the index of the first clear bit, or _count * IntSize when
        // every allocated bit is set.
        public int FindFirstUnset()
        {
            for (int index = 0; index < _count; index++)
            {
                long mask = _masks[index];

                if (mask != -1L) // -1L means all 64 bits of this word are set.
                {
                    return BitOperations.TrailingZeroCount(~mask) + index * IntSize;
                }
            }

            return _count * IntSize;
        }

        // Unions map into this map. Returns true if any bit changed.
        public bool Set(BitMap map)
        {
            EnsureCapacity(map._count * IntSize);

            bool modified = false;

            // Only map._count words exist in the source map; reading further
            // would run past its buffer when this map is the larger of the two.
            for (int index = 0; index < map._count; index++)
            {
                long newValue = _masks[index] | map._masks[index];

                if (_masks[index] != newValue)
                {
                    _masks[index] = newValue;

                    modified = true;
                }
            }

            return modified;
        }

        // Clears every bit that is set in map. Returns true if any bit changed.
        public bool Clear(BitMap map)
        {
            EnsureCapacity(map._count * IntSize);

            bool modified = false;

            for (int index = 0; index < map._count; index++)
            {
                long newValue = _masks[index] & ~map._masks[index];

                if (_masks[index] != newValue)
                {
                    _masks[index] = newValue;

                    modified = true;
                }
            }

            return modified;
        }

        private void EnsureCapacity(int size)
        {
            int count = (size + IntMask) / IntSize;

            if (count > _count)
            {
                var oldMask = _masks;
                var oldSpan = new Span<long>(_masks, _count);

                _masks = _allocator.Allocate<long>((uint)count);
                _count = count;

                var newSpan = new Span<long>(_masks, _count);

                oldSpan.CopyTo(newSpan);
                newSpan.Slice(oldSpan.Length).Clear(); // Zero the newly added words.

                _allocator.Free(oldMask);
            }
        }

        public void Dispose()
        {
            if (_masks != null)
            {
                _allocator.Free(_masks);

                _masks = null;
            }
        }

        IEnumerator IEnumerable.GetEnumerator()
        {
            return GetEnumerator();
        }

        IEnumerator<int> IEnumerable<int>.GetEnumerator()
        {
            return GetEnumerator();
        }

        public Enumerator GetEnumerator()
        {
            return new Enumerator(this);
        }

        public struct Enumerator : IEnumerator<int>
        {
            private int _index;
            private long _mask;
            private int _bit;
            private readonly BitMap _map;

            public int Current => _index * IntSize + _bit;
            object IEnumerator.Current => Current;

            public Enumerator(BitMap map)
            {
                _index = -1;
                _mask = 0;
                _bit = 0;
                _map = map;
            }

            public bool MoveNext()
            {
                if (_mask != 0)
                {
                    _mask &= ~(1L << _bit); // Drop the bit returned last time.
                }

                // Advance to the next non-zero word, if any.
                while (_mask == 0)
                {
                    if (++_index >= _map._count)
                    {
                        return false;
                    }

                    _mask = _map._masks[_index];
                }

                _bit = BitOperations.TrailingZeroCount(_mask);

                return true;
            }

            public void Reset() { }

            public void Dispose() { }
        }
    }
}
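
// Usage sketch (editorial addition, not part of the upstream file). Assumes an
// Allocator implementation such as the ArenaAllocator mentioned in the commit
// message; construction of `allocator` itself is elided.
//
//     using var map = new BitMap(allocator, capacity: 128);
//
//     map.Set(3);               // true: newly set
//     map.Set(70);              // true: lands in the second 64-bit word
//     map.Set(3);               // false: already set
//
//     foreach (int bit in map)  // Enumerates set bits in order: 3, then 70.
//     {
//         Console.WriteLine(bit);
//     }
//
//     int firstFree = map.FindFirstUnset(); // 0, since bit 0 was never set.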