diff --git a/Ryujinx.HLE/HOS/Kernel/Process/KProcess.cs b/Ryujinx.HLE/HOS/Kernel/Process/KProcess.cs
index 0a74eace..649cfb93 100644
--- a/Ryujinx.HLE/HOS/Kernel/Process/KProcess.cs
+++ b/Ryujinx.HLE/HOS/Kernel/Process/KProcess.cs
@@ -46,6 +46,7 @@ namespace Ryujinx.HLE.HOS.Kernel.Process
         public KAddressArbiter AddressArbiter { get; private set; }
 
         public long[] RandomEntropy { get; private set; }
+        public KThread[] PinnedThreads { get; private set; }
 
         private bool _signaled;
@@ -102,6 +103,7 @@ namespace Ryujinx.HLE.HOS.Kernel.Process
             Capabilities = new KProcessCapabilities();
 
             RandomEntropy = new long[KScheduler.CpuCoresCount];
+            PinnedThreads = new KThread[KScheduler.CpuCoresCount];
 
             // TODO: Remove once we no longer need to initialize it externally.
             HandleTable = new KHandleTable(context);
@@ -749,7 +751,24 @@ namespace Ryujinx.HLE.HOS.Kernel.Process
             {
                 KThread currentThread = KernelStatic.GetCurrentThread();
 
-                if (currentThread.IsSchedulable)
+                if (currentThread.Owner != null &&
+                    currentThread.GetUserDisableCount() != 0 &&
+                    currentThread.Owner.PinnedThreads[currentThread.CurrentCore] == null)
+                {
+                    KernelContext.CriticalSection.Enter();
+
+                    currentThread.Owner.PinThread(currentThread);
+
+                    currentThread.SetUserInterruptFlag();
+
+                    if (currentThread.IsSchedulable)
+                    {
+                        KernelContext.Schedulers[currentThread.CurrentCore].Schedule();
+                    }
+
+                    KernelContext.CriticalSection.Leave();
+                }
+                else if (currentThread.IsSchedulable)
                 {
                     KernelContext.Schedulers[currentThread.CurrentCore].Schedule();
                 }
@@ -952,6 +971,11 @@ namespace Ryujinx.HLE.HOS.Kernel.Process
             {
                 KernelContext.CriticalSection.Enter();
 
+                if (currentThread != null && PinnedThreads[currentThread.CurrentCore] == currentThread)
+                {
+                    UnpinThread(currentThread);
+                }
+
                 foreach (KThread thread in _threads)
                 {
                     if ((thread.SchedFlags & ThreadSchedState.LowMask) != ThreadSchedState.TerminationPending)
@@ -1139,5 +1163,35 @@ namespace Ryujinx.HLE.HOS.Kernel.Process
 
             return KernelResult.InvalidState;
         }
+
+        public void PinThread(KThread thread)
+        {
+            if (!thread.TerminationRequested)
+            {
+                PinnedThreads[thread.CurrentCore] = thread;
+
+                thread.Pin();
+
+                KernelContext.ThreadReselectionRequested = true;
+            }
+        }
+
+        public void UnpinThread(KThread thread)
+        {
+            if (!thread.TerminationRequested)
+            {
+                thread.Unpin();
+
+                PinnedThreads[thread.CurrentCore] = null;
+
+                KernelContext.ThreadReselectionRequested = true;
+            }
+        }
+
+        public bool IsExceptionUserThread(KThread thread)
+        {
+            // TODO
+            return false;
+        }
     }
 }
\ No newline at end of file
diff --git a/Ryujinx.HLE/HOS/Kernel/SupervisorCall/Syscall.cs b/Ryujinx.HLE/HOS/Kernel/SupervisorCall/Syscall.cs
index 59d56b4d..2dd9d807 100644
--- a/Ryujinx.HLE/HOS/Kernel/SupervisorCall/Syscall.cs
+++ b/Ryujinx.HLE/HOS/Kernel/SupervisorCall/Syscall.cs
@@ -2655,6 +2655,13 @@ namespace Ryujinx.HLE.HOS.Kernel.SupervisorCall
             };
         }
 
+        public KernelResult SynchronizePreemptionState()
+        {
+            KernelStatic.GetCurrentThread().SynchronizePreemptionState();
+
+            return KernelResult.Success;
+        }
+
         private bool IsPointingInsideKernel(ulong address)
         {
             return (address + 0x1000000000) < 0xffffff000;
diff --git a/Ryujinx.HLE/HOS/Kernel/SupervisorCall/Syscall32.cs b/Ryujinx.HLE/HOS/Kernel/SupervisorCall/Syscall32.cs
index bb1cc8ad..d955807d 100644
--- a/Ryujinx.HLE/HOS/Kernel/SupervisorCall/Syscall32.cs
+++ b/Ryujinx.HLE/HOS/Kernel/SupervisorCall/Syscall32.cs
@@ -491,5 +491,10 @@ namespace Ryujinx.HLE.HOS.Kernel.SupervisorCall
         {
             return _syscall.SignalToAddress(address, type, value, count);
         }
+
+        public KernelResult SynchronizePreemptionState32()
+        {
+            return _syscall.SynchronizePreemptionState();
+        }
     }
 }
diff --git a/Ryujinx.HLE/HOS/Kernel/SupervisorCall/Syscall64.cs b/Ryujinx.HLE/HOS/Kernel/SupervisorCall/Syscall64.cs
index 97ded4b5..fc826552 100644
--- a/Ryujinx.HLE/HOS/Kernel/SupervisorCall/Syscall64.cs
+++ b/Ryujinx.HLE/HOS/Kernel/SupervisorCall/Syscall64.cs
@@ -405,5 +405,10 @@ namespace Ryujinx.HLE.HOS.Kernel.SupervisorCall
         {
             return _syscall.SignalToAddress(address, type, value, count);
         }
+
+        public KernelResult SynchronizePreemptionState64()
+        {
+            return _syscall.SynchronizePreemptionState();
+        }
     }
 }
diff --git a/Ryujinx.HLE/HOS/Kernel/SupervisorCall/SyscallHandler.cs b/Ryujinx.HLE/HOS/Kernel/SupervisorCall/SyscallHandler.cs
index b4e7a0bf..5e795d35 100644
--- a/Ryujinx.HLE/HOS/Kernel/SupervisorCall/SyscallHandler.cs
+++ b/Ryujinx.HLE/HOS/Kernel/SupervisorCall/SyscallHandler.cs
@@ -19,7 +19,22 @@ namespace Ryujinx.HLE.HOS.Kernel.SupervisorCall
 
         public void SvcCall(object sender, InstExceptionEventArgs e)
         {
-            ExecutionContext context = (ExecutionContext)sender;
+            KThread currentThread = KernelStatic.GetCurrentThread();
+
+            if (currentThread.Owner != null &&
+                currentThread.GetUserDisableCount() != 0 &&
+                currentThread.Owner.PinnedThreads[currentThread.CurrentCore] == null)
+            {
+                _context.CriticalSection.Enter();
+
+                currentThread.Owner.PinThread(currentThread);
+
+                currentThread.SetUserInterruptFlag();
+
+                _context.CriticalSection.Leave();
+            }
+
+            ExecutionContext context = (ExecutionContext)sender;
 
             if (context.IsAarch32)
             {
@@ -44,13 +59,6 @@ namespace Ryujinx.HLE.HOS.Kernel.SupervisorCall
                 svcFunc(_syscall64, context);
             }
 
-            PostSvcHandler();
-        }
-
-        private void PostSvcHandler()
-        {
-            KThread currentThread = KernelStatic.GetCurrentThread();
-
             currentThread.HandlePostSyscall();
         }
     }
diff --git a/Ryujinx.HLE/HOS/Kernel/SupervisorCall/SyscallTable.cs b/Ryujinx.HLE/HOS/Kernel/SupervisorCall/SyscallTable.cs
index 7e9f08c0..178dc029 100644
--- a/Ryujinx.HLE/HOS/Kernel/SupervisorCall/SyscallTable.cs
+++ b/Ryujinx.HLE/HOS/Kernel/SupervisorCall/SyscallTable.cs
@@ -71,6 +71,7 @@ namespace Ryujinx.HLE.HOS.Kernel.SupervisorCall
             { 0x33, nameof(Syscall64.GetThreadContext364) },
             { 0x34, nameof(Syscall64.WaitForAddress64) },
             { 0x35, nameof(Syscall64.SignalToAddress64) },
+            { 0x36, nameof(Syscall64.SynchronizePreemptionState64) },
             { 0x37, nameof(Syscall64.GetResourceLimitPeakValue64) },
             { 0x40, nameof(Syscall64.CreateSession64) },
             { 0x41, nameof(Syscall64.AcceptSession64) },
@@ -145,6 +146,7 @@ namespace Ryujinx.HLE.HOS.Kernel.SupervisorCall
             { 0x33, nameof(Syscall32.GetThreadContext332) },
             { 0x34, nameof(Syscall32.WaitForAddress32) },
             { 0x35, nameof(Syscall32.SignalToAddress32) },
+            { 0x36, nameof(Syscall32.SynchronizePreemptionState32) },
             { 0x37, nameof(Syscall32.GetResourceLimitPeakValue32) },
             { 0x40, nameof(Syscall32.CreateSession32) },
             { 0x41, nameof(Syscall32.AcceptSession32) },
diff --git a/Ryujinx.HLE/HOS/Kernel/Threading/KScheduler.cs b/Ryujinx.HLE/HOS/Kernel/Threading/KScheduler.cs
index 0982ceff..0c51b7b9 100644
--- a/Ryujinx.HLE/HOS/Kernel/Threading/KScheduler.cs
+++ b/Ryujinx.HLE/HOS/Kernel/Threading/KScheduler.cs
@@ -36,6 +36,7 @@ namespace Ryujinx.HLE.HOS.Kernel.Threading
         private readonly KThread _idleThread;
 
         public KThread PreviousThread => _previousThread;
+        public KThread CurrentThread => _currentThread;
         public long LastContextSwitchTime { get; private set; }
 
         public long TotalIdleTimeTicks => _idleThread.TotalTimeRunning;
@@ -87,6 +88,26 @@ namespace Ryujinx.HLE.HOS.Kernel.Threading
             {
                 KThread thread = context.PriorityQueue.ScheduledThreads(core).FirstOrDefault();
 
+                if (thread != null &&
+                    thread.Owner != null &&
+                    thread.Owner.PinnedThreads[core] != null &&
+                    thread.Owner.PinnedThreads[core] != thread)
+                {
+                    KThread candidate = thread.Owner.PinnedThreads[core];
+
+                    if (candidate.KernelWaitersCount == 0 && !thread.Owner.IsExceptionUserThread(candidate))
+                    {
+                        if (candidate.SchedFlags == ThreadSchedState.Running)
+                        {
+                            thread = candidate;
+                        }
+                        else
+                        {
+                            thread = null;
+                        }
+                    }
+                }
+
                 scheduledCoresMask |= context.Schedulers[core].SelectThread(thread);
             }
diff --git a/Ryujinx.HLE/HOS/Kernel/Threading/KThread.cs b/Ryujinx.HLE/HOS/Kernel/Threading/KThread.cs
index 64629248..cf95b015 100644
--- a/Ryujinx.HLE/HOS/Kernel/Threading/KThread.cs
+++ b/Ryujinx.HLE/HOS/Kernel/Threading/KThread.cs
@@ -11,6 +11,9 @@ namespace Ryujinx.HLE.HOS.Kernel.Threading
 {
     class KThread : KSynchronizationObject, IKFutureSchedulerObject
     {
+        private const int TlsUserDisableCountOffset = 0x100;
+        private const int TlsUserInterruptFlagOffset = 0x102;
+
         public const int MaxWaitSyncObjects = 64;
 
         private ManualResetEvent _schedulerWaitEvent;
@@ -43,6 +46,7 @@ namespace Ryujinx.HLE.HOS.Kernel.Threading
         public bool IsSchedulable => _customThreadStart == null && !_forcedUnschedulable;
 
         public ulong MutexAddress { get; set; }
+        public int KernelWaitersCount { get; private set; }
 
         public KProcess Owner { get; private set; }
@@ -65,11 +69,14 @@ namespace Ryujinx.HLE.HOS.Kernel.Threading
 
         private LinkedList<KThread> _mutexWaiters;
         private LinkedListNode<KThread> _mutexWaiterNode;
 
+        private LinkedList<KThread> _pinnedWaiters;
+
         public KThread MutexOwner { get; private set; }
 
         public int ThreadHandleForUserMutex { get; set; }
 
         private ThreadSchedState _forcePauseFlags;
+        private ThreadSchedState _forcePausePermissionFlags;
 
         public KernelResult ObjSyncResult { get; set; }
@@ -79,11 +86,12 @@ namespace Ryujinx.HLE.HOS.Kernel.Threading
 
         public int CurrentCore { get; set; }
         public int ActiveCore { get; set; }
 
-        private long _affinityMaskOverride;
-        private int _preferredCoreOverride;
-#pragma warning disable CS0649
-        private int _affinityOverrideCount;
-#pragma warning restore CS0649
+        public bool IsPinned { get; private set; }
+
+        private long _originalAffinityMask;
+        private int _originalPreferredCore;
+        private int _originalBasePriority;
+        private int _coreMigrationDisableCount;
 
         public ThreadSchedState SchedFlags { get; private set; }
@@ -108,6 +116,8 @@ namespace Ryujinx.HLE.HOS.Kernel.Threading
 
         public long LastPc { get; set; }
 
+        private object ActivityOperationLock = new object();
+
         public KThread(KernelContext context) : base(context)
         {
             WaitSyncObjects = new KSynchronizationObject[MaxWaitSyncObjects];
@@ -116,6 +126,7 @@ namespace Ryujinx.HLE.HOS.Kernel.Threading
             SiblingsPerCore = new LinkedListNode<KThread>[KScheduler.CpuCoresCount];
 
             _mutexWaiters = new LinkedList<KThread>();
+            _pinnedWaiters = new LinkedList<KThread>();
         }
 
         public KernelResult Initialize(
@@ -147,6 +158,7 @@ namespace Ryujinx.HLE.HOS.Kernel.Threading
             DynamicPriority = priority;
             BasePriority = priority;
             CurrentCore = cpuCore;
+            IsPinned = false;
 
             _entrypoint = entrypoint;
             _customThreadStart = customThreadStart;
@@ -204,6 +216,8 @@ namespace Ryujinx.HLE.HOS.Kernel.Threading
 
             _hasBeenInitialized = true;
 
+            _forcePausePermissionFlags = ThreadSchedState.ForcePauseMask;
+
             if (owner != null)
             {
                 owner.SubscribeThreadEventHandlers(Context);
@@ -301,6 +315,11 @@ namespace Ryujinx.HLE.HOS.Kernel.Threading
         {
             KernelContext.CriticalSection.Enter();
 
+            if (Owner != null && Owner.PinnedThreads[KernelStatic.GetCurrentThread().CurrentCore] == this)
+            {
+                Owner.UnpinThread(this);
+            }
+
             ThreadSchedState result;
 
             if (Interlocked.CompareExchange(ref _shallBeTerminated, 1, 0) == 0)
@@ -405,6 +424,7 @@ namespace Ryujinx.HLE.HOS.Kernel.Threading
             KernelContext.CriticalSection.Enter();
 
             _forcePauseFlags &= ~ThreadSchedState.ForcePauseMask;
+            _forcePausePermissionFlags = 0;
 
             bool decRef = ExitImpl();
@@ -433,6 +453,19 @@ namespace Ryujinx.HLE.HOS.Kernel.Threading
             return decRef;
         }
 
+        private int GetEffectiveRunningCore()
+        {
+            for (int coreNumber = 0; coreNumber < KScheduler.CpuCoresCount; coreNumber++)
+            {
+                if (KernelContext.Schedulers[coreNumber].CurrentThread == this)
+                {
+                    return coreNumber;
+                }
+            }
+
+            return -1;
+        }
+
         public KernelResult Sleep(long timeout)
         {
             KernelContext.CriticalSection.Enter();
@@ -465,7 +498,14 @@ namespace Ryujinx.HLE.HOS.Kernel.Threading
         {
             KernelContext.CriticalSection.Enter();
 
-            BasePriority = priority;
+            if (IsPinned)
+            {
+                _originalBasePriority = priority;
+            }
+            else
+            {
+                BasePriority = priority;
+            }
 
             UpdatePriorityInheritance();
 
@@ -497,53 +537,96 @@ namespace Ryujinx.HLE.HOS.Kernel.Threading
         public KernelResult SetActivity(bool pause)
         {
-            KernelResult result = KernelResult.Success;
-
-            KernelContext.CriticalSection.Enter();
-
-            ThreadSchedState lowNibble = SchedFlags & ThreadSchedState.LowMask;
-
-            if (lowNibble != ThreadSchedState.Paused && lowNibble != ThreadSchedState.Running)
+            lock (ActivityOperationLock)
             {
+                KernelResult result = KernelResult.Success;
+
+                KernelContext.CriticalSection.Enter();
+
+                ThreadSchedState lowNibble = SchedFlags & ThreadSchedState.LowMask;
+
+                if (lowNibble != ThreadSchedState.Paused && lowNibble != ThreadSchedState.Running)
+                {
+                    KernelContext.CriticalSection.Leave();
+
+                    return KernelResult.InvalidState;
+                }
+
+                if (!ShallBeTerminated && SchedFlags != ThreadSchedState.TerminationPending)
+                {
+                    if (pause)
+                    {
+                        // Pause, the force pause flag should be clear (thread is NOT paused).
+                        if ((_forcePauseFlags & ThreadSchedState.ThreadPauseFlag) == 0)
+                        {
+                            Suspend(ThreadSchedState.ThreadPauseFlag);
+                        }
+                        else
+                        {
+                            result = KernelResult.InvalidState;
+                        }
+                    }
+                    else
+                    {
+                        // Unpause, the force pause flag should be set (thread is paused).
+                        if ((_forcePauseFlags & ThreadSchedState.ThreadPauseFlag) != 0)
+                        {
+                            Resume(ThreadSchedState.ThreadPauseFlag);
+                        }
+                        else
+                        {
+                            result = KernelResult.InvalidState;
+                        }
+                    }
+                }
+
                 KernelContext.CriticalSection.Leave();
 
-                return KernelResult.InvalidState;
-            }
-
-            KernelContext.CriticalSection.Enter();
-
-            if (!ShallBeTerminated && SchedFlags != ThreadSchedState.TerminationPending)
-            {
-                if (pause)
+                if (result == KernelResult.Success && pause)
                 {
-                    // Pause, the force pause flag should be clear (thread is NOT paused).
-                    if ((_forcePauseFlags & ThreadSchedState.ThreadPauseFlag) == 0)
+                    bool isThreadRunning = true;
+
+                    while (isThreadRunning)
                     {
-                        Suspend(ThreadSchedState.ThreadPauseFlag);
-                    }
-                    else
-                    {
-                        result = KernelResult.InvalidState;
-                    }
-                }
-                else
-                {
-                    // Unpause, the force pause flag should be set (thread is paused).
-                    if ((_forcePauseFlags & ThreadSchedState.ThreadPauseFlag) != 0)
-                    {
-                        Resume(ThreadSchedState.ThreadPauseFlag);
-                    }
-                    else
-                    {
-                        result = KernelResult.InvalidState;
+                        KernelContext.CriticalSection.Enter();
+
+                        if (TerminationRequested)
+                        {
+                            KernelContext.CriticalSection.Leave();
+
+                            break;
+                        }
+
+                        isThreadRunning = false;
+
+                        if (IsPinned)
+                        {
+                            KThread currentThread = KernelStatic.GetCurrentThread();
+
+                            if (currentThread.TerminationRequested)
+                            {
+                                KernelContext.CriticalSection.Leave();
+
+                                result = KernelResult.ThreadTerminating;
+
+                                break;
+                            }
+
+                            _pinnedWaiters.AddLast(currentThread);
+
+                            currentThread.Reschedule(ThreadSchedState.Paused);
+                        }
+                        else
+                        {
+                            isThreadRunning = GetEffectiveRunningCore() >= 0;
+                        }
+
+                        KernelContext.CriticalSection.Leave();
                     }
                 }
+
+                return result;
             }
-
-            KernelContext.CriticalSection.Leave();
-            KernelContext.CriticalSection.Leave();
-
-            return result;
         }
 
         public void CancelSynchronization()
         {
@@ -579,58 +662,105 @@ namespace Ryujinx.HLE.HOS.Kernel.Threading
 
         public KernelResult SetCoreAndAffinityMask(int newCore, long newAffinityMask)
         {
-            KernelContext.CriticalSection.Enter();
-
-            bool useOverride = _affinityOverrideCount != 0;
-
-            // The value -3 is "do not change the preferred core".
-            if (newCore == -3)
+            lock (ActivityOperationLock)
             {
-                newCore = useOverride ? _preferredCoreOverride : PreferredCore;
+                KernelContext.CriticalSection.Enter();
 
-                if ((newAffinityMask & (1 << newCore)) == 0)
+                bool isCoreMigrationDisabled = _coreMigrationDisableCount != 0;
+
+                // The value -3 is "do not change the preferred core".
+                if (newCore == -3)
                 {
-                    KernelContext.CriticalSection.Leave();
+                    newCore = isCoreMigrationDisabled ? _originalPreferredCore : PreferredCore;
 
-                    return KernelResult.InvalidCombination;
-                }
-            }
-
-            if (useOverride)
-            {
-                _preferredCoreOverride = newCore;
-                _affinityMaskOverride = newAffinityMask;
-            }
-            else
-            {
-                long oldAffinityMask = AffinityMask;
-
-                PreferredCore = newCore;
-                AffinityMask = newAffinityMask;
-
-                if (oldAffinityMask != newAffinityMask)
-                {
-                    int oldCore = ActiveCore;
-
-                    if (oldCore >= 0 && ((AffinityMask >> oldCore) & 1) == 0)
+                    if ((newAffinityMask & (1 << newCore)) == 0)
                     {
-                        if (PreferredCore < 0)
+                        KernelContext.CriticalSection.Leave();
+
+                        return KernelResult.InvalidCombination;
+                    }
+                }
+
+                if (isCoreMigrationDisabled)
+                {
+                    _originalPreferredCore = newCore;
+                    _originalAffinityMask = newAffinityMask;
+                }
+                else
+                {
+                    long oldAffinityMask = AffinityMask;
+
+                    PreferredCore = newCore;
+                    AffinityMask = newAffinityMask;
+
+                    if (oldAffinityMask != newAffinityMask)
+                    {
+                        int oldCore = ActiveCore;
+
+                        if (oldCore >= 0 && ((AffinityMask >> oldCore) & 1) == 0)
                         {
-                            ActiveCore = sizeof(ulong) * 8 - 1 - BitOperations.LeadingZeroCount((ulong)AffinityMask);
+                            if (PreferredCore < 0)
+                            {
+                                ActiveCore = sizeof(ulong) * 8 - 1 - BitOperations.LeadingZeroCount((ulong)AffinityMask);
+                            }
+                            else
+                            {
+                                ActiveCore = PreferredCore;
+                            }
+                        }
+
+                        AdjustSchedulingForNewAffinity(oldAffinityMask, oldCore);
+                    }
+                }
+
+                KernelContext.CriticalSection.Leave();
+
+                bool targetThreadPinned = true;
+
+                while (targetThreadPinned)
+                {
+                    KernelContext.CriticalSection.Enter();
+
+                    if (TerminationRequested)
+                    {
+                        KernelContext.CriticalSection.Leave();
+
+                        break;
+                    }
+
+                    targetThreadPinned = false;
+
+                    int coreNumber = GetEffectiveRunningCore();
+                    bool isPinnedThreadCurrentlyRunning = coreNumber >= 0;
+
+                    if (isPinnedThreadCurrentlyRunning && ((1 << coreNumber) & AffinityMask) == 0)
+                    {
+                        if (IsPinned)
+                        {
+                            KThread currentThread = KernelStatic.GetCurrentThread();
+
+                            if (currentThread.TerminationRequested)
+                            {
+                                KernelContext.CriticalSection.Leave();
+
+                                return KernelResult.ThreadTerminating;
+                            }
+
+                            _pinnedWaiters.AddLast(currentThread);
+
+                            currentThread.Reschedule(ThreadSchedState.Paused);
                         }
                         else
                         {
-                            ActiveCore = PreferredCore;
+                            targetThreadPinned = true;
                         }
                     }
 
-                    AdjustSchedulingForNewAffinity(oldAffinityMask, oldCore);
+                    KernelContext.CriticalSection.Leave();
                 }
+
+                return KernelResult.Success;
             }
-
-            KernelContext.CriticalSection.Leave();
-
-            return KernelResult.Success;
         }
 
         private void CombineForcePauseFlags()
@@ -638,7 +768,7 @@ namespace Ryujinx.HLE.HOS.Kernel.Threading
             ThreadSchedState oldFlags = SchedFlags;
             ThreadSchedState lowNibble = SchedFlags & ThreadSchedState.LowMask;
 
-            SchedFlags = lowNibble | _forcePauseFlags;
+            SchedFlags = lowNibble | (_forcePauseFlags & _forcePausePermissionFlags);
 
             AdjustScheduling(oldFlags);
         }
@@ -1106,7 +1236,7 @@ namespace Ryujinx.HLE.HOS.Kernel.Threading
             foreach (KThread thread in _mutexWaiters)
             {
                 thread.MutexOwner = null;
-                thread._preferredCoreOverride = 0;
+                thread._originalPreferredCore = 0;
                 thread.ObjSyncResult = KernelResult.InvalidState;
 
                 thread.ReleaseAndResume();
@@ -1116,5 +1246,113 @@ namespace Ryujinx.HLE.HOS.Kernel.Threading
 
             Owner?.DecrementThreadCountAndTerminateIfZero();
         }
+
+        public void Pin()
+        {
+            IsPinned = true;
+            _coreMigrationDisableCount++;
+
+            int activeCore = ActiveCore;
+
+            _originalPreferredCore = PreferredCore;
+            _originalAffinityMask = AffinityMask;
+
+            ActiveCore = CurrentCore;
+            PreferredCore = CurrentCore;
+            AffinityMask = 1 << CurrentCore;
+
+            if (activeCore != CurrentCore || _originalAffinityMask != AffinityMask)
+            {
+                AdjustSchedulingForNewAffinity(_originalAffinityMask, activeCore);
+            }
+
+            _originalBasePriority = BasePriority;
+            BasePriority = Math.Min(_originalBasePriority, BitOperations.TrailingZeroCount(Owner.Capabilities.AllowedThreadPriosMask) - 1);
+            UpdatePriorityInheritance();
+
+            // Disallows thread pausing
+            _forcePausePermissionFlags &= ~ThreadSchedState.ThreadPauseFlag;
+            CombineForcePauseFlags();
+
+            // TODO: Assign reduced SVC permissions
+        }
+
+        public void Unpin()
+        {
+            IsPinned = false;
+            _coreMigrationDisableCount--;
+
+            long affinityMask = AffinityMask;
+            int activeCore = ActiveCore;
+
+            PreferredCore = _originalPreferredCore;
+            AffinityMask = _originalAffinityMask;
+
+            if (AffinityMask != affinityMask)
+            {
+                if ((AffinityMask & 1 << ActiveCore) != 0)
+                {
+                    if (PreferredCore >= 0)
+                    {
+                        ActiveCore = PreferredCore;
+                    }
+                    else
+                    {
+                        ActiveCore = sizeof(ulong) * 8 - 1 - BitOperations.LeadingZeroCount((ulong)AffinityMask);
+                    }
+
+                    AdjustSchedulingForNewAffinity(affinityMask, activeCore);
+                }
+            }
+
+            BasePriority = _originalBasePriority;
+            UpdatePriorityInheritance();
+
+            if (!TerminationRequested)
+            {
+                // Allows thread pausing
+                _forcePausePermissionFlags |= ThreadSchedState.ThreadPauseFlag;
+                CombineForcePauseFlags();
+
+                // TODO: Restore SVC permissions
+            }
+
+            // Wake up waiters
+            foreach (KThread waiter in _pinnedWaiters)
+            {
+                waiter.ReleaseAndResume();
+            }
+
+            _pinnedWaiters.Clear();
+        }
+
+        public void SynchronizePreemptionState()
+        {
+            KernelContext.CriticalSection.Enter();
+
+            if (Owner != null && Owner.PinnedThreads[CurrentCore] == this)
+            {
+                ClearUserInterruptFlag();
+
+                Owner.UnpinThread(this);
+            }
+
+            KernelContext.CriticalSection.Leave();
+        }
+
+        public ushort GetUserDisableCount()
+        {
+            return Owner.CpuMemory.Read<ushort>(_tlsAddress + TlsUserDisableCountOffset);
+        }
+
+        public void SetUserInterruptFlag()
+        {
+            Owner.CpuMemory.Write<ushort>(_tlsAddress + TlsUserInterruptFlagOffset, 1);
+        }
+
+        public void ClearUserInterruptFlag()
+        {
+            Owner.CpuMemory.Write<ushort>(_tlsAddress + TlsUserInterruptFlagOffset, 0);
+        }
     }
 }
\ No newline at end of file
diff --git a/Ryujinx.HLE/HOS/Kernel/Threading/ThreadSchedState.cs b/Ryujinx.HLE/HOS/Kernel/Threading/ThreadSchedState.cs
index c9eaa6b3..9577075c 100644
--- a/Ryujinx.HLE/HOS/Kernel/Threading/ThreadSchedState.cs
+++ b/Ryujinx.HLE/HOS/Kernel/Threading/ThreadSchedState.cs
@@ -4,11 +4,12 @@ namespace Ryujinx.HLE.HOS.Kernel.Threading
     {
         LowMask = 0xf,
         HighMask = 0xfff0,
-        ForcePauseMask = 0x70,
+        ForcePauseMask = 0x1f0,
 
         ProcessPauseFlag = 1 << 4,
         ThreadPauseFlag = 1 << 5,
         ProcessDebugPauseFlag = 1 << 6,
+        BacktracePauseFlag = 1 << 7,
         KernelInitPauseFlag = 1 << 8,
 
         None = 0,