Vulkan: Device Local and higher invocation count for buffer conversions (#5623)
Some simple changes to the buffer conversion shaders (stride conversion, D32S8 to D24S8).

The first change uses a device local buffer for converted vertex buffers, since they are only read and written on the GPU. These paths don't trigger on NVIDIA, but forcing them on shows the full extent of the problem: writing to host owned memory from compute absolutely destroys performance there. AMD GPUs are less heavily affected by this issue, but since the game in question was writing 230MB from compute, I imagine it should still have some effect.

The second change allows the buffer conversion shaders to scale their work group count. Dividing the work between 32 invocations works OK for M1 Macs, but it's not so great for anything with more cores, like AMD GPUs, which should be able to do far more parallel copies. The conversions now scale at roughly 100 elements per invocation.

Some stride change cases could be improved further, either by limiting the vertex buffer size somehow (reading the index buffer could help, but is always risky) or by only updating the regions that changed rather than invalidating the whole thing.
Parent: 12cbacffca
Commit: 93cd327873
6 changed files with 10 additions and 10 deletions
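For reference, the first change amounts to asking the allocator for VRAM instead of a host visible heap for these scratch buffers. Below is a minimal sketch of the distinction in terms of raw Vulkan memory property bits; it is an illustrative helper, not the real BufferManager code.

    // Illustrative only; the real allocation goes through Ryujinx's BufferManager.
    static class ConvertedBufferMemory
    {
        // Raw Vulkan memory property flag bits.
        const uint DeviceLocalBit  = 0x1; // VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT
        const uint HostVisibleBit  = 0x2; // VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT
        const uint HostCoherentBit = 0x4; // VK_MEMORY_PROPERTY_HOST_COHERENT_BIT

        // Converted vertex/index buffers are written by compute and read by draws,
        // never touched by the CPU, so DEVICE_LOCAL memory (VRAM) is the better fit.
        public static uint Preferred => DeviceLocalBit;

        // The old behaviour effectively landed them in host owned memory, so every
        // compute write had to cross the bus, which is the case that hurt NVIDIA so badly.
        public static uint OldDefault => HostVisibleBit | HostCoherentBit;
    }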
@@ -967,7 +967,7 @@ namespace Ryujinx.Graphics.Vulkan
 
             if (!_cachedConvertedBuffers.TryGetValue(offset, size, key, out var holder))
             {
-                holder = _gd.BufferManager.Create(_gd, (size * 2 + 3) & ~3);
+                holder = _gd.BufferManager.Create(_gd, (size * 2 + 3) & ~3, baseType: BufferAllocationType.DeviceLocal);
 
                 _gd.PipelineInternal.EndRenderPass();
                 _gd.HelperShader.ConvertI8ToI16(_gd, cbs, this, holder, offset, size);

@@ -993,7 +993,7 @@ namespace Ryujinx.Graphics.Vulkan
             {
                 int alignedStride = (stride + (alignment - 1)) & -alignment;
 
-                holder = _gd.BufferManager.Create(_gd, (size / stride) * alignedStride);
+                holder = _gd.BufferManager.Create(_gd, (size / stride) * alignedStride, baseType: BufferAllocationType.DeviceLocal);
 
                 _gd.PipelineInternal.EndRenderPass();
                 _gd.HelperShader.ChangeStride(_gd, cbs, this, holder, offset, size, stride, alignedStride);

@@ -1023,7 +1023,7 @@ namespace Ryujinx.Graphics.Vulkan
 
                 int convertedCount = pattern.GetConvertedCount(indexCount);
 
-                holder = _gd.BufferManager.Create(_gd, convertedCount * 4);
+                holder = _gd.BufferManager.Create(_gd, convertedCount * 4, baseType: BufferAllocationType.DeviceLocal);
 
                 _gd.PipelineInternal.EndRenderPass();
                 _gd.HelperShader.ConvertIndexBuffer(_gd, cbs, this, holder, pattern, indexSize, offset, indexCount);
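The buffer sizes in these hunks follow directly from the conversions: I8 to I16 doubles the source size and rounds up to a 4 byte multiple, the stride change allocates one aligned stride per vertex, and the index conversion allocates four bytes per converted index. A standalone sketch of that arithmetic, with assumed parameter names:

    // Restatement of the size expressions used in the hunks above.
    static int SizeForI8ToI16(int size) => (size * 2 + 3) & ~3;            // e.g. 9 bytes of I8 indices -> 20 bytes
    static int SizeForStrideChange(int size, int stride, int alignedStride)
        => (size / stride) * alignedStride;                                 // one aligned stride per source vertex
    static int SizeForConvertedIndices(int convertedCount)
        => convertedCount * 4;                                              // 4-byte output indices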
@@ -5,7 +5,6 @@ using Ryujinx.Graphics.Shader.Translation;
 using Silk.NET.Vulkan;
 using System;
 using System.Collections.Generic;
-using System.IO;
 using System.Numerics;
 using CompareOp = Ryujinx.Graphics.GAL.CompareOp;
 using Format = Ryujinx.Graphics.GAL.Format;

@@ -27,6 +26,7 @@ namespace Ryujinx.Graphics.Vulkan
     class HelperShader : IDisposable
     {
         private const int UniformBufferAlignment = 256;
+        private const int ConvertElementsPerWorkgroup = 32 * 100; // Work group size of 32 times 100 elements.
         private const string ShaderBinariesPath = "Ryujinx.Graphics.Vulkan/Shaders/SpirvBinaries";
 
         private readonly PipelineHelperShader _pipeline;

@@ -894,7 +894,7 @@ namespace Ryujinx.Graphics.Vulkan
             _pipeline.SetStorageBuffers(1, sbRanges);
 
             _pipeline.SetProgram(_programStrideChange);
-            _pipeline.DispatchCompute(1, 1, 1);
+            _pipeline.DispatchCompute(1 + elems / ConvertElementsPerWorkgroup, 1, 1);
 
             gd.BufferManager.Delete(bufferHandle);
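The dispatch now scales with the amount of data: with a work group size of 32 and roughly 100 elements per invocation, each group covers 3,200 elements, and the leading `1 +` keeps at least one group for small buffers. A worked sketch of the group count, mirroring the ConvertElementsPerWorkgroup constant above:

    // How many work groups a conversion of 'elementCount' elements dispatches.
    const int ConvertElementsPerWorkgroup = 32 * 100; // 32 invocations x ~100 elements each = 3,200

    static int GroupCount(int elementCount) => 1 + elementCount / ConvertElementsPerWorkgroup;

    // Example: a 1,000,000-element stride change dispatches 1 + 1,000,000 / 3,200 = 313 groups
    // instead of the single group used before this change.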
@@ -1742,7 +1742,7 @@ namespace Ryujinx.Graphics.Vulkan
             _pipeline.SetStorageBuffers(1, sbRanges);
 
             _pipeline.SetProgram(_programConvertD32S8ToD24S8);
-            _pipeline.DispatchCompute(1, 1, 1);
+            _pipeline.DispatchCompute(1 + inSize / ConvertElementsPerWorkgroup, 1, 1);
 
             gd.BufferManager.Delete(bufferHandle);
@@ -29,7 +29,7 @@ void main()
     int sourceOffset = stride_arguments_data.w;
 
     int strideRemainder = targetStride - sourceStride;
-    int invocations = int(gl_WorkGroupSize.x);
+    int invocations = int(gl_WorkGroupSize.x * gl_NumWorkGroups.x);
 
     int copiesRequired = bufferSize / sourceStride;

@@ -39,7 +39,7 @@ void main()
     int allInvocationCopies = copiesRequired / invocations;
 
     // - Extra remainder copy that this invocation performs.
-    int index = int(gl_LocalInvocationID.x);
+    int index = int(gl_GlobalInvocationID.x);
    int extra = (index < (copiesRequired % invocations)) ? 1 : 0;
 
    int copyCount = allInvocationCopies + extra;
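With the dispatch no longer fixed at one group, the shader sizes its slice against every invocation in the grid (gl_WorkGroupSize.x * gl_NumWorkGroups.x) and indexes it with gl_GlobalInvocationID.x instead of the local ID. A C# sketch of the same split as the lines shown above:

    // Each invocation performs copiesRequired / invocations copies; the first
    // (copiesRequired % invocations) invocations each take one extra to cover the remainder.
    static int CopyCountFor(int globalIndex, int invocations, int copiesRequired)
    {
        int allInvocationCopies = copiesRequired / invocations;
        int extra = globalIndex < (copiesRequired % invocations) ? 1 : 0;
        return allInvocationCopies + extra;
    }

    // Example: 10,000 copies across 3,200 invocations -> invocations 0-399 perform 4 copies,
    // the remaining 2,800 perform 3, for exactly 10,000 in total.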
@@ -23,7 +23,7 @@ layout (std430, set = 1, binding = 2) buffer out_s
 void main()
 {
     // Determine what slice of the stride copies this invocation will perform.
-    int invocations = int(gl_WorkGroupSize.x);
+    int invocations = int(gl_WorkGroupSize.x * gl_NumWorkGroups.x);
 
     int copiesRequired = pixelCount;

@@ -33,7 +33,7 @@ void main()
     int allInvocationCopies = copiesRequired / invocations;
 
     // - Extra remainder copy that this invocation performs.
-    int index = int(gl_LocalInvocationID.x);
+    int index = int(gl_GlobalInvocationID.x);
    int extra = (index < (copiesRequired % invocations)) ? 1 : 0;
 
    int copyCount = allInvocationCopies + extra;
Binary file not shown.
Binary file not shown.