0
0
Fork 0
mirror of https://github.com/GreemDev/Ryujinx.git synced 2024-12-23 17:25:48 +00:00
Ryujinx/Ryujinx.Graphics.Texture/Utils/BC67Utils.cs
gdkchan 2232e4ae87
Vulkan backend (#2518)
* WIP Vulkan implementation

* No need to initialize attributes on the SPIR-V backend anymore

* Allow multithreading shaderc and vkCreateShaderModule

You'll only really see the benefit here with threaded-gal or parallel shader cache compile.

Fix shaderc multithreaded changes

Thread safety for shaderc Options constructor

Dunno how they managed to make a constructor not thread safe, but you do you. May avoid some freezes.

* Support multiple levels/layers for blit.

Fixes MK8D when scaled, maybe a few other games. AMD software "safe" blit not supported right now.

* TextureStorage should hold a ref of the foreign storage, otherwise it might be freed while in use

* New depth-stencil blit method for AMD

* Workaround for AMD driver bug

* Fix some tessellation related issues (still doesn't work?)

* Submit command buffer before Texture GetData. (UE4 fix)

* DrawTexture support

* Fix BGRA on OpenGL backend

* Fix rebase build break

* Support format aliasing on SetImage

* Fix uniform buffers being lost when bindings are out of order

* Fix storage buffers being lost when bindings are out of order

(also avoid allocations when changing bindings)

* Use current command buffer for unscaled copy (perf)

Avoids flushing commands and renting a command buffer when fulfilling copy dependencies and when games do unscaled copies.

* Update to .net6

* Update Silk.NET to version 2.10.1

Somehow, massive performance boost. Seems like their vtable for looking up vulkan methods was really slow before.

* Fix PrimitivesGenerated query, disable Transform Feedback queries for now

Lets Splatoon 2 work on nvidia. (mostly)

* Update counter queue to be similar to the OGL one

Fixes softlocks when games had to flush counters.

* Don't throw when ending conditional rendering for now

This should be re-enabled when conditional rendering is enabled on nvidia etc.

* Update findMSB/findLSB to match master's instruction enum

* Fix triangle overlay on SMO, Captain Toad, maybe others?

* Don't make Intel Mesa pay for Intel Windows bugs

* Fix samplers with MinFilter Linear or Nearest (fixes New Super Mario Bros U Deluxe black borders)

* Update Spv.Generator

* Add alpha test emulation on shader (but no shader specialisation yet...)

* Fix R4G4B4A4Unorm texture format permutation

* Validation layers should be enabled for any log level other than None

* Add barriers around vkCmdCopyImage

Write->Read barrier for src image (we want to wait for a write to read it)
Write->Read barrier for dst image (we want to wait for the copy to complete before use)

* Be a bit more careful with texture access flags, since it can be used for anything

* Device local mapping for all buffers

May avoid issues with drivers with NVIDIA on linux/older gpus on windows when using large buffers (?)
Also some performance things and fixes issues with opengl games loading textures weird.

* Cleanup, disable device local buffers for now.

* Add single queue support

Multiqueue seems to be a bit more responsive on NVIDIA. Should fix texture flush on intel. AMD has been forced to single queue for an experiment.

* Fix some validation errors around extended dynamic state

* Remove Intel bug workaround, it was fixed on the latest driver

* Use circular queue for checking consumption on command buffers

Speeds up games that spam command buffers a little. Avoids checking multiple command buffers if multiple are active at once.

* Use SupportBufferUpdater, add single layer flush

* Fix counter queue leak when game decides to use host conditional rendering

* Force device local storage for textures (fixes linux performance)

* Port #3019

* Insert barriers around vkCmdBlitImage (may fix some amd flicker)

* Fix transform feedback on Intel, gl_Position feedback and clears to inexistent depth buffers

* Don't pause transform feedback for multi draw

* Fix draw outside of render pass and missing capability

* Workaround for wrong last attribute on AMD (affects FFVII, STRIKERS1945, probably more)

* Better workaround for AMD vertex buffer size alignment issue

* More instructions + fixes on SPIR-V backend

* Allow custom aspect ratio on Vulkan

* Correct GTK UI status bar positions

* SPIR-V: Functions must always end with a return

* SPIR-V: Fix ImageQuerySizeLod

* SPIR-V: Set DepthReplacing execution mode when FragDepth is modified

* SPIR-V: Implement LoopContinue IR instruction

* SPIR-V: Geometry shader support

* SPIR-V: Use correct binding number on storage buffers array

* Reduce allocations for Spir-v serialization

Passes BinaryWriter instead of the stream to Write and WriteOperand

- Removes creation of BinaryWriter for each instruction
- Removes allocations for literal string

* Some optimizations to Spv.Generator

- Dictionary for lookups of type declarations, constants, extinst
- LiteralInteger internal data format -> ushort
- Deterministic HashCode implementation to avoid spirv result not being the same between runs
- Inline operand list instead of List<T>, falls back to array if many operands. (large performance boost)

TODO: improve instruction allocation, structured program creator, ssa?

* Pool Spv.Generator resources, cache delegates, spv opts

- Pools for Instructions and LiteralIntegers. Can be passed in when creating the generator module.
  - NewInstruction is called instead of new Instruction()
  - Ryujinx SpirvGenerator passes in some pools that are static. The idea is for these to be shared between threads eventually.
- Estimate code size when creating the output MemoryStream
- LiteralInteger pools using ThreadStatic pools that are initialized before and after creation... not sure of a better way since the way these are created is via implicit cast.

Also, cache delegates for Spv.Generator for functions that are passed around to GenerateBinary etc, since passing the function raw creates a delegate on each call.

TODO: update python spv cs generator to make the coregrammar with NewInstruction and the `params` overloads.

* LocalDefMap for Ssa Rewriter

Rather than allocating a large array of all registers for each block in the shader, allocate one array of all registers and clear it between blocks. Reduces allocations in the shader translator.

* SPIR-V: Transform feedback support

* SPIR-V: Fragment shader interlock support (and image coherency)

* SPIR-V: Add early fragment tests support

* SPIR-V: Implement SwizzleAdd, add missing Triangles ExecutionMode for geometry shaders, remove SamplerType field from TextureMeta

* Don't pass depth clip state right now (fix decals)

Explicitly disabling it is incorrect. OpenGL currently automatically disables based on depth clamp, which is the behaviour if this state is omitted.

* Multisampling support

* Multisampling: Use resolve if src samples count > dst samples count

* Multisampling: We can only resolve for unscaled copies

* SPIR-V: Only add FSI exec mode if used.

* SPIR-V: Use ConstantComposite for Texture Offset Vector

Fixes a bunch of freezes with SPIR-V on AMD hardware, and validation errors. Note: Obviously assumes input offsets are constant, which they currently are.

* SPIR-V: Don't OpReturn if we already OpExit'ed

Fixes spir-v parse failure and stack smashing in RADV (obviously you still need bolist)

* SPIR-V: Only use input attribute type for input attributes

Output vertex attributes should always be of type float.

* Multithreaded Pipeline Compilation

* Address some feedback

* Make this 32

* Update topology with GpuAccessorState

* Cleanup for merge (note: disables spir-v)

* Make more robust to shader compilation failure

- Don't freeze when GLSL compilation fails
- Background SPIR-V pipeline compile failure results in skipped draws, similar to GLSL compilation failure.

* Fix Multisampling

* Only update fragment scale count if a vertex texture needs a scale.

Fixes a performance regression introduced by texture scaling in the vertex stage where support buffer updates would be very frequent, even at 1x, if any textures were used on the vertex stage.

This check doesn't exactly look cheap (a flag in the shader stage would probably be preferred), but it is much cheaper than uploading scales in both vulkan and opengl, so it will do for now.

* Use a bitmap to do granular tracking for buffer uploads.

This path is only taken if the much faster check of "is the buffer rented at all" is triggered, so it doesn't actually end up costing too much, and the time saved by not ending render passes (and on gpu for not waiting on barriers) is probably helpful.

Avoids ending render passes to update buffer data (not all the time)
- 140-180 to 35-45 in SMO metro kingdom (these updates are in the UI)
- Very variable 60-150(!) to 16-25 in mario kart 8 (these updates are in the UI)

As well as allowing more data to be preloaded persistently, this will also allow more data to be loaded in the preload buffer, which should be faster as it doesn't need to insert barriers between draws. (and on tbdr, does not need to flush and reload tile memory)

Improves performance in GPU limited scenarios. Should notably improve performance on TBDR gpus. Still a lot more to do here.

* Copy query results after RP ends, rather than ending to copy

We need to end the render pass to get the data (submit command buffer) anyways...

Reduces render passes created in games that use queries.

* Rework Query stuff a bit to avoid render pass end

Tries to reset returned queries in background when possible, rather than ending the render pass.

Still ends render pass when resetting a counter after draws, but maybe that can be solved too. (by just pulling an empty object off the pool?)

* Remove unnecessary lines

Was for testing

* Fix validation error for query reset

Need to think of a better way to do this.

* SPIR-V: Fix SwizzleAdd and some validation errors

* SPIR-V: Implement attribute indexing and StoreAttribute

* SPIR-V: Fix TextureSize for MS and Buffer sampler types

* Fix relaunch issues

* SPIR-V: Implement LogicalExclusiveOr

* SPIR-V: Constant buffer indexing support

* Ignore unsupported attributes rather than throwing (matches current GLSL behaviour)

* SPIR-V: Implement tessellation support

* SPIR-V: Geometry shader passthrough support

* SPIR-V: Implement StoreShader8/16 and StoreStorage8/16

* SPIR-V: Resolution scale support and fix TextureSample multisample with LOD bug

* SPIR-V: Fix field index for scale count

* SPIR-V: Fix another case of wrong field index

* SPIRV/GLSL: More scaling related fixes

* SPIR-V: Fix ImageLoad CompositeExtract component type

* SPIR-V: Workaround for Intel FrontFacing bug

* Enable SPIR-V backend by default

* Allow null samplers (samplers are not required when only using texelFetch to access the texture)

* Fix some validation errors related to texel block view usage flag and invalid image barrier base level

* Use explicit subgroup size if we can (might fix some block flickering on AMD)

* Take componentMask and scissor into account when clearing framebuffer attachments

* Add missing barriers around CmdFillBuffer (fixes Monster Hunter Rise flickering on NVIDIA)

* Use ClampToEdge for Clamp sampler address mode on Vulkan (fixes Hollow Knight)

Clamp is unsupported on Vulkan, but ClampToEdge behaves almost the same. ClampToBorder on the other hand (which was being used before) is pretty different

* Shader specialization for new Vulkan required state (fixes remaining alpha test issues, vertex stretching on AMD on Crash Bandicoot, etc)

* Check if the subgroup size is supported before passing an explicit size

* Only enable ShaderFloat64 if the GPU supports it

* We don't need to recompile shaders if alpha test state changed but alpha test is disabled

* Enable shader cache on Vulkan and implement MultiplyHighS32/U32 on SPIR-V (missed those before)

* Fix pipeline state saving before it is updated.

This should fix a few warnings and potential stutters due to bad pipeline states being saved in the cache. You may need to clear your guest cache.

* Allow null samplers on OpenGL backend

* _unit0Sampler should be set only for binding 0

* Remove unused PipelineConverter format variable (was causing IOR)

* Raise textures limit to 64 on Vulkan

* No need to pack the shader binaries if shader cache is disabled

* Fix backbuffer not being cleared and scissor not being re-enabled on OpenGL

* Do not clear unbound framebuffer color attachments

* Geometry shader passthrough emulation

* Consolidate UpdateDepthMode and GetDepthMode implementation

* Fix A1B5G5R5 texture format and support R4G4 on Vulkan

* Add barrier before use of some modified images

* Report 32 bit query result on AMD windows (smo issue)

* Add texture recompression support (disabled for now)

It recompresses ASTC textures into BC7, which might reduce VRAM usage significantly on games that use ASTC textures

* Do not report R4G4 format as supported on Vulkan

It was causing mario head to become white on Super Mario 64 (???)

* Improvements to -1 to 1 depth mode.

- Transformation is only applied on the last stage in the vertex pipeline.
- Should fix some issues with geometry and tessellation (hopefully)
- Reading back FragCoord Z on fragment will transform back to -1 to 1.

* Geometry Shader index count from ThreadsPerInputPrimitive

Generally fixes SPIR-V emitting too many triangles, may change games in OpenGL

* Remove gl_FragDepth scaling

This is always 0-1; the other two issues were causing the problems. Fixes regression with Xenoblade.

* Add Gl StencilOp enum values to Vulkan

* Update guest cache to v1.1 (due to specialization state changes)

This will explode your shader cache from earlier vulkan build, but it must be done. 😔

* Vulkan/SPIR-V support for viewport inverse

* Fix typo

* Don't create query pools for unsupported query types

* Return of the Vector Indexing Bug

One day, everyone will get this right.

* Check for transform feedback query support

Sometimes transform feedback is supported without the query type.

* Fix gl_FragCoord.z transformation

FragCoord.z is always in 0-1, even when the real depth range is -1 to 1. Turns out the only bug was geo and tess stage outputs.

Fixes Pokemon Sword/Shield, possibly others.

* Fix Avalonia Rebase

Vulkan is currently not available on Avalonia, but the build does work and you can use opengl.

* Fix headless build

* Add support for BC6 and BC7 decompression, decompress all BC formats if they are not supported by the host

* Fix BCn 4/5 conversion, GetTextureTarget

BCn 4/5 could generate invalid data when a line's size in bytes was not divisible by 4, which both backends expect.

GetTextureTarget was not creating a view with the replacement format.

* Fix dependency

* Fix inverse viewport transform vector type on SPIR-V

* Do not require null descriptors support

* If MultiViewport is not supported, do not try to set more than one viewport/scissor

* Bounds check on bitmap add.

* Flush queries on attachment change rather than program change

Occlusion queries are usually used in a depth only pass so the attachments changing is a better indication of the query block ending.

Write mask changes are also considered since some games do depth only pass by setting 0 write mask on all the colour targets.

* Add support for avalonia (#6)

* add avalonia support

* only lock around skia flush

* addressed review

* cleanup

* add fallback size if avalonia attempts to render but the window size is 0. read desktop scale after enabling dpi check

* fix getting window handle on linux. skip render if size is 0

* Combine non-buffer with buffer image descriptor sets

* Support multisample texture copy with automatic resolve on Vulkan

* Remove old CompileShader methods from the Vulkan backend

* Add minimal pipeline layouts that only contains used bindings

They are used by helper shaders, the intention is avoiding needing to recompile the shaders (from GLSL to SPIR-V) if the bindings changes on the translated guest shaders

* Pre-compile helper shader as SPIR-V, and some fixes

* Remove pre-compiled shaderc binary for Windows as it's no longer needed by default

* Workaround RADV crash

Enabling the descriptor indexing extension, even if it is not used, forces the radv driver to use "bolist".

* Use RobustBufferAccess on NVIDIA gpus

Avoids the SMO waterfall triangle on older NVIDIA gpus.

* Implement GPU selector and expose texture recompression on the UI and config

* Fix and enable background compute shader compilation

Also disables warnings from shader cache pipeline misses.

* Fix error due to missing subpass dependency when Attachment Write -> Shader Read barriers are added

* If S8D24 is not supported, use D32FS8

* Ensure all fences are destroyed on dispose

* Pre-allocate arrays up front on DescriptorSetUpdater, allows the removal of some checks

* Add missing clear layer parameter after rebase

* Use selected gpu from config for avalonia (#7)

* use configured device

* address review

* Fix D32S8 copy workaround (AMD)

Fixes water in Pokemon Legends Arceus on AMD GPUs. Possibly fixes other things.

* Use push descriptors for uniform buffer updates (disabled for now)

* Push descriptor support check, buffer redundancy checks

Should make push descriptors faster, needs more testing though.

* Increase light command buffer pool to 2 command buffers, throw rather than returning invalid cbs

* Adjust bindings array sizes

* Force submit command buffers if memory in use by its resources is high

* Add workaround for AMD GCN cubemap view sins

`ImageCreateCubeCompatibleBit` seems to generally break 2D array textures with mipmaps... even if they are eventually aliased as a cubemap with mipmaps. Forcing a copy here works around the issue.

This could be used in future if enabling this bit reduces performance on certain GPUs. (mobile class is generally a worry)

Currently also enabled on Linux as I don't know if they managed to dodge this bug (someone please tell me). Not enabled on Vega at the moment, but easy to add if the issue is there.

* Add mobile, non-RX variants to the GCN regex.

Also make sure that the 3 digit ones only include numbers starting with 7 or 8.

* Increase image limit per stage from 8 to 16

Xenoblade Chronicles 2 was hitting the limit of 8

* Minor code cleanup

* Fix NRE caused by SupportBufferUpdater calling pipeline ClearBuffer

* Add gpu selector to Avalonia (#8)

* Add gpu selector to avalonia settings

* show backend label on window

* some fixes

* address review

* Minor changes to the Avalonia UI

* Update graphics window UI and locales. (#9)

* Update xaml and update locales

* locale updates

Did my best here but likely needs to be checked by native speakers, especially the use of ampersands in greek, russian and turkish?

* Fix locales with more (?) correct translations.

* add separator to render widget

* fix spanish and portuguese

* Add new IdList, replaces buffer list that could not remove elements and had unbounded growth

* Don't crash the settings window if Vulkan is not supported

* Fix Actions menu not being clickable on GTK UI after relaunch

* Rename VulkanGraphicsDevice to VulkanRenderer and Renderer to OpenGLRenderer

* Fix IdList and make it not thread safe

* Revert useless OpenGL format table changes

* Fix headless project build

* List throws ArgumentOutOfRangeException

* SPIR-V: Fix tessellation

* Increase shader cache version due to tessellation fix

* Reduce number of Sync objects created (improves perf in some specific titles)

* Fix vulkan validation errors for NPOT compressed upload and GCN workaround.

* Add timestamp to the shader cache and force rebuild if host cache is outdated

* Prefer Mail box present mode for popups (#11)

* Prefer Mail box present mode

* fix debug

* switch present mode when vsync is toggled

* only disable vsync on the main window

* SPIR-V: Fix geometry shader input load with transform feedback

* BC7 Encoder: Prefer more precision on alpha rather than RGB when alpha is 0

* Fix Avalonia build

* Address initial PR feedback

* Only set transform feedback outputs on last vertex stage

* Address riperiperi PR feedback

* Remove outdated comment

* Remove unused constructor

* Only throw for negative results

* Throw for QueueSubmit and other errors

No point in delaying the inevitable

* Transform feedback decorations inside gl_PerVertex struct breaks the NVIDIA compiler

* Fix some resolution scale issues

* No need for two UpdateScale calls

* Fix comments on SPIR-V generator project

* Try to fix shader local memory size

On DOOM, a shader is using local memory, but both Low and High size are 0, CRS size is 1536, it seems to store on that region?

* Remove RectangleF that is now unused

* Fix ImageGather with multiple offsets

Needs ImageGatherExtended capability, and must use `ConstantComposite` instead of `CompositeConstruct`

* Address PR feedback from jD in all projects except Avalonia

* Address most of jD PR feedback on Avalonia

* Remove unsafe

* Fix VulkanSkiaGpu

* move present mode request out of Create Swapchain method

* split more parts of create swapchain

* addressed reviews

* addressed review

* Address second batch of jD PR feedback

* Fix buffer <-> image copy row length and height alignment

AlignUp helper does not support NPOT alignment, and ASTC textures can have NPOT block sizes

* Better fix for NPOT alignment issue

* Use switch expressions on Vulkan EnumConversion

Thanks jD

* Fix Avalonia build

* Add Vulkan selection prompt on startup

* Grammar fixes on Vulkan prompt message

* Add missing Vulkan migration flag

Co-authored-by: riperiperi <rhy3756547@hotmail.com>
Co-authored-by: Emmanuel Hansen <emmausssss@gmail.com>
Co-authored-by: MutantAura <44103205+MutantAura@users.noreply.github.com>
2022-07-31 18:26:06 -03:00

1327 lines
54 KiB
C#

using System;
using System.Diagnostics;
using System.Runtime.CompilerServices;
using System.Runtime.Intrinsics;
using System.Runtime.Intrinsics.X86;
namespace Ryujinx.Graphics.Texture.Utils
{
static class BC67Utils
{
// Quantization lookup tables, one per endpoint bit depth (4..8 bits).
// The p-bit variant is indexed by (pBit << 8) | component (512 entries);
// the no-p-bit variant by the component value alone (256 entries).
private static byte[][] _quantizationLut;
private static byte[][] _quantizationLutNoPBit;

/// <summary>
/// Pre-computes the quantization lookup tables for every endpoint
/// depth used by the BC6/BC7 encoders (4 to 8 bits).
/// </summary>
static BC67Utils()
{
    _quantizationLut = new byte[5][];
    _quantizationLutNoPBit = new byte[5][];

    for (int depth = 4; depth < 9; depth++)
    {
        byte[] withPBit = new byte[512];
        byte[] withoutPBit = new byte[256];

        for (int index = 0; index < withPBit.Length; index++)
        {
            // The high bit of the index (index >> 8) carries the p-bit value.
            withPBit[index] = QuantizeComponentForLut((byte)index, depth, index >> 8);

            if (index < withoutPBit.Length)
            {
                withoutPBit[index] = QuantizeComponentForLut((byte)index, depth);
            }
        }

        _quantizationLut[depth - 4] = withPBit;
        _quantizationLutNoPBit[depth - 4] = withoutPBit;
    }
}
/// <summary>
/// Computes the per-channel minimum and maximum colors of a tile.
/// </summary>
/// <param name="tile">Tile pixels as packed RGBA8 values</param>
/// <param name="w">Tile width in pixels</param>
/// <param name="h">Tile height in pixels</param>
/// <returns>Tuple of (minimum color, maximum color)</returns>
public static (RgbaColor8, RgbaColor8) GetMinMaxColors(ReadOnlySpan<uint> tile, int w, int h)
{
    // Full 4x4 tiles take the vectorized path when SSE4.1 is available.
    if (Sse41.IsSupported && w == 4 && h == 4)
    {
        GetMinMaxColorsOneSubset4x4Sse41(tile, out RgbaColor8 minColor, out RgbaColor8 maxColor);

        return (minColor, maxColor);
    }

    // Scalar fallback: start min at white and max at zero, then fold in
    // every pixel channel by channel.
    RgbaColor8 min = new RgbaColor8(255, 255, 255, 255);
    RgbaColor8 max = default;

    foreach (uint packed in tile)
    {
        RgbaColor8 color = RgbaColor8.FromUInt32(packed);

        min.R = Math.Min(min.R, color.R);
        min.G = Math.Min(min.G, color.G);
        min.B = Math.Min(min.B, color.B);
        min.A = Math.Min(min.A, color.A);

        max.R = Math.Max(max.R, color.R);
        max.G = Math.Max(max.G, color.G);
        max.B = Math.Max(max.B, color.B);
        max.A = Math.Max(max.A, color.A);
    }

    return (min, max);
}
/// <summary>
/// Computes the per-channel minimum and maximum colors of each partition
/// subset of a tile.
/// </summary>
/// <param name="partitionTable">Subset index for each pixel of the tile</param>
/// <param name="tile">Tile pixels as packed RGBA8 values</param>
/// <param name="w">Tile width in pixels</param>
/// <param name="h">Tile height in pixels</param>
/// <param name="minColors">Output span receiving the minimum color of each subset</param>
/// <param name="maxColors">Output span receiving the maximum color of each subset</param>
/// <param name="subsetCount">Number of subsets in the partition</param>
public static void GetMinMaxColors(
    ReadOnlySpan<byte> partitionTable,
    ReadOnlySpan<uint> tile,
    int w,
    int h,
    Span<RgbaColor8> minColors,
    Span<RgbaColor8> maxColors,
    int subsetCount)
{
    // Vectorized paths exist for full 4x4 tiles with one or two subsets;
    // everything else (including three subsets) uses the scalar loop below.
    if (Sse41.IsSupported && w == 4 && h == 4)
    {
        switch (subsetCount)
        {
            case 1:
                GetMinMaxColorsOneSubset4x4Sse41(tile, out minColors[0], out maxColors[0]);
                return;
            case 2:
                GetMinMaxColorsTwoSubsets4x4Sse41(partitionTable, tile, minColors, maxColors);
                return;
        }
    }

    // NOTE(review): only minColors is reset here; maxColors appears to be
    // assumed zero-initialized by the caller — TODO confirm against call sites.
    minColors.Fill(new RgbaColor8(255, 255, 255, 255));

    int index = 0;

    for (int y = 0; y < h; y++)
    {
        for (int x = 0; x < w; x++)
        {
            int subset = partitionTable[y * w + x];
            RgbaColor8 color = RgbaColor8.FromUInt32(tile[index++]);

            minColors[subset].R = Math.Min(minColors[subset].R, color.R);
            minColors[subset].G = Math.Min(minColors[subset].G, color.G);
            minColors[subset].B = Math.Min(minColors[subset].B, color.B);
            minColors[subset].A = Math.Min(minColors[subset].A, color.A);

            maxColors[subset].R = Math.Max(maxColors[subset].R, color.R);
            maxColors[subset].G = Math.Max(maxColors[subset].G, color.G);
            maxColors[subset].B = Math.Max(maxColors[subset].B, color.B);
            maxColors[subset].A = Math.Max(maxColors[subset].A, color.A);
        }
    }
}
/// <summary>
/// SSE4.1 path: computes per-channel min/max colors of a full 4x4 tile.
/// </summary>
private static unsafe void GetMinMaxColorsOneSubset4x4Sse41(ReadOnlySpan<uint> tile, out RgbaColor8 minColor, out RgbaColor8 maxColor)
{
    Vector128<byte> r0, r1, r2, r3;

    // Load the 16 RGBA8 pixels as four vectors of one row (4 pixels) each.
    fixed (uint* pTile = tile)
    {
        r0 = Sse2.LoadVector128(pTile).AsByte();
        r1 = Sse2.LoadVector128(pTile + 4).AsByte();
        r2 = Sse2.LoadVector128(pTile + 8).AsByte();
        r3 = Sse2.LoadVector128(pTile + 12).AsByte();
    }

    // Byte-wise pairwise reduction across the four rows, then a horizontal
    // reduction of the 4 remaining pixels down to a single color.
    Vector128<byte> min = Sse2.Min(Sse2.Min(r0, r1), Sse2.Min(r2, r3));
    Vector128<byte> max = Sse2.Max(Sse2.Max(r0, r1), Sse2.Max(r2, r3));

    minColor = HorizontalMin(min);
    maxColor = HorizontalMax(max);
}
// SSE4.1 path: computes per-channel min/max colors of a full 4x4 tile that is
// split into two partition subsets. minColors/maxColors receive one entry per
// subset (index 0 and 1).
private static unsafe void GetMinMaxColorsTwoSubsets4x4Sse41(
ReadOnlySpan<byte> partitionTable,
ReadOnlySpan<uint> tile,
Span<RgbaColor8> minColors,
Span<RgbaColor8> maxColors)
{
// Load the 16 per-pixel subset indices (one byte each).
Vector128<byte> partitionMask;
fixed (byte* pPartitionTable = partitionTable)
{
partitionMask = Sse2.LoadVector128(pPartitionTable);
}
// subset0Mask has 0xFF for pixels belonging to subset 0, 0x00 otherwise.
Vector128<byte> subset0Mask = Sse2.CompareEqual(partitionMask, Vector128<byte>.Zero);
// Widen each per-pixel mask byte to 4 bytes (one per RGBA channel),
// producing one full-width mask per tile row (4 pixels * 4 bytes).
Vector128<byte> subset0MaskRep16Low = Sse2.UnpackLow(subset0Mask, subset0Mask);
Vector128<byte> subset0MaskRep16High = Sse2.UnpackHigh(subset0Mask, subset0Mask);
Vector128<byte> subset0Mask0 = Sse2.UnpackLow(subset0MaskRep16Low.AsInt16(), subset0MaskRep16Low.AsInt16()).AsByte();
Vector128<byte> subset0Mask1 = Sse2.UnpackHigh(subset0MaskRep16Low.AsInt16(), subset0MaskRep16Low.AsInt16()).AsByte();
Vector128<byte> subset0Mask2 = Sse2.UnpackLow(subset0MaskRep16High.AsInt16(), subset0MaskRep16High.AsInt16()).AsByte();
Vector128<byte> subset0Mask3 = Sse2.UnpackHigh(subset0MaskRep16High.AsInt16(), subset0MaskRep16High.AsInt16()).AsByte();
Vector128<byte> min0 = Vector128<byte>.AllBitsSet;
Vector128<byte> min1 = Vector128<byte>.AllBitsSet;
Vector128<byte> max0 = Vector128<byte>.Zero;
Vector128<byte> max1 = Vector128<byte>.Zero;
Vector128<byte> row0, row1, row2, row3;
// Load the 16 RGBA8 pixels, one vector (4 pixels) per tile row.
fixed (uint* pTile = tile)
{
row0 = Sse2.LoadVector128(pTile).AsByte();
row1 = Sse2.LoadVector128(pTile + 4).AsByte();
row2 = Sse2.LoadVector128(pTile + 8).AsByte();
row3 = Sse2.LoadVector128(pTile + 12).AsByte();
}
// Subset 0 min: blend keeps the current min0 bytes where the pixel is NOT
// in subset 0, so those lanes never lower the minimum.
min0 = Sse2.Min(min0, Sse41.BlendVariable(min0, row0, subset0Mask0));
min0 = Sse2.Min(min0, Sse41.BlendVariable(min0, row1, subset0Mask1));
min0 = Sse2.Min(min0, Sse41.BlendVariable(min0, row2, subset0Mask2));
min0 = Sse2.Min(min0, Sse41.BlendVariable(min0, row3, subset0Mask3));
// Subset 1 min: OR-ing with the subset-0 mask forces subset-0 lanes to
// 0xFF so they never lower the subset-1 minimum.
min1 = Sse2.Min(min1, Sse2.Or(row0, subset0Mask0));
min1 = Sse2.Min(min1, Sse2.Or(row1, subset0Mask1));
min1 = Sse2.Min(min1, Sse2.Or(row2, subset0Mask2));
min1 = Sse2.Min(min1, Sse2.Or(row3, subset0Mask3));
// Subset 0 max: AND-ing zeroes subset-1 lanes so they never raise the max.
max0 = Sse2.Max(max0, Sse2.And(row0, subset0Mask0));
max0 = Sse2.Max(max0, Sse2.And(row1, subset0Mask1));
max0 = Sse2.Max(max0, Sse2.And(row2, subset0Mask2));
max0 = Sse2.Max(max0, Sse2.And(row3, subset0Mask3));
// Subset 1 max: AndNot(mask, row) zeroes subset-0 lanes instead.
max1 = Sse2.Max(max1, Sse2.AndNot(subset0Mask0, row0));
max1 = Sse2.Max(max1, Sse2.AndNot(subset0Mask1, row1));
max1 = Sse2.Max(max1, Sse2.AndNot(subset0Mask2, row2));
max1 = Sse2.Max(max1, Sse2.AndNot(subset0Mask3, row3));
// Horizontally reduce the 4 surviving pixels of each accumulator.
minColors[0] = HorizontalMin(min0);
minColors[1] = HorizontalMin(min1);
maxColors[0] = HorizontalMax(max0);
maxColors[1] = HorizontalMax(max1);
}
/// <summary>
/// Reduces the four 32-bit color lanes of a vector to a single color by
/// taking the byte-wise minimum, returned from lane 0.
/// </summary>
private static RgbaColor8 HorizontalMin(Vector128<byte> x)
{
    // Fold lanes pairwise: first (0,1) and (2,3), then (0,2).
    Vector128<byte> folded = Sse2.Min(x, Sse2.Shuffle(x.AsInt32(), 0x31).AsByte());
    folded = Sse2.Min(folded, Sse2.Shuffle(folded.AsInt32(), 2).AsByte());

    return RgbaColor8.FromUInt32(folded.AsUInt32().GetElement(0));
}
/// <summary>
/// Reduces the four 32-bit color lanes of a vector to a single color by
/// taking the byte-wise maximum, returned from lane 0.
/// </summary>
private static RgbaColor8 HorizontalMax(Vector128<byte> x)
{
    // Fold lanes pairwise: first (0,1) and (2,3), then (0,2).
    Vector128<byte> folded = Sse2.Max(x, Sse2.Shuffle(x.AsInt32(), 0x31).AsByte());
    folded = Sse2.Max(folded, Sse2.Shuffle(folded.AsInt32(), 2).AsByte());

    return RgbaColor8.FromUInt32(folded.AsUInt32().GetElement(0));
}
/// <summary>
/// Evaluates the palette index selection for a set of pixel values against
/// a pair of endpoints, dispatching to an SSE4.1 implementation for the
/// supported index widths and falling back to scalar code otherwise.
/// </summary>
/// <param name="values">Packed RGBA8 pixel values</param>
/// <param name="endPoint0">First endpoint color</param>
/// <param name="endPoint1">Second endpoint color</param>
/// <param name="pBit0">P-bit of the first endpoint</param>
/// <param name="pBit1">P-bit of the second endpoint</param>
/// <param name="indexBitCount">Bits per palette index</param>
/// <param name="indexCount">Number of palette entries</param>
/// <param name="colorDepth">Endpoint color channel bit depth</param>
/// <param name="alphaDepth">Endpoint alpha channel bit depth</param>
/// <param name="alphaMask">Mask applied to force alpha bits on the inputs</param>
/// <returns>Accumulated error of the selection</returns>
public static int SelectIndices(
    ReadOnlySpan<uint> values,
    uint endPoint0,
    uint endPoint1,
    int pBit0,
    int pBit1,
    int indexBitCount,
    int indexCount,
    int colorDepth,
    int alphaDepth,
    uint alphaMask)
{
    if (Sse41.IsSupported)
    {
        // Vectorized implementations exist for 2, 3 and 4 bit indices.
        switch (indexBitCount)
        {
            case 2:
                return Select2BitIndicesSse41(
                    values,
                    endPoint0,
                    endPoint1,
                    pBit0,
                    pBit1,
                    indexBitCount,
                    indexCount,
                    colorDepth,
                    alphaDepth,
                    alphaMask);
            case 3:
                return Select3BitIndicesSse41(
                    values,
                    endPoint0,
                    endPoint1,
                    pBit0,
                    pBit1,
                    indexBitCount,
                    indexCount,
                    colorDepth,
                    alphaDepth,
                    alphaMask);
            case 4:
                return Select4BitIndicesOneSubsetSse41(
                    values,
                    endPoint0,
                    endPoint1,
                    pBit0,
                    pBit1,
                    indexBitCount,
                    indexCount,
                    colorDepth,
                    alphaDepth,
                    alphaMask);
        }
    }

    return SelectIndicesFallback(
        values,
        endPoint0,
        endPoint1,
        pBit0,
        pBit1,
        indexBitCount,
        indexCount,
        colorDepth,
        alphaDepth,
        alphaMask);
}
// SSE4.1 path: for each input pixel, finds the closest entry of the 4-entry
// palette interpolated between the two quantized endpoints (2-bit indices)
// and accumulates the squared-distance error. Only the total error is
// returned; the chosen indices themselves are not written out.
// NOTE(review): indexBitCount/indexCount are unused here — presumably kept
// so the signature matches the other Select*Indices implementations.
private static unsafe int Select2BitIndicesSse41(
ReadOnlySpan<uint> values,
uint endPoint0,
uint endPoint1,
int pBit0,
int pBit1,
int indexBitCount,
int indexCount,
int colorDepth,
int alphaDepth,
uint alphaMask)
{
// When the format has no alpha, force palette alpha to 255 so the alpha
// channel never contributes to the error.
uint alphaMaskForPalette = alphaMask;
if (alphaDepth == 0)
{
alphaMaskForPalette |= new RgbaColor8(0, 0, 0, 255).ToUInt32();
}
int errorSum = 0;
// Quantize both endpoints to the block's actual storage precision.
RgbaColor8 c0 = Quantize(RgbaColor8.FromUInt32(endPoint0), colorDepth, alphaDepth, pBit0);
RgbaColor8 c1 = Quantize(RgbaColor8.FromUInt32(endPoint1), colorDepth, alphaDepth, pBit1);
Vector128<byte> c0Rep = Vector128.Create(c0.ToUInt32() | alphaMaskForPalette).AsByte();
Vector128<byte> c1Rep = Vector128.Create(c1.ToUInt32() | alphaMaskForPalette).AsByte();
// Interleave endpoint bytes as (c0, c1) pairs so each pair can be
// weighted with a single multiply-add.
Vector128<byte> c0c1 = Sse2.UnpackLow(c0Rep, c1Rep);
Vector128<byte> rWeights;
Vector128<byte> lWeights;
// Load the four 2-bit interpolation weights and their inverses
// (weight for c1, inverse weight for c0).
fixed (byte* pWeights = BC67Tables.Weights[0], pInvWeights = BC67Tables.InverseWeights[0])
{
rWeights = Sse2.LoadScalarVector128((uint*)pWeights).AsByte();
lWeights = Sse2.LoadScalarVector128((uint*)pInvWeights).AsByte();
}
// Interleave to (invWeight, weight) pairs, then replicate each pair
// across all four RGBA channels of its palette entry.
Vector128<byte> iWeights = Sse2.UnpackLow(lWeights, rWeights);
Vector128<byte> iWeights01 = Sse2.UnpackLow(iWeights.AsInt16(), iWeights.AsInt16()).AsByte();
Vector128<byte> iWeights0 = Sse2.UnpackLow(iWeights01.AsInt16(), iWeights01.AsInt16()).AsByte();
Vector128<byte> iWeights1 = Sse2.UnpackHigh(iWeights01.AsInt16(), iWeights01.AsInt16()).AsByte();
// MultiplyAddAdjacent (pmaddubsw) computes c0*invWeight + c1*weight per
// byte pair, yielding 16-bit palette channels; pal0 holds entries 0-1 and
// pal1 entries 2-3. ShiftRoundToNearest presumably normalizes the weight
// scale with rounding — helper not visible here, TODO confirm.
Vector128<short> pal0 = ShiftRoundToNearest(Ssse3.MultiplyAddAdjacent(c0c1, iWeights0.AsSByte()));
Vector128<short> pal1 = ShiftRoundToNearest(Ssse3.MultiplyAddAdjacent(c0c1, iWeights1.AsSByte()));
for (int i = 0; i < values.Length; i++)
{
uint c = values[i] | alphaMask;
// Widen the pixel to 16-bit channels, duplicated across both palette
// entry slots of each vector.
Vector128<short> color = Sse41.ConvertToVector128Int16(Vector128.Create(c).AsByte());
Vector128<short> delta0 = Sse2.Subtract(color, pal0);
Vector128<short> delta1 = Sse2.Subtract(color, pal1);
// Squared channel differences summed in pairs (pmaddwd)...
Vector128<int> deltaSum0 = Sse2.MultiplyAddAdjacent(delta0, delta0);
Vector128<int> deltaSum1 = Sse2.MultiplyAddAdjacent(delta1, delta1);
// ...then combined so each 32-bit lane holds the squared distance to
// one of the four palette entries.
Vector128<int> deltaSum01 = Ssse3.HorizontalAdd(deltaSum0, deltaSum1);
Vector128<ushort> delta = Sse41.PackUnsignedSaturate(deltaSum01, deltaSum01);
// phminposuw: the smallest of the four distances is the error of the
// best index for this pixel.
Vector128<ushort> min = Sse41.MinHorizontal(delta);
ushort error = min.GetElement(0);
errorSum += error;
}
return errorSum;
}
/// <summary>
/// Computes the total quantization error of <paramref name="values"/> against the 8 entry
/// palette (3-bit indices) generated from the given endpoint pair, using SSE4.1.
/// </summary>
/// <remarks>
/// <paramref name="indexBitCount"/> and <paramref name="indexCount"/> are unused here; the
/// implementation is hardcoded for 8 palette entries. Per-pixel errors are saturated to
/// 16 bits by the packing step, so very large errors are clamped rather than exact.
/// </remarks>
private static unsafe int Select3BitIndicesSse41(
    ReadOnlySpan<uint> values,
    uint endPoint0,
    uint endPoint1,
    int pBit0,
    int pBit1,
    int indexBitCount,
    int indexCount,
    int colorDepth,
    int alphaDepth,
    uint alphaMask)
{
    uint alphaMaskForPalette = alphaMask;
    if (alphaDepth == 0)
    {
        // No encoded alpha: force the palette alpha to fully opaque.
        alphaMaskForPalette |= new RgbaColor8(0, 0, 0, 255).ToUInt32();
    }
    int errorSum = 0;
    RgbaColor8 c0 = Quantize(RgbaColor8.FromUInt32(endPoint0), colorDepth, alphaDepth, pBit0);
    RgbaColor8 c1 = Quantize(RgbaColor8.FromUInt32(endPoint1), colorDepth, alphaDepth, pBit1);
    // Broadcast both endpoints and interleave their bytes so each channel appears as a
    // (c0, c1) byte pair, ready for the per-pair multiply-add below.
    Vector128<byte> c0Rep = Vector128.Create(c0.ToUInt32() | alphaMaskForPalette).AsByte();
    Vector128<byte> c1Rep = Vector128.Create(c1.ToUInt32() | alphaMaskForPalette).AsByte();
    Vector128<byte> c0c1 = Sse2.UnpackLow(c0Rep, c1Rep);
    Vector128<byte> rWeights;
    Vector128<byte> lWeights;
    // Load the 8 interpolation weights and inverse weights for 3-bit indices (8 bytes each).
    fixed (byte* pWeights = BC67Tables.Weights[1], pInvWeights = BC67Tables.InverseWeights[1])
    {
        rWeights = Sse2.LoadScalarVector128((ulong*)pWeights).AsByte();
        lWeights = Sse2.LoadScalarVector128((ulong*)pInvWeights).AsByte();
    }
    // Interleave into (invWeight, weight) pairs, then replicate so iWeightsN holds the
    // pairs for palette entries 2N (low half) and 2N+1 (high half) across all channels.
    Vector128<byte> iWeights = Sse2.UnpackLow(lWeights, rWeights);
    Vector128<byte> iWeights01 = Sse2.UnpackLow(iWeights.AsInt16(), iWeights.AsInt16()).AsByte();
    Vector128<byte> iWeights23 = Sse2.UnpackHigh(iWeights.AsInt16(), iWeights.AsInt16()).AsByte();
    Vector128<byte> iWeights0 = Sse2.UnpackLow(iWeights01.AsInt16(), iWeights01.AsInt16()).AsByte();
    Vector128<byte> iWeights1 = Sse2.UnpackHigh(iWeights01.AsInt16(), iWeights01.AsInt16()).AsByte();
    Vector128<byte> iWeights2 = Sse2.UnpackLow(iWeights23.AsInt16(), iWeights23.AsInt16()).AsByte();
    Vector128<byte> iWeights3 = Sse2.UnpackHigh(iWeights23.AsInt16(), iWeights23.AsInt16()).AsByte();
    // pmaddubsw computes invW*c0 + w*c1 per channel; the shift divides by 64 with rounding.
    // Each palN vector holds two interpolated palette entries (entries 2N and 2N+1).
    Vector128<short> pal0 = ShiftRoundToNearest(Ssse3.MultiplyAddAdjacent(c0c1, iWeights0.AsSByte()));
    Vector128<short> pal1 = ShiftRoundToNearest(Ssse3.MultiplyAddAdjacent(c0c1, iWeights1.AsSByte()));
    Vector128<short> pal2 = ShiftRoundToNearest(Ssse3.MultiplyAddAdjacent(c0c1, iWeights2.AsSByte()));
    Vector128<short> pal3 = ShiftRoundToNearest(Ssse3.MultiplyAddAdjacent(c0c1, iWeights3.AsSByte()));
    for (int i = 0; i < values.Length; i++)
    {
        uint c = values[i] | alphaMask;
        // Widen the RGBA bytes (repeated twice) to 16 bits to match the palette layout.
        Vector128<short> color = Sse41.ConvertToVector128Int16(Vector128.Create(c).AsByte());
        Vector128<short> delta0 = Sse2.Subtract(color, pal0);
        Vector128<short> delta1 = Sse2.Subtract(color, pal1);
        Vector128<short> delta2 = Sse2.Subtract(color, pal2);
        Vector128<short> delta3 = Sse2.Subtract(color, pal3);
        // pmaddwd squares and pairwise-sums the channel deltas; the horizontal adds
        // reduce them into one squared error per palette entry (8 totals).
        Vector128<int> deltaSum0 = Sse2.MultiplyAddAdjacent(delta0, delta0);
        Vector128<int> deltaSum1 = Sse2.MultiplyAddAdjacent(delta1, delta1);
        Vector128<int> deltaSum2 = Sse2.MultiplyAddAdjacent(delta2, delta2);
        Vector128<int> deltaSum3 = Sse2.MultiplyAddAdjacent(delta3, delta3);
        Vector128<int> deltaSum01 = Ssse3.HorizontalAdd(deltaSum0, deltaSum1);
        Vector128<int> deltaSum23 = Ssse3.HorizontalAdd(deltaSum2, deltaSum3);
        // Saturating pack to 16 bits, then phminposuw picks the smallest error.
        Vector128<ushort> delta = Sse41.PackUnsignedSaturate(deltaSum01, deltaSum23);
        Vector128<ushort> min = Sse41.MinHorizontal(delta);
        ushort error = min.GetElement(0);
        errorSum += error;
    }
    return errorSum;
}
/// <summary>
/// Computes the total quantization error of <paramref name="values"/> against the 16 entry
/// palette (4-bit indices) generated from the given endpoint pair, using SSE4.1.
/// </summary>
/// <remarks>
/// <paramref name="indexBitCount"/> and <paramref name="indexCount"/> are unused here; the
/// implementation is hardcoded for 16 palette entries, processed as two banks of 8.
/// Per-pixel errors are saturated to 16 bits by the packing step.
/// </remarks>
private static unsafe int Select4BitIndicesOneSubsetSse41(
    ReadOnlySpan<uint> values,
    uint endPoint0,
    uint endPoint1,
    int pBit0,
    int pBit1,
    int indexBitCount,
    int indexCount,
    int colorDepth,
    int alphaDepth,
    uint alphaMask)
{
    uint alphaMaskForPalette = alphaMask;
    if (alphaDepth == 0)
    {
        // No encoded alpha: force the palette alpha to fully opaque.
        alphaMaskForPalette |= new RgbaColor8(0, 0, 0, 255).ToUInt32();
    }
    int errorSum = 0;
    RgbaColor8 c0 = Quantize(RgbaColor8.FromUInt32(endPoint0), colorDepth, alphaDepth, pBit0);
    RgbaColor8 c1 = Quantize(RgbaColor8.FromUInt32(endPoint1), colorDepth, alphaDepth, pBit1);
    // Broadcast both endpoints and interleave their bytes into per-channel (c0, c1) pairs.
    Vector128<byte> c0Rep = Vector128.Create(c0.ToUInt32() | alphaMaskForPalette).AsByte();
    Vector128<byte> c1Rep = Vector128.Create(c1.ToUInt32() | alphaMaskForPalette).AsByte();
    Vector128<byte> c0c1 = Sse2.UnpackLow(c0Rep, c1Rep);
    Vector128<byte> rWeights;
    Vector128<byte> lWeights;
    // Load the 16 interpolation weights and inverse weights for 4-bit indices.
    fixed (byte* pWeights = BC67Tables.Weights[2], pInvWeights = BC67Tables.InverseWeights[2])
    {
        rWeights = Sse2.LoadVector128(pWeights);
        lWeights = Sse2.LoadVector128(pInvWeights);
    }
    // Interleave into (invWeight, weight) pairs and replicate so iWeightsN holds the pairs
    // for palette entries 2N (low half) and 2N+1 (high half) across all channels.
    Vector128<byte> iWeightsLow = Sse2.UnpackLow(lWeights, rWeights);
    Vector128<byte> iWeightsHigh = Sse2.UnpackHigh(lWeights, rWeights);
    Vector128<byte> iWeights01 = Sse2.UnpackLow(iWeightsLow.AsInt16(), iWeightsLow.AsInt16()).AsByte();
    Vector128<byte> iWeights23 = Sse2.UnpackHigh(iWeightsLow.AsInt16(), iWeightsLow.AsInt16()).AsByte();
    Vector128<byte> iWeights45 = Sse2.UnpackLow(iWeightsHigh.AsInt16(), iWeightsHigh.AsInt16()).AsByte();
    Vector128<byte> iWeights67 = Sse2.UnpackHigh(iWeightsHigh.AsInt16(), iWeightsHigh.AsInt16()).AsByte();
    Vector128<byte> iWeights0 = Sse2.UnpackLow(iWeights01.AsInt16(), iWeights01.AsInt16()).AsByte();
    Vector128<byte> iWeights1 = Sse2.UnpackHigh(iWeights01.AsInt16(), iWeights01.AsInt16()).AsByte();
    Vector128<byte> iWeights2 = Sse2.UnpackLow(iWeights23.AsInt16(), iWeights23.AsInt16()).AsByte();
    Vector128<byte> iWeights3 = Sse2.UnpackHigh(iWeights23.AsInt16(), iWeights23.AsInt16()).AsByte();
    Vector128<byte> iWeights4 = Sse2.UnpackLow(iWeights45.AsInt16(), iWeights45.AsInt16()).AsByte();
    Vector128<byte> iWeights5 = Sse2.UnpackHigh(iWeights45.AsInt16(), iWeights45.AsInt16()).AsByte();
    Vector128<byte> iWeights6 = Sse2.UnpackLow(iWeights67.AsInt16(), iWeights67.AsInt16()).AsByte();
    Vector128<byte> iWeights7 = Sse2.UnpackHigh(iWeights67.AsInt16(), iWeights67.AsInt16()).AsByte();
    // pmaddubsw computes invW*c0 + w*c1 per channel; the shift divides by 64 with rounding.
    // Each palN vector holds two interpolated palette entries (entries 2N and 2N+1).
    Vector128<short> pal0 = ShiftRoundToNearest(Ssse3.MultiplyAddAdjacent(c0c1, iWeights0.AsSByte()));
    Vector128<short> pal1 = ShiftRoundToNearest(Ssse3.MultiplyAddAdjacent(c0c1, iWeights1.AsSByte()));
    Vector128<short> pal2 = ShiftRoundToNearest(Ssse3.MultiplyAddAdjacent(c0c1, iWeights2.AsSByte()));
    Vector128<short> pal3 = ShiftRoundToNearest(Ssse3.MultiplyAddAdjacent(c0c1, iWeights3.AsSByte()));
    Vector128<short> pal4 = ShiftRoundToNearest(Ssse3.MultiplyAddAdjacent(c0c1, iWeights4.AsSByte()));
    Vector128<short> pal5 = ShiftRoundToNearest(Ssse3.MultiplyAddAdjacent(c0c1, iWeights5.AsSByte()));
    Vector128<short> pal6 = ShiftRoundToNearest(Ssse3.MultiplyAddAdjacent(c0c1, iWeights6.AsSByte()));
    Vector128<short> pal7 = ShiftRoundToNearest(Ssse3.MultiplyAddAdjacent(c0c1, iWeights7.AsSByte()));
    for (int i = 0; i < values.Length; i++)
    {
        uint c = values[i] | alphaMask;
        // Widen the RGBA bytes (repeated twice) to 16 bits to match the palette layout.
        Vector128<short> color = Sse41.ConvertToVector128Int16(Vector128.Create(c).AsByte());
        Vector128<short> delta0 = Sse2.Subtract(color, pal0);
        Vector128<short> delta1 = Sse2.Subtract(color, pal1);
        Vector128<short> delta2 = Sse2.Subtract(color, pal2);
        Vector128<short> delta3 = Sse2.Subtract(color, pal3);
        Vector128<short> delta4 = Sse2.Subtract(color, pal4);
        Vector128<short> delta5 = Sse2.Subtract(color, pal5);
        Vector128<short> delta6 = Sse2.Subtract(color, pal6);
        Vector128<short> delta7 = Sse2.Subtract(color, pal7);
        // Squared error per palette entry: pmaddwd squares and pairwise-sums channel
        // deltas, then horizontal adds reduce to one total per entry (16 totals).
        Vector128<int> deltaSum0 = Sse2.MultiplyAddAdjacent(delta0, delta0);
        Vector128<int> deltaSum1 = Sse2.MultiplyAddAdjacent(delta1, delta1);
        Vector128<int> deltaSum2 = Sse2.MultiplyAddAdjacent(delta2, delta2);
        Vector128<int> deltaSum3 = Sse2.MultiplyAddAdjacent(delta3, delta3);
        Vector128<int> deltaSum4 = Sse2.MultiplyAddAdjacent(delta4, delta4);
        Vector128<int> deltaSum5 = Sse2.MultiplyAddAdjacent(delta5, delta5);
        Vector128<int> deltaSum6 = Sse2.MultiplyAddAdjacent(delta6, delta6);
        Vector128<int> deltaSum7 = Sse2.MultiplyAddAdjacent(delta7, delta7);
        Vector128<int> deltaSum01 = Ssse3.HorizontalAdd(deltaSum0, deltaSum1);
        Vector128<int> deltaSum23 = Ssse3.HorizontalAdd(deltaSum2, deltaSum3);
        Vector128<int> deltaSum45 = Ssse3.HorizontalAdd(deltaSum4, deltaSum5);
        Vector128<int> deltaSum67 = Ssse3.HorizontalAdd(deltaSum6, deltaSum7);
        // Pack into two banks of 8 errors and take the minimum of each bank.
        Vector128<ushort> delta0123 = Sse41.PackUnsignedSaturate(deltaSum01, deltaSum23);
        Vector128<ushort> delta4567 = Sse41.PackUnsignedSaturate(deltaSum45, deltaSum67);
        Vector128<ushort> min0123 = Sse41.MinHorizontal(delta0123);
        Vector128<ushort> min4567 = Sse41.MinHorizontal(delta4567);
        ushort minPos0123 = min0123.GetElement(0);
        ushort minPos4567 = min4567.GetElement(0);
        // Accumulate the smaller of the two bank minima.
        if (minPos4567 < minPos0123)
        {
            errorSum += minPos4567;
        }
        else
        {
            errorSum += minPos0123;
        }
    }
    return errorSum;
}
/// <summary>
/// Scalar fallback: computes the total quantization error of <paramref name="values"/>
/// against the palette generated from the given endpoint pair.
/// </summary>
private static int SelectIndicesFallback(
    ReadOnlySpan<uint> values,
    uint endPoint0,
    uint endPoint1,
    int pBit0,
    int pBit1,
    int indexBitCount,
    int indexCount,
    int colorDepth,
    int alphaDepth,
    uint alphaMask)
{
    uint alphaMaskForPalette = alphaMask;
    if (alphaDepth == 0)
    {
        // No encoded alpha: force the palette alpha to fully opaque.
        alphaMaskForPalette |= new RgbaColor8(0, 0, 0, 255).ToUInt32();
    }

    RgbaColor8 c0 = Quantize(RgbaColor8.FromUInt32(endPoint0), colorDepth, alphaDepth, pBit0);
    RgbaColor8 c1 = Quantize(RgbaColor8.FromUInt32(endPoint1), colorDepth, alphaDepth, pBit1);
    Unsafe.As<RgbaColor8, uint>(ref c0) |= alphaMaskForPalette;
    Unsafe.As<RgbaColor8, uint>(ref c1) |= alphaMaskForPalette;

    // Build the palette: endpoints at the extremes, interpolated entries in between.
    Span<uint> palette = stackalloc uint[indexCount];
    palette[0] = c0.ToUInt32();
    palette[indexCount - 1] = c1.ToUInt32();
    for (int entry = 1; entry < indexCount - 1; entry++)
    {
        palette[entry] = Interpolate(c0, c1, entry, indexBitCount).ToUInt32();
    }

    int errorSum = 0;
    foreach (uint value in values)
    {
        RgbaColor32 color = RgbaColor8.FromUInt32(value | alphaMask).GetColor32();
        int bestScore = int.MaxValue;

        // Find the closest palette entry; only the error matters for this overload.
        foreach (uint paletteEntry in palette)
        {
            int score = SquaredDifference(color, RgbaColor8.FromUInt32(paletteEntry).GetColor32());
            if (score < bestScore)
            {
                bestScore = score;
            }
        }

        errorSum += bestScore;
    }

    return errorSum;
}
/// <summary>
/// Computes per-pixel palette indices for a tile and returns the total encoding error,
/// dispatching to an SSE4.1 implementation when supported and falling back to scalar
/// code otherwise.
/// </summary>
public static int SelectIndices(
    ReadOnlySpan<uint> tile,
    int w,
    int h,
    ReadOnlySpan<uint> endPoints0,
    ReadOnlySpan<uint> endPoints1,
    ReadOnlySpan<int> pBitValues,
    Span<byte> indices,
    int subsetCount,
    int partition,
    int indexBitCount,
    int indexCount,
    int colorDepth,
    int alphaDepth,
    int pBits,
    uint alphaMask)
{
    if (Sse41.IsSupported)
    {
        switch (indexBitCount)
        {
            case 2:
                return Select2BitIndicesSse41(
                    tile,
                    w,
                    h,
                    endPoints0,
                    endPoints1,
                    pBitValues,
                    indices,
                    subsetCount,
                    partition,
                    colorDepth,
                    alphaDepth,
                    pBits,
                    alphaMask);
            case 3:
                return Select3BitIndicesSse41(
                    tile,
                    w,
                    h,
                    endPoints0,
                    endPoints1,
                    pBitValues,
                    indices,
                    subsetCount,
                    partition,
                    colorDepth,
                    alphaDepth,
                    pBits,
                    alphaMask);
            case 4:
                // The 4-bit fast path only supports a single subset.
                Debug.Assert(subsetCount == 1);
                return Select4BitIndicesOneSubsetSse41(
                    tile,
                    w,
                    h,
                    endPoints0[0],
                    endPoints1[0],
                    pBitValues,
                    indices,
                    partition,
                    colorDepth,
                    alphaDepth,
                    pBits,
                    alphaMask);
        }
    }

    return SelectIndicesFallback(
        tile,
        w,
        h,
        endPoints0,
        endPoints1,
        pBitValues,
        indices,
        subsetCount,
        partition,
        indexBitCount,
        indexCount,
        colorDepth,
        alphaDepth,
        pBits,
        alphaMask);
}
/// <summary>
/// Selects 2-bit palette indices for every pixel of a tile using SSE4.1, writing the
/// chosen index per pixel into <paramref name="indices"/> and returning the total error.
/// </summary>
/// <remarks>
/// Per-pixel errors are saturated to 16 bits by the packing step, so very large errors
/// are clamped rather than exact.
/// </remarks>
private static unsafe int Select2BitIndicesSse41(
    ReadOnlySpan<uint> tile,
    int w,
    int h,
    ReadOnlySpan<uint> endPoints0,
    ReadOnlySpan<uint> endPoints1,
    ReadOnlySpan<int> pBitValues,
    Span<byte> indices,
    int subsetCount,
    int partition,
    int colorDepth,
    int alphaDepth,
    int pBits,
    uint alphaMask)
{
    // Maps each pixel of the 4x4 tile to its subset for this partition.
    byte[] partitionTable = BC67Tables.PartitionTable[subsetCount - 1][partition];
    uint alphaMaskForPalette = alphaMask;
    if (alphaDepth == 0)
    {
        // No encoded alpha: force the palette alpha to fully opaque.
        alphaMaskForPalette |= new RgbaColor8(0, 0, 0, 255).ToUInt32();
    }
    int errorSum = 0;
    for (int subset = 0; subset < subsetCount; subset++)
    {
        // One shared p-bit per subset when pBits == subsetCount, otherwise two per subset.
        int pBit0 = -1, pBit1 = -1;
        if (pBits == subsetCount)
        {
            pBit0 = pBit1 = pBitValues[subset];
        }
        else if (pBits != 0)
        {
            pBit0 = pBitValues[subset * 2];
            pBit1 = pBitValues[subset * 2 + 1];
        }
        RgbaColor8 c0 = Quantize(RgbaColor8.FromUInt32(endPoints0[subset]), colorDepth, alphaDepth, pBit0);
        RgbaColor8 c1 = Quantize(RgbaColor8.FromUInt32(endPoints1[subset]), colorDepth, alphaDepth, pBit1);
        // Broadcast both endpoints and interleave their bytes into per-channel (c0, c1) pairs.
        Vector128<byte> c0Rep = Vector128.Create(c0.ToUInt32() | alphaMaskForPalette).AsByte();
        Vector128<byte> c1Rep = Vector128.Create(c1.ToUInt32() | alphaMaskForPalette).AsByte();
        Vector128<byte> c0c1 = Sse2.UnpackLow(c0Rep, c1Rep);
        Vector128<byte> rWeights;
        Vector128<byte> lWeights;
        // Load the 4 interpolation weights and inverse weights for 2-bit indices (4 bytes each).
        fixed (byte* pWeights = BC67Tables.Weights[0], pInvWeights = BC67Tables.InverseWeights[0])
        {
            rWeights = Sse2.LoadScalarVector128((uint*)pWeights).AsByte();
            lWeights = Sse2.LoadScalarVector128((uint*)pInvWeights).AsByte();
        }
        // Interleave into (invWeight, weight) pairs and replicate so palN below holds
        // palette entries 2N (low half) and 2N+1 (high half).
        Vector128<byte> iWeights = Sse2.UnpackLow(lWeights, rWeights);
        Vector128<byte> iWeights01 = Sse2.UnpackLow(iWeights.AsInt16(), iWeights.AsInt16()).AsByte();
        Vector128<byte> iWeights0 = Sse2.UnpackLow(iWeights01.AsInt16(), iWeights01.AsInt16()).AsByte();
        Vector128<byte> iWeights1 = Sse2.UnpackHigh(iWeights01.AsInt16(), iWeights01.AsInt16()).AsByte();
        Vector128<short> pal0 = ShiftRoundToNearest(Ssse3.MultiplyAddAdjacent(c0c1, iWeights0.AsSByte()));
        Vector128<short> pal1 = ShiftRoundToNearest(Ssse3.MultiplyAddAdjacent(c0c1, iWeights1.AsSByte()));
        int i = 0;
        for (int ty = 0; ty < h; ty++)
        {
            for (int tx = 0; tx < w; tx++, i++)
            {
                int tileOffset = ty * 4 + tx;
                // Only process pixels that belong to the current subset.
                if (partitionTable[tileOffset] != subset)
                {
                    continue;
                }
                uint c = tile[i] | alphaMask;
                Vector128<short> color = Sse41.ConvertToVector128Int16(Vector128.Create(c).AsByte());
                Vector128<short> delta0 = Sse2.Subtract(color, pal0);
                Vector128<short> delta1 = Sse2.Subtract(color, pal1);
                // Squared error per palette entry (4 totals, duplicated into both pack halves).
                Vector128<int> deltaSum0 = Sse2.MultiplyAddAdjacent(delta0, delta0);
                Vector128<int> deltaSum1 = Sse2.MultiplyAddAdjacent(delta1, delta1);
                Vector128<int> deltaSum01 = Ssse3.HorizontalAdd(deltaSum0, deltaSum1);
                Vector128<ushort> delta = Sse41.PackUnsignedSaturate(deltaSum01, deltaSum01);
                // phminposuw: element 0 holds the minimum in bits 0-15 and its lane index
                // in bits 16-18. Both halves are identical, so the index stays in 0..3.
                Vector128<ushort> min = Sse41.MinHorizontal(delta);
                uint minPos = min.AsUInt32().GetElement(0);
                ushort error = (ushort)minPos;
                uint index = minPos >> 16;
                indices[tileOffset] = (byte)index;
                errorSum += error;
            }
        }
    }
    return errorSum;
}
/// <summary>
/// Selects 3-bit palette indices for every pixel of a tile using SSE4.1, writing the
/// chosen index per pixel into <paramref name="indices"/> and returning the total error.
/// </summary>
/// <remarks>
/// Per-pixel errors are saturated to 16 bits by the packing step, so very large errors
/// are clamped rather than exact.
/// </remarks>
private static unsafe int Select3BitIndicesSse41(
    ReadOnlySpan<uint> tile,
    int w,
    int h,
    ReadOnlySpan<uint> endPoints0,
    ReadOnlySpan<uint> endPoints1,
    ReadOnlySpan<int> pBitValues,
    Span<byte> indices,
    int subsetCount,
    int partition,
    int colorDepth,
    int alphaDepth,
    int pBits,
    uint alphaMask)
{
    // Maps each pixel of the 4x4 tile to its subset for this partition.
    byte[] partitionTable = BC67Tables.PartitionTable[subsetCount - 1][partition];
    uint alphaMaskForPalette = alphaMask;
    if (alphaDepth == 0)
    {
        // No encoded alpha: force the palette alpha to fully opaque.
        alphaMaskForPalette |= new RgbaColor8(0, 0, 0, 255).ToUInt32();
    }
    int errorSum = 0;
    for (int subset = 0; subset < subsetCount; subset++)
    {
        // One shared p-bit per subset when pBits == subsetCount, otherwise two per subset.
        int pBit0 = -1, pBit1 = -1;
        if (pBits == subsetCount)
        {
            pBit0 = pBit1 = pBitValues[subset];
        }
        else if (pBits != 0)
        {
            pBit0 = pBitValues[subset * 2];
            pBit1 = pBitValues[subset * 2 + 1];
        }
        RgbaColor8 c0 = Quantize(RgbaColor8.FromUInt32(endPoints0[subset]), colorDepth, alphaDepth, pBit0);
        RgbaColor8 c1 = Quantize(RgbaColor8.FromUInt32(endPoints1[subset]), colorDepth, alphaDepth, pBit1);
        // Broadcast both endpoints and interleave their bytes into per-channel (c0, c1) pairs.
        Vector128<byte> c0Rep = Vector128.Create(c0.ToUInt32() | alphaMaskForPalette).AsByte();
        Vector128<byte> c1Rep = Vector128.Create(c1.ToUInt32() | alphaMaskForPalette).AsByte();
        Vector128<byte> c0c1 = Sse2.UnpackLow(c0Rep, c1Rep);
        Vector128<byte> rWeights;
        Vector128<byte> lWeights;
        // Load the 8 interpolation weights and inverse weights for 3-bit indices (8 bytes each).
        fixed (byte* pWeights = BC67Tables.Weights[1], pInvWeights = BC67Tables.InverseWeights[1])
        {
            rWeights = Sse2.LoadScalarVector128((ulong*)pWeights).AsByte();
            lWeights = Sse2.LoadScalarVector128((ulong*)pInvWeights).AsByte();
        }
        // Interleave into (invWeight, weight) pairs and replicate so each palN below holds
        // palette entries 2N (low half) and 2N+1 (high half).
        Vector128<byte> iWeights = Sse2.UnpackLow(lWeights, rWeights);
        Vector128<byte> iWeights01 = Sse2.UnpackLow(iWeights.AsInt16(), iWeights.AsInt16()).AsByte();
        Vector128<byte> iWeights23 = Sse2.UnpackHigh(iWeights.AsInt16(), iWeights.AsInt16()).AsByte();
        Vector128<byte> iWeights0 = Sse2.UnpackLow(iWeights01.AsInt16(), iWeights01.AsInt16()).AsByte();
        Vector128<byte> iWeights1 = Sse2.UnpackHigh(iWeights01.AsInt16(), iWeights01.AsInt16()).AsByte();
        Vector128<byte> iWeights2 = Sse2.UnpackLow(iWeights23.AsInt16(), iWeights23.AsInt16()).AsByte();
        Vector128<byte> iWeights3 = Sse2.UnpackHigh(iWeights23.AsInt16(), iWeights23.AsInt16()).AsByte();
        Vector128<short> pal0 = ShiftRoundToNearest(Ssse3.MultiplyAddAdjacent(c0c1, iWeights0.AsSByte()));
        Vector128<short> pal1 = ShiftRoundToNearest(Ssse3.MultiplyAddAdjacent(c0c1, iWeights1.AsSByte()));
        Vector128<short> pal2 = ShiftRoundToNearest(Ssse3.MultiplyAddAdjacent(c0c1, iWeights2.AsSByte()));
        Vector128<short> pal3 = ShiftRoundToNearest(Ssse3.MultiplyAddAdjacent(c0c1, iWeights3.AsSByte()));
        int i = 0;
        for (int ty = 0; ty < h; ty++)
        {
            for (int tx = 0; tx < w; tx++, i++)
            {
                int tileOffset = ty * 4 + tx;
                // Only process pixels that belong to the current subset.
                if (partitionTable[tileOffset] != subset)
                {
                    continue;
                }
                uint c = tile[i] | alphaMask;
                Vector128<short> color = Sse41.ConvertToVector128Int16(Vector128.Create(c).AsByte());
                Vector128<short> delta0 = Sse2.Subtract(color, pal0);
                Vector128<short> delta1 = Sse2.Subtract(color, pal1);
                Vector128<short> delta2 = Sse2.Subtract(color, pal2);
                Vector128<short> delta3 = Sse2.Subtract(color, pal3);
                // Squared error per palette entry (8 totals after the horizontal adds).
                Vector128<int> deltaSum0 = Sse2.MultiplyAddAdjacent(delta0, delta0);
                Vector128<int> deltaSum1 = Sse2.MultiplyAddAdjacent(delta1, delta1);
                Vector128<int> deltaSum2 = Sse2.MultiplyAddAdjacent(delta2, delta2);
                Vector128<int> deltaSum3 = Sse2.MultiplyAddAdjacent(delta3, delta3);
                Vector128<int> deltaSum01 = Ssse3.HorizontalAdd(deltaSum0, deltaSum1);
                Vector128<int> deltaSum23 = Ssse3.HorizontalAdd(deltaSum2, deltaSum3);
                Vector128<ushort> delta = Sse41.PackUnsignedSaturate(deltaSum01, deltaSum23);
                // phminposuw: element 0 holds the minimum in bits 0-15 and its lane index
                // (the winning palette index, 0..7) in bits 16-18.
                Vector128<ushort> min = Sse41.MinHorizontal(delta);
                uint minPos = min.AsUInt32().GetElement(0);
                ushort error = (ushort)minPos;
                uint index = minPos >> 16;
                indices[tileOffset] = (byte)index;
                errorSum += error;
            }
        }
    }
    return errorSum;
}
/// <summary>
/// Selects 4-bit palette indices (single subset) for every pixel of a tile using SSE4.1,
/// writing the chosen index per pixel into <paramref name="indices"/> and returning the
/// total error.
/// </summary>
/// <remarks>
/// The 16 palette entries are processed as two banks of 8; the winning bank contributes
/// its index directly (low bank) or offset by 8 (high bank). Per-pixel errors are
/// saturated to 16 bits by the packing step.
/// </remarks>
private static unsafe int Select4BitIndicesOneSubsetSse41(
    ReadOnlySpan<uint> tile,
    int w,
    int h,
    uint endPoint0,
    uint endPoint1,
    ReadOnlySpan<int> pBitValues,
    Span<byte> indices,
    int partition,
    int colorDepth,
    int alphaDepth,
    int pBits,
    uint alphaMask)
{
    uint alphaMaskForPalette = alphaMask;
    if (alphaDepth == 0)
    {
        // No encoded alpha: force the palette alpha to fully opaque.
        alphaMaskForPalette |= new RgbaColor8(0, 0, 0, 255).ToUInt32();
    }
    int errorSum = 0;
    int pBit0 = -1, pBit1 = -1;
    if (pBits != 0)
    {
        pBit0 = pBitValues[0];
        pBit1 = pBitValues[1];
    }
    RgbaColor8 c0 = Quantize(RgbaColor8.FromUInt32(endPoint0), colorDepth, alphaDepth, pBit0);
    RgbaColor8 c1 = Quantize(RgbaColor8.FromUInt32(endPoint1), colorDepth, alphaDepth, pBit1);
    // Broadcast both endpoints and interleave their bytes into per-channel (c0, c1) pairs.
    Vector128<byte> c0Rep = Vector128.Create(c0.ToUInt32() | alphaMaskForPalette).AsByte();
    Vector128<byte> c1Rep = Vector128.Create(c1.ToUInt32() | alphaMaskForPalette).AsByte();
    Vector128<byte> c0c1 = Sse2.UnpackLow(c0Rep, c1Rep);
    Vector128<byte> rWeights;
    Vector128<byte> lWeights;
    // Load the 16 interpolation weights and inverse weights for 4-bit indices.
    fixed (byte* pWeights = BC67Tables.Weights[2], pInvWeights = BC67Tables.InverseWeights[2])
    {
        rWeights = Sse2.LoadVector128(pWeights);
        lWeights = Sse2.LoadVector128(pInvWeights);
    }
    // Interleave into (invWeight, weight) pairs and replicate so each palN below holds
    // palette entries 2N (low half) and 2N+1 (high half).
    Vector128<byte> iWeightsLow = Sse2.UnpackLow(lWeights, rWeights);
    Vector128<byte> iWeightsHigh = Sse2.UnpackHigh(lWeights, rWeights);
    Vector128<byte> iWeights01 = Sse2.UnpackLow(iWeightsLow.AsInt16(), iWeightsLow.AsInt16()).AsByte();
    Vector128<byte> iWeights23 = Sse2.UnpackHigh(iWeightsLow.AsInt16(), iWeightsLow.AsInt16()).AsByte();
    Vector128<byte> iWeights45 = Sse2.UnpackLow(iWeightsHigh.AsInt16(), iWeightsHigh.AsInt16()).AsByte();
    Vector128<byte> iWeights67 = Sse2.UnpackHigh(iWeightsHigh.AsInt16(), iWeightsHigh.AsInt16()).AsByte();
    Vector128<byte> iWeights0 = Sse2.UnpackLow(iWeights01.AsInt16(), iWeights01.AsInt16()).AsByte();
    Vector128<byte> iWeights1 = Sse2.UnpackHigh(iWeights01.AsInt16(), iWeights01.AsInt16()).AsByte();
    Vector128<byte> iWeights2 = Sse2.UnpackLow(iWeights23.AsInt16(), iWeights23.AsInt16()).AsByte();
    Vector128<byte> iWeights3 = Sse2.UnpackHigh(iWeights23.AsInt16(), iWeights23.AsInt16()).AsByte();
    Vector128<byte> iWeights4 = Sse2.UnpackLow(iWeights45.AsInt16(), iWeights45.AsInt16()).AsByte();
    Vector128<byte> iWeights5 = Sse2.UnpackHigh(iWeights45.AsInt16(), iWeights45.AsInt16()).AsByte();
    Vector128<byte> iWeights6 = Sse2.UnpackLow(iWeights67.AsInt16(), iWeights67.AsInt16()).AsByte();
    Vector128<byte> iWeights7 = Sse2.UnpackHigh(iWeights67.AsInt16(), iWeights67.AsInt16()).AsByte();
    Vector128<short> pal0 = ShiftRoundToNearest(Ssse3.MultiplyAddAdjacent(c0c1, iWeights0.AsSByte()));
    Vector128<short> pal1 = ShiftRoundToNearest(Ssse3.MultiplyAddAdjacent(c0c1, iWeights1.AsSByte()));
    Vector128<short> pal2 = ShiftRoundToNearest(Ssse3.MultiplyAddAdjacent(c0c1, iWeights2.AsSByte()));
    Vector128<short> pal3 = ShiftRoundToNearest(Ssse3.MultiplyAddAdjacent(c0c1, iWeights3.AsSByte()));
    Vector128<short> pal4 = ShiftRoundToNearest(Ssse3.MultiplyAddAdjacent(c0c1, iWeights4.AsSByte()));
    Vector128<short> pal5 = ShiftRoundToNearest(Ssse3.MultiplyAddAdjacent(c0c1, iWeights5.AsSByte()));
    Vector128<short> pal6 = ShiftRoundToNearest(Ssse3.MultiplyAddAdjacent(c0c1, iWeights6.AsSByte()));
    Vector128<short> pal7 = ShiftRoundToNearest(Ssse3.MultiplyAddAdjacent(c0c1, iWeights7.AsSByte()));
    int i = 0;
    for (int ty = 0; ty < h; ty++)
    {
        for (int tx = 0; tx < w; tx++, i++)
        {
            uint c = tile[i] | alphaMask;
            Vector128<short> color = Sse41.ConvertToVector128Int16(Vector128.Create(c).AsByte());
            Vector128<short> delta0 = Sse2.Subtract(color, pal0);
            Vector128<short> delta1 = Sse2.Subtract(color, pal1);
            Vector128<short> delta2 = Sse2.Subtract(color, pal2);
            Vector128<short> delta3 = Sse2.Subtract(color, pal3);
            Vector128<short> delta4 = Sse2.Subtract(color, pal4);
            Vector128<short> delta5 = Sse2.Subtract(color, pal5);
            Vector128<short> delta6 = Sse2.Subtract(color, pal6);
            Vector128<short> delta7 = Sse2.Subtract(color, pal7);
            // Squared error per palette entry (16 totals after the horizontal adds).
            Vector128<int> deltaSum0 = Sse2.MultiplyAddAdjacent(delta0, delta0);
            Vector128<int> deltaSum1 = Sse2.MultiplyAddAdjacent(delta1, delta1);
            Vector128<int> deltaSum2 = Sse2.MultiplyAddAdjacent(delta2, delta2);
            Vector128<int> deltaSum3 = Sse2.MultiplyAddAdjacent(delta3, delta3);
            Vector128<int> deltaSum4 = Sse2.MultiplyAddAdjacent(delta4, delta4);
            Vector128<int> deltaSum5 = Sse2.MultiplyAddAdjacent(delta5, delta5);
            Vector128<int> deltaSum6 = Sse2.MultiplyAddAdjacent(delta6, delta6);
            Vector128<int> deltaSum7 = Sse2.MultiplyAddAdjacent(delta7, delta7);
            Vector128<int> deltaSum01 = Ssse3.HorizontalAdd(deltaSum0, deltaSum1);
            Vector128<int> deltaSum23 = Ssse3.HorizontalAdd(deltaSum2, deltaSum3);
            Vector128<int> deltaSum45 = Ssse3.HorizontalAdd(deltaSum4, deltaSum5);
            Vector128<int> deltaSum67 = Ssse3.HorizontalAdd(deltaSum6, deltaSum7);
            // Pack into two banks of 8 errors; phminposuw gives each bank's minimum
            // (bits 0-15) and the winning lane index (bits 16-18).
            Vector128<ushort> delta0123 = Sse41.PackUnsignedSaturate(deltaSum01, deltaSum23);
            Vector128<ushort> delta4567 = Sse41.PackUnsignedSaturate(deltaSum45, deltaSum67);
            Vector128<ushort> min0123 = Sse41.MinHorizontal(delta0123);
            Vector128<ushort> min4567 = Sse41.MinHorizontal(delta4567);
            uint minPos0123 = min0123.AsUInt32().GetElement(0);
            uint minPos4567 = min4567.AsUInt32().GetElement(0);
            // High bank wins only on strictly smaller error; its indices are 8..15.
            if ((ushort)minPos4567 < (ushort)minPos0123)
            {
                errorSum += (ushort)minPos4567;
                indices[ty * 4 + tx] = (byte)(8 + (minPos4567 >> 16));
            }
            else
            {
                errorSum += (ushort)minPos0123;
                indices[ty * 4 + tx] = (byte)(minPos0123 >> 16);
            }
        }
    }
    return errorSum;
}
/// <summary>
/// Converts 6-bit fixed point lanes to integers, rounding to nearest
/// (adds half of the divisor, 32, before the logical right shift by 6).
/// </summary>
private static Vector128<short> ShiftRoundToNearest(Vector128<short> x)
{
    Vector128<short> half = Vector128.Create((short)32);
    Vector128<short> biased = Sse2.Add(x, half);
    return Sse2.ShiftRightLogical(biased, 6);
}
/// <summary>
/// Scalar fallback: selects the best palette index for every pixel of a tile, writing the
/// chosen index per pixel into <paramref name="indices"/> and returning the total
/// squared error.
/// </summary>
private static int SelectIndicesFallback(
    ReadOnlySpan<uint> tile,
    int w,
    int h,
    ReadOnlySpan<uint> endPoints0,
    ReadOnlySpan<uint> endPoints1,
    ReadOnlySpan<int> pBitValues,
    Span<byte> indices,
    int subsetCount,
    int partition,
    int indexBitCount,
    int indexCount,
    int colorDepth,
    int alphaDepth,
    int pBits,
    uint alphaMask)
{
    int errorSum = 0;
    uint alphaMaskForPalette = alphaMask;
    if (alphaDepth == 0)
    {
        // No encoded alpha: force the palette alpha to fully opaque.
        alphaMaskForPalette |= new RgbaColor8(0, 0, 0, 255).ToUInt32();
    }
    // One palette of indexCount entries per subset, stored back to back.
    Span<uint> palette = stackalloc uint[subsetCount * indexCount];
    for (int subset = 0; subset < subsetCount; subset++)
    {
        int palBase = subset * indexCount;
        // One shared p-bit per subset when pBits == subsetCount, otherwise two per subset.
        int pBit0 = -1, pBit1 = -1;
        if (pBits == subsetCount)
        {
            pBit0 = pBit1 = pBitValues[subset];
        }
        else if (pBits != 0)
        {
            pBit0 = pBitValues[subset * 2];
            pBit1 = pBitValues[subset * 2 + 1];
        }
        RgbaColor8 c0 = Quantize(RgbaColor8.FromUInt32(endPoints0[subset]), colorDepth, alphaDepth, pBit0);
        RgbaColor8 c1 = Quantize(RgbaColor8.FromUInt32(endPoints1[subset]), colorDepth, alphaDepth, pBit1);
        Unsafe.As<RgbaColor8, uint>(ref c0) |= alphaMaskForPalette;
        Unsafe.As<RgbaColor8, uint>(ref c1) |= alphaMaskForPalette;
        // Endpoints at the extremes, interpolated entries in between.
        palette[palBase + 0] = c0.ToUInt32();
        palette[palBase + indexCount - 1] = c1.ToUInt32();
        for (int j = 1; j < indexCount - 1; j++)
        {
            palette[palBase + j] = Interpolate(c0, c1, j, indexBitCount).ToUInt32();
        }
    }
    int i = 0;
    for (int ty = 0; ty < h; ty++)
    {
        for (int tx = 0; tx < w; tx++)
        {
            // Look up which subset (and thus which palette) this pixel belongs to.
            int subset = BC67Tables.PartitionTable[subsetCount - 1][partition][ty * 4 + tx];
            uint color = tile[i++] | alphaMask;
            int bestMatchScore = int.MaxValue;
            int bestMatchIndex = 0;
            // Exhaustively find the palette entry with the smallest squared difference.
            for (int j = 0; j < indexCount; j++)
            {
                int score = SquaredDifference(
                    RgbaColor8.FromUInt32(color).GetColor32(),
                    RgbaColor8.FromUInt32(palette[subset * indexCount + j]).GetColor32());
                if (score < bestMatchScore)
                {
                    bestMatchScore = score;
                    bestMatchIndex = j;
                }
            }
            indices[ty * 4 + tx] = (byte)bestMatchIndex;
            errorSum += bestMatchScore;
        }
    }
    return errorSum;
}
/// <summary>
/// Computes the squared distance between two colors, summed over all four channels.
/// </summary>
[MethodImpl(MethodImplOptions.AggressiveInlining)]
public static int SquaredDifference(RgbaColor32 color1, RgbaColor32 color2)
{
    RgbaColor32 difference = color1 - color2;
    return RgbaColor32.Dot(difference, difference);
}
/// <summary>
/// Interpolates between two 8-bit colors by widening them to 32 bits per channel,
/// blending, and narrowing the result back to 8 bits per channel.
/// </summary>
[MethodImpl(MethodImplOptions.AggressiveInlining)]
public static RgbaColor8 Interpolate(RgbaColor8 color1, RgbaColor8 color2, int weightIndex, int indexBitCount)
{
    RgbaColor32 wide1 = color1.GetColor32();
    RgbaColor32 wide2 = color2.GetColor32();
    return Interpolate(wide1, wide2, weightIndex, indexBitCount).GetColor8();
}
/// <summary>
/// Interpolates between two colors using a computed weight in 6-bit fixed point
/// (0..64), derived from the palette index and index bit count.
/// </summary>
[MethodImpl(MethodImplOptions.AggressiveInlining)]
public static RgbaColor32 Interpolate(RgbaColor32 color1, RgbaColor32 color2, int weightIndex, int indexBitCount)
{
    Debug.Assert(indexBitCount >= 2 && indexBitCount <= 4);
    // Map the index onto 0..64: scale by 128, then divide by 2 rounding to nearest.
    int maxIndex = (1 << indexBitCount) - 1;
    int weight = (((weightIndex << 7) / maxIndex) + 1) >> 1;
    RgbaColor32 weights = new RgbaColor32(weight);
    RgbaColor32 inverseWeights = new RgbaColor32(64 - weight);
    // 6-bit fixed point blend with round-to-nearest (+32 before the shift by 6).
    return (color1 * inverseWeights + color2 * weights + new RgbaColor32(32)) >> 6;
}
/// <summary>
/// Interpolates between two colors using separate weight table indices for the color
/// channels and the alpha channel (weights are 6-bit fixed point, 0..64).
/// </summary>
[MethodImpl(MethodImplOptions.AggressiveInlining)]
public static RgbaColor32 Interpolate(
    RgbaColor32 color1,
    RgbaColor32 color2,
    int colorWeightIndex,
    int alphaWeightIndex,
    int colorIndexBitCount,
    int alphaIndexBitCount)
{
    Debug.Assert(colorIndexBitCount >= 2 && colorIndexBitCount <= 4);
    Debug.Assert(alphaIndexBitCount >= 2 && alphaIndexBitCount <= 4);
    // The weight tables start at 2 bits, hence the - 2 offset.
    int colorWeight = BC67Tables.Weights[colorIndexBitCount - 2][colorWeightIndex];
    int alphaWeight = BC67Tables.Weights[alphaIndexBitCount - 2][alphaWeightIndex];
    RgbaColor32 weights = new RgbaColor32(colorWeight);
    weights.A = alphaWeight;
    RgbaColor32 inverseWeights = new RgbaColor32(64) - weights;
    // 6-bit fixed point blend with round-to-nearest (+32 before the shift by 6).
    return (color1 * inverseWeights + color2 * weights + new RgbaColor32(32)) >> 6;
}
/// <summary>
/// Quantizes a color to the given bit depths and returns it as the decoder would
/// reconstruct it (quantize and unquantize in a single step).
/// </summary>
/// <remarks>
/// The fast LUT-based path below only handles alphaBits == 0 (alpha passed through
/// untouched); any other alpha depth falls back to <see cref="QuantizeFallback"/>.
/// </remarks>
public static RgbaColor8 Quantize(RgbaColor8 color, int colorBits, int alphaBits, int pBit = -1)
{
    if (alphaBits == 0)
    {
        int colorShift = 8 - colorBits;
        uint c;
        if (pBit >= 0)
        {
            // LUT rows are indexed by component | (pBit << 8).
            byte[] lutColor = _quantizationLut[colorBits - 4];
            Debug.Assert(pBit <= 1);
            int high = pBit << 8;
            // Mask covering the low bits that unquantization fills by bit replication,
            // replicated across the R, G and B byte lanes (0x10101).
            uint mask = (0xffu >> (colorBits + 1)) * 0x10101;
            c = lutColor[color.R | high];
            c |= (uint)lutColor[color.G | high] << 8;
            c |= (uint)lutColor[color.B | high] << 16;
            // Unquantize all three channels at once: move the quantized bits to the top,
            // replicate them into the low bits, then insert the p-bit below them.
            c <<= colorShift;
            c |= (c >> (colorBits + 1)) & mask;
            c |= ((uint)pBit * 0x10101) << (colorShift - 1);
        }
        else
        {
            byte[] lutColor = _quantizationLutNoPBit[colorBits - 4];
            uint mask = (0xffu >> colorBits) * 0x10101;
            c = lutColor[color.R];
            c |= (uint)lutColor[color.G] << 8;
            c |= (uint)lutColor[color.B] << 16;
            c <<= colorShift;
            c |= (c >> colorBits) & mask;
        }
        // Alpha is not quantized when its depth is zero.
        c |= (uint)color.A << 24;
        return RgbaColor8.FromUInt32(c);
    }
    return QuantizeFallback(color, colorBits, alphaBits, pBit);
}
/// <summary>
/// Scalar quantization path: round-trips every component through quantize/unquantize
/// to produce the value the decoder will reconstruct. Alpha is passed through when
/// its depth is zero.
/// </summary>
private static RgbaColor8 QuantizeFallback(RgbaColor8 color, int colorBits, int alphaBits, int pBit = -1)
{
    // Quantize then immediately unquantize a single component.
    static byte RoundTrip(byte component, int bits, int p)
    {
        return UnquantizeComponent(QuantizeComponent(component, bits, p), bits, p);
    }

    byte r = RoundTrip(color.R, colorBits, pBit);
    byte g = RoundTrip(color.G, colorBits, pBit);
    byte b = RoundTrip(color.B, colorBits, pBit);
    byte a = alphaBits != 0 ? RoundTrip(color.A, alphaBits, pBit) : color.A;
    return new RgbaColor8(r, g, b, a);
}
/// <summary>
/// Quantizes a single 8-bit component to the given bit count using the precomputed
/// lookup tables (tables start at 4 bits). With a p-bit, the LUT row is indexed by
/// component | (pBit &lt;&lt; 8).
/// </summary>
public static byte QuantizeComponent(byte component, int bits, int pBit = -1)
{
    if (pBit >= 0)
    {
        return _quantizationLut[bits - 4][component | (pBit << 8)];
    }

    return _quantizationLutNoPBit[bits - 4][component];
}
/// <summary>
/// Computes the quantized value of a component for the lookup tables by testing the
/// truncated candidate and its two neighbors, and picking the one whose reconstructed
/// value is closest to the input.
/// </summary>
private static byte QuantizeComponentForLut(byte component, int bits, int pBit = -1)
{
    int shift = 8 - bits;
    // Low bits that unquantization fills in below the quantized value.
    int fill = component >> bits;
    if (pBit >= 0)
    {
        Debug.Assert(pBit <= 1);
        fill >>= 1;
        fill |= pBit << (shift - 1);
    }

    // Candidates: the truncated value, one step below and one step above (clamped).
    int middle = component >> shift;
    int below = Math.Max(middle - 1, 0);
    int above = Math.Min(middle + 1, (1 << bits) - 1);

    // Reconstruction error of each candidate against the original component.
    int errorMiddle = FastAbs(((middle << shift) | fill) - component);
    int errorBelow = component - ((below << shift) | fill);
    int errorAbove = ((above << shift) | fill) - component;

    if (errorMiddle < errorBelow && errorMiddle < errorAbove)
    {
        return (byte)middle;
    }

    return errorBelow < errorAbove ? (byte)below : (byte)above;
}
/// <summary>
/// Branchless absolute value: the mask is 0 for non-negative inputs and -1 for
/// negative ones, so (x ^ mask) - mask negates exactly when x is negative.
/// </summary>
[MethodImpl(MethodImplOptions.AggressiveInlining)]
private static int FastAbs(int x)
{
    int mask = x >> 31;
    return (x ^ mask) - mask;
}
/// <summary>
/// Expands a quantized component back to 8 bits by shifting it to the top and
/// replicating its high bits into the low bits; with a p-bit the effective precision
/// is bits + 1 and the p-bit is inserted just below the quantized bits.
/// </summary>
private static byte UnquantizeComponent(byte component, int bits, int pBit)
{
    int shift = 8 - bits;
    int value = component << shift;
    if (pBit < 0)
    {
        value |= value >> bits;
    }
    else
    {
        Debug.Assert(pBit <= 1);
        value |= value >> (bits + 1);
        value |= pBit << (shift - 1);
    }

    return (byte)value;
}
}
}