Mirror of https://github.com/ryujinx-mirror/ryujinx.git (synced 2024-12-22 22:05:45 +00:00)
a731ab3a2a
* Start of the ARMeilleure project
* Refactoring around the old IRAdapter, now renamed to PreAllocator
* Optimize the LowestBitSet method
* Add CLZ support and fix CLS implementation
* Add missing Equals and GetHashCode overrides on some structs, misc. small tweaks
* Implement the ByteSwap IR instruction, and some refactoring on the assembler
* Implement the DivideUI IR instruction and fix 64-bit IDIV
* Correct constant operand type on CSINC
* Move division instructions implementation to InstEmitDiv
* Fix destination type for the ConditionalSelect IR instruction
* Implement UMULH and SMULH, with new IR instructions
* Fix some issues with shift instructions
* Fix constant types for BFM instructions
* Fix up new tests using the new V128 struct
* Update tests
* Move DIV tests to a separate file
* Add support for calls, and some instructions that depend on them
* Start adding support for SIMD & FP types, along with some of the related ARM instructions
* Fix some typos and the divide instruction with FP operands
* Fix wrong method call on Clz_V
* Implement ARM FP & SIMD move instructions, Saddlv_V, and misc. fixes
* Implement SIMD logical instructions and more misc. fixes
* Fix PSRAD x86 instruction encoding, TRN, UABD and UABDL implementations
* Implement float conversion instruction, merge in LDj3SNuD fixes, and some other misc. fixes
* Implement SIMD shift instruction and fix Dup_V
* Add SCVTF and UCVTF (vector, fixed-point) variants to the opcode table
* Fix check with tolerance on tester
* Implement FP & SIMD comparison instructions, and some fixes
* Update FCVT (Scalar) encoding on the table to support the Half-float variants
* Support passing V128 structs, some cleanup on the register allocator, merge LDj3SNuD fixes
* Use old memory access methods, made a start on SIMD memory insts support, some fixes
* Fix float constant passed to functions, save and restore non-volatile XMM registers, other fixes
* Fix arguments count with struct return values, other fixes
* More instructions
* Misc. fixes and integrate LDj3SNuD fixes
* Update tests
* Add a faster linear scan allocator, unwinding support on Windows, and other changes
* Update Ryujinx.HLE
* Update Ryujinx.Graphics
* Fix V128 return pointer passing, RCX is clobbered
* Update Ryujinx.Tests
* Update ITimeZoneService
* Stop using GetFunctionPointer as that can't be called from native code, misc. fixes and tweaks
* Use generic GetFunctionPointerForDelegate method and other tweaks
* Some refactoring on the code generator, assert on invalid operations and use a separate enum for intrinsics
* Remove some unused code on the assembler
* Fix REX.W prefix regression on float conversion instructions, add some sort of profiler
* Add hardware capability detection
* Fix regression on Sha1h and revert Fcm** changes
* Add SSE2-only paths on vector extract and insert, some refactoring on the pre-allocator
* Fix silly mistake introduced on last commit on CpuId
* Generate inline stack probes when the stack allocation is too large
* Initial support for the System-V ABI
* Support multiple destination operands
* Fix SSE2 VectorInsert8 path, and other fixes
* Change placement of XMM callee save and restore code to match other compilers
* Rename Dest to Destination and Inst to Instruction
* Fix a regression related to calls and the V128 type
* Add an extra space on comments to match code style
* Some refactoring
* Fix vector insert FP32 SSE2 path
* Port over the ARM32 instructions
* Avoid memory protection races on JIT Cache
* Another fix on VectorInsert FP32 (thanks to LDj3SNuD)
* Float operands don't need to use the same register when VEX is supported
* Add a new register allocator, higher quality code for hot code (tier up), and other tweaks
* Some nits, small improvements on the pre-allocator
* CpuThreadState is gone
* Allow changing CPU emulators with a config entry
* Add runtime identifiers on the ARMeilleure project
* Allow switching between CPUs through a config entry (pt. 2)
* Change win10-x64 to win-x64 on projects
* Update the Ryujinx project to use ARMeilleure
* Ensure that the selected register is valid on the hybrid allocator
* Allow exiting on returns to 0 (should fix test regression)
* Remove register assignments for most used variables on the hybrid allocator
* Do not use fixed registers as spill temp
* Add missing namespace and remove unneeded using
* Address PR feedback
* Fix types, etc.
* Enable AssumeStrictAbiCompliance by default
* Ensure that Spill and Fill don't load or store any more than necessary
712 lines · No EOL · 22 KiB · C#
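// AArch64 SIMD and floating-point compare instruction emitters (CMEQ, CMGE, CMGT, CMHI, CMHS,
// CMLE, CMLT, CMTST, FCMEQ..FCMLT, FCMP/FCMPE and FCCMP/FCCMPE), with SSE fast paths and
// SoftFloat fallbacks.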
using ARMeilleure.Decoders;
using ARMeilleure.IntermediateRepresentation;
using ARMeilleure.State;
using ARMeilleure.Translation;
using System;

using static ARMeilleure.Instructions.InstEmitHelper;
using static ARMeilleure.Instructions.InstEmitSimdHelper;
using static ARMeilleure.IntermediateRepresentation.OperandHelper;

namespace ARMeilleure.Instructions
{
    using Func2I = Func<Operand, Operand, Operand>;

    static partial class InstEmit
    {
        public static void Cmeq_S(ArmEmitterContext context)
        {
            EmitCmpOp(context, (op1, op2) => context.ICompareEqual(op1, op2), scalar: true);
        }

        public static void Cmeq_V(ArmEmitterContext context)
        {
            if (Optimizations.UseSse41)
            {
                OpCodeSimd op = (OpCodeSimd)context.CurrOp;

                Operand n = GetVec(op.Rn);
                Operand m;

                if (op is OpCodeSimdReg binOp)
                {
                    m = GetVec(binOp.Rm);
                }
                else
                {
                    m = context.VectorZero();
                }

                Intrinsic cmpInst = X86PcmpeqInstruction[op.Size];

                Operand res = context.AddIntrinsic(cmpInst, n, m);

                if (op.RegisterSize == RegisterSize.Simd64)
                {
                    res = context.VectorZeroUpper64(res);
                }

                context.Copy(GetVec(op.Rd), res);
            }
            else
            {
                EmitCmpOp(context, (op1, op2) => context.ICompareEqual(op1, op2), scalar: false);
            }
        }

        public static void Cmge_S(ArmEmitterContext context)
        {
            EmitCmpOp(context, (op1, op2) => context.ICompareGreaterOrEqual(op1, op2), scalar: true);
        }

        public static void Cmge_V(ArmEmitterContext context)
        {
            if (Optimizations.UseSse42)
            {
                OpCodeSimd op = (OpCodeSimd)context.CurrOp;

                Operand n = GetVec(op.Rn);
                Operand m;

                if (op is OpCodeSimdReg binOp)
                {
                    m = GetVec(binOp.Rm);
                }
                else
                {
                    m = context.VectorZero();
                }
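
                // Signed CMGE (n >= m) has no direct SSE predicate: compute m > n with PCMPGT,
                // then invert the result with PANDN against an all-ones mask.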
                Intrinsic cmpInst = X86PcmpgtInstruction[op.Size];

                Operand res = context.AddIntrinsic(cmpInst, m, n);

                Operand mask = X86GetAllElements(context, -1L);

                res = context.AddIntrinsic(Intrinsic.X86Pandn, res, mask);

                if (op.RegisterSize == RegisterSize.Simd64)
                {
                    res = context.VectorZeroUpper64(res);
                }

                context.Copy(GetVec(op.Rd), res);
            }
            else
            {
                EmitCmpOp(context, (op1, op2) => context.ICompareGreaterOrEqual(op1, op2), scalar: false);
            }
        }

        public static void Cmgt_S(ArmEmitterContext context)
        {
            EmitCmpOp(context, (op1, op2) => context.ICompareGreater(op1, op2), scalar: true);
        }

        public static void Cmgt_V(ArmEmitterContext context)
        {
            if (Optimizations.UseSse42)
            {
                OpCodeSimd op = (OpCodeSimd)context.CurrOp;

                Operand n = GetVec(op.Rn);
                Operand m;

                if (op is OpCodeSimdReg binOp)
                {
                    m = GetVec(binOp.Rm);
                }
                else
                {
                    m = context.VectorZero();
                }

                Intrinsic cmpInst = X86PcmpgtInstruction[op.Size];

                Operand res = context.AddIntrinsic(cmpInst, n, m);

                if (op.RegisterSize == RegisterSize.Simd64)
                {
                    res = context.VectorZeroUpper64(res);
                }

                context.Copy(GetVec(op.Rd), res);
            }
            else
            {
                EmitCmpOp(context, (op1, op2) => context.ICompareGreater(op1, op2), scalar: false);
            }
        }

        public static void Cmhi_S(ArmEmitterContext context)
        {
            EmitCmpOp(context, (op1, op2) => context.ICompareGreaterUI(op1, op2), scalar: true);
        }

        public static void Cmhi_V(ArmEmitterContext context)
        {
            OpCodeSimdReg op = (OpCodeSimdReg)context.CurrOp;

            if (Optimizations.UseSse41 && op.Size < 3)
            {
                Operand n = GetVec(op.Rn);
                Operand m = GetVec(op.Rm);
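
                // Unsigned CMHI (n > m) via SSE4.1: PMAXU + PCMPEQ produce a "n <= m" mask
                // (max(m, n) == m), which PANDN then inverts. Size 3 is excluded above because
                // SSE4.1 has no 64-bit unsigned max.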
                Intrinsic maxInst = X86PmaxuInstruction[op.Size];

                Operand res = context.AddIntrinsic(maxInst, m, n);

                Intrinsic cmpInst = X86PcmpeqInstruction[op.Size];

                res = context.AddIntrinsic(cmpInst, res, m);

                Operand mask = X86GetAllElements(context, -1L);

                res = context.AddIntrinsic(Intrinsic.X86Pandn, res, mask);

                if (op.RegisterSize == RegisterSize.Simd64)
                {
                    res = context.VectorZeroUpper64(res);
                }

                context.Copy(GetVec(op.Rd), res);
            }
            else
            {
                EmitCmpOp(context, (op1, op2) => context.ICompareGreaterUI(op1, op2), scalar: false);
            }
        }

        public static void Cmhs_S(ArmEmitterContext context)
        {
            EmitCmpOp(context, (op1, op2) => context.ICompareGreaterOrEqualUI(op1, op2), scalar: true);
        }

        public static void Cmhs_V(ArmEmitterContext context)
        {
            OpCodeSimdReg op = (OpCodeSimdReg)context.CurrOp;

            if (Optimizations.UseSse41 && op.Size < 3)
            {
                Operand n = GetVec(op.Rn);
                Operand m = GetVec(op.Rm);
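
                // Unsigned CMHS (n >= m): max(n, m) == n, so unlike CMHI no mask inversion is needed.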
                Intrinsic maxInst = X86PmaxuInstruction[op.Size];

                Operand res = context.AddIntrinsic(maxInst, n, m);

                Intrinsic cmpInst = X86PcmpeqInstruction[op.Size];

                res = context.AddIntrinsic(cmpInst, res, n);

                if (op.RegisterSize == RegisterSize.Simd64)
                {
                    res = context.VectorZeroUpper64(res);
                }

                context.Copy(GetVec(op.Rd), res);
            }
            else
            {
                EmitCmpOp(context, (op1, op2) => context.ICompareGreaterOrEqualUI(op1, op2), scalar: false);
            }
        }

        public static void Cmle_S(ArmEmitterContext context)
        {
            EmitCmpOp(context, (op1, op2) => context.ICompareLessOrEqual(op1, op2), scalar: true);
        }

        public static void Cmle_V(ArmEmitterContext context)
        {
            if (Optimizations.UseSse42)
            {
                OpCodeSimd op = (OpCodeSimd)context.CurrOp;

                Operand n = GetVec(op.Rn);
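
                // CMLE (zero): n <= 0 is NOT(n > 0), so compare against zero and invert with PANDN.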
                Intrinsic cmpInst = X86PcmpgtInstruction[op.Size];

                Operand res = context.AddIntrinsic(cmpInst, n, context.VectorZero());

                Operand mask = X86GetAllElements(context, -1L);

                res = context.AddIntrinsic(Intrinsic.X86Pandn, res, mask);

                if (op.RegisterSize == RegisterSize.Simd64)
                {
                    res = context.VectorZeroUpper64(res);
                }

                context.Copy(GetVec(op.Rd), res);
            }
            else
            {
                EmitCmpOp(context, (op1, op2) => context.ICompareLessOrEqual(op1, op2), scalar: false);
            }
        }

        public static void Cmlt_S(ArmEmitterContext context)
        {
            EmitCmpOp(context, (op1, op2) => context.ICompareLess(op1, op2), scalar: true);
        }

        public static void Cmlt_V(ArmEmitterContext context)
        {
            if (Optimizations.UseSse42)
            {
                OpCodeSimd op = (OpCodeSimd)context.CurrOp;

                Operand n = GetVec(op.Rn);
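
                // CMLT (zero): n < 0 is the same as 0 > n, so PCMPGT with zero as the first operand
                // yields the mask directly.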
                Intrinsic cmpInst = X86PcmpgtInstruction[op.Size];

                Operand res = context.AddIntrinsic(cmpInst, context.VectorZero(), n);

                if (op.RegisterSize == RegisterSize.Simd64)
                {
                    res = context.VectorZeroUpper64(res);
                }

                context.Copy(GetVec(op.Rd), res);
            }
            else
            {
                EmitCmpOp(context, (op1, op2) => context.ICompareLess(op1, op2), scalar: false);
            }
        }

        public static void Cmtst_S(ArmEmitterContext context)
        {
            EmitCmtstOp(context, scalar: true);
        }

        public static void Cmtst_V(ArmEmitterContext context)
        {
            EmitCmtstOp(context, scalar: false);
        }

        public static void Fccmp_S(ArmEmitterContext context)
        {
            EmitFccmpOrFccmpe(context, signalNaNs: false);
        }

        public static void Fccmpe_S(ArmEmitterContext context)
        {
            EmitFccmpOrFccmpe(context, signalNaNs: true);
        }

        public static void Fcmeq_S(ArmEmitterContext context)
        {
            if (Optimizations.FastFP && Optimizations.UseSse2)
            {
                EmitCmpSseOrSse2OpF(context, CmpCondition.Equal, scalar: true);
            }
            else
            {
                EmitCmpOpF(context, SoftFloat32.FPCompareEQ, SoftFloat64.FPCompareEQ, scalar: true);
            }
        }

        public static void Fcmeq_V(ArmEmitterContext context)
        {
            if (Optimizations.FastFP && Optimizations.UseSse2)
            {
                EmitCmpSseOrSse2OpF(context, CmpCondition.Equal, scalar: false);
            }
            else
            {
                EmitCmpOpF(context, SoftFloat32.FPCompareEQ, SoftFloat64.FPCompareEQ, scalar: false);
            }
        }

        public static void Fcmge_S(ArmEmitterContext context)
        {
            if (Optimizations.FastFP && Optimizations.UseSse2)
            {
                EmitCmpSseOrSse2OpF(context, CmpCondition.GreaterThanOrEqual, scalar: true);
            }
            else
            {
                EmitCmpOpF(context, SoftFloat32.FPCompareGE, SoftFloat64.FPCompareGE, scalar: true);
            }
        }

        public static void Fcmge_V(ArmEmitterContext context)
        {
            if (Optimizations.FastFP && Optimizations.UseSse2)
            {
                EmitCmpSseOrSse2OpF(context, CmpCondition.GreaterThanOrEqual, scalar: false);
            }
            else
            {
                EmitCmpOpF(context, SoftFloat32.FPCompareGE, SoftFloat64.FPCompareGE, scalar: false);
            }
        }

        public static void Fcmgt_S(ArmEmitterContext context)
        {
            if (Optimizations.FastFP && Optimizations.UseSse2)
            {
                EmitCmpSseOrSse2OpF(context, CmpCondition.GreaterThan, scalar: true);
            }
            else
            {
                EmitCmpOpF(context, SoftFloat32.FPCompareGT, SoftFloat64.FPCompareGT, scalar: true);
            }
        }

        public static void Fcmgt_V(ArmEmitterContext context)
        {
            if (Optimizations.FastFP && Optimizations.UseSse2)
            {
                EmitCmpSseOrSse2OpF(context, CmpCondition.GreaterThan, scalar: false);
            }
            else
            {
                EmitCmpOpF(context, SoftFloat32.FPCompareGT, SoftFloat64.FPCompareGT, scalar: false);
            }
        }

        public static void Fcmle_S(ArmEmitterContext context)
        {
            if (Optimizations.FastFP && Optimizations.UseSse2)
            {
                EmitCmpSseOrSse2OpF(context, CmpCondition.GreaterThanOrEqual, scalar: true, isLeOrLt: true);
            }
            else
            {
                EmitCmpOpF(context, SoftFloat32.FPCompareLE, SoftFloat64.FPCompareLE, scalar: true);
            }
        }

        public static void Fcmle_V(ArmEmitterContext context)
        {
            if (Optimizations.FastFP && Optimizations.UseSse2)
            {
                EmitCmpSseOrSse2OpF(context, CmpCondition.GreaterThanOrEqual, scalar: false, isLeOrLt: true);
            }
            else
            {
                EmitCmpOpF(context, SoftFloat32.FPCompareLE, SoftFloat64.FPCompareLE, scalar: false);
            }
        }

        public static void Fcmlt_S(ArmEmitterContext context)
        {
            if (Optimizations.FastFP && Optimizations.UseSse2)
            {
                EmitCmpSseOrSse2OpF(context, CmpCondition.GreaterThan, scalar: true, isLeOrLt: true);
            }
            else
            {
                EmitCmpOpF(context, SoftFloat32.FPCompareLT, SoftFloat64.FPCompareLT, scalar: true);
            }
        }

        public static void Fcmlt_V(ArmEmitterContext context)
        {
            if (Optimizations.FastFP && Optimizations.UseSse2)
            {
                EmitCmpSseOrSse2OpF(context, CmpCondition.GreaterThan, scalar: false, isLeOrLt: true);
            }
            else
            {
                EmitCmpOpF(context, SoftFloat32.FPCompareLT, SoftFloat64.FPCompareLT, scalar: false);
            }
        }

        public static void Fcmp_S(ArmEmitterContext context)
        {
            EmitFcmpOrFcmpe(context, signalNaNs: false);
        }

        public static void Fcmpe_S(ArmEmitterContext context)
        {
            EmitFcmpOrFcmpe(context, signalNaNs: true);
        }
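
        // FCCMP/FCCMPE: when the condition passes, perform the floating-point compare;
        // otherwise NZCV is loaded from the immediate encoded in the instruction.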
        public static void EmitFccmpOrFccmpe(ArmEmitterContext context, bool signalNaNs)
        {
            OpCodeSimdFcond op = (OpCodeSimdFcond)context.CurrOp;

            Operand lblTrue = Label();
            Operand lblEnd = Label();

            context.BranchIfTrue(lblTrue, InstEmitFlowHelper.GetCondTrue(context, op.Cond));

            EmitSetNzcv(context, Const(op.Nzcv));

            context.Branch(lblEnd);

            context.MarkLabel(lblTrue);

            EmitFcmpOrFcmpe(context, signalNaNs);

            context.MarkLabel(lblEnd);
        }

        private static void EmitFcmpOrFcmpe(ArmEmitterContext context, bool signalNaNs)
        {
            OpCodeSimdReg op = (OpCodeSimdReg)context.CurrOp;
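
            // 7 is the CMPSS/CMPSD immediate predicate for "ordered" (neither operand is NaN).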
            const int cmpOrdered = 7;

            bool cmpWithZero = !(op is OpCodeSimdFcond) ? op.Bit3 : false;

            if (Optimizations.FastFP && Optimizations.UseSse2)
            {
                Operand n = GetVec(op.Rn);
                Operand m = cmpWithZero ? context.VectorZero() : GetVec(op.Rm);

                Operand lblNaN = Label();
                Operand lblEnd = Label();

                if (op.Size == 0)
                {
                    Operand ordMask = context.AddIntrinsic(Intrinsic.X86Cmpss, n, m, Const(cmpOrdered));

                    Operand isOrdered = context.VectorExtract16(ordMask, 0);

                    context.BranchIfFalse(lblNaN, isOrdered);

                    Operand cf = context.AddIntrinsicInt(Intrinsic.X86Comissge, n, m);
                    Operand zf = context.AddIntrinsicInt(Intrinsic.X86Comisseq, n, m);
                    Operand nf = context.AddIntrinsicInt(Intrinsic.X86Comisslt, n, m);

                    SetFlag(context, PState.VFlag, Const(0));
                    SetFlag(context, PState.CFlag, cf);
                    SetFlag(context, PState.ZFlag, zf);
                    SetFlag(context, PState.NFlag, nf);
                }
                else /* if (op.Size == 1) */
                {
                    Operand ordMask = context.AddIntrinsic(Intrinsic.X86Cmpsd, n, m, Const(cmpOrdered));

                    Operand isOrdered = context.VectorExtract16(ordMask, 0);

                    context.BranchIfFalse(lblNaN, isOrdered);

                    Operand cf = context.AddIntrinsicInt(Intrinsic.X86Comisdge, n, m);
                    Operand zf = context.AddIntrinsicInt(Intrinsic.X86Comisdeq, n, m);
                    Operand nf = context.AddIntrinsicInt(Intrinsic.X86Comisdlt, n, m);

                    SetFlag(context, PState.VFlag, Const(0));
                    SetFlag(context, PState.CFlag, cf);
                    SetFlag(context, PState.ZFlag, zf);
                    SetFlag(context, PState.NFlag, nf);
                }

                context.Branch(lblEnd);

                context.MarkLabel(lblNaN);
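
                // Unordered (NaN) operands: NZCV = 0011, as defined for AArch64 FCMP/FCMPE.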
                SetFlag(context, PState.VFlag, Const(1));
                SetFlag(context, PState.CFlag, Const(1));
                SetFlag(context, PState.ZFlag, Const(0));
                SetFlag(context, PState.NFlag, Const(0));

                context.MarkLabel(lblEnd);
            }
            else
            {
                OperandType type = op.Size != 0 ? OperandType.FP64 : OperandType.FP32;

                Operand ne = context.VectorExtract(type, GetVec(op.Rn), 0);
                Operand me;

                if (cmpWithZero)
                {
                    me = op.Size == 0 ? ConstF(0f) : ConstF(0d);
                }
                else
                {
                    me = context.VectorExtract(type, GetVec(op.Rm), 0);
                }

                Delegate dlg = op.Size != 0
                    ? (Delegate)new _S32_F64_F64_Bool(SoftFloat64.FPCompare)
                    : (Delegate)new _S32_F32_F32_Bool(SoftFloat32.FPCompare);

                Operand nzcv = context.Call(dlg, ne, me, Const(signalNaNs));

                EmitSetNzcv(context, nzcv);
            }
        }

        private static void EmitSetNzcv(ArmEmitterContext context, Operand nzcv)
        {
            Operand Extract(Operand value, int bit)
            {
                if (bit != 0)
                {
                    value = context.ShiftRightUI(value, Const(bit));
                }

                value = context.BitwiseAnd(value, Const(1));

                return value;
            }

            SetFlag(context, PState.VFlag, Extract(nzcv, 0));
            SetFlag(context, PState.CFlag, Extract(nzcv, 1));
            SetFlag(context, PState.ZFlag, Extract(nzcv, 2));
            SetFlag(context, PState.NFlag, Extract(nzcv, 3));
        }

        private static void EmitCmpOp(ArmEmitterContext context, Func2I emitCmp, bool scalar)
        {
            OpCodeSimd op = (OpCodeSimd)context.CurrOp;

            Operand res = context.VectorZero();

            int elems = !scalar ? op.GetBytesCount() >> op.Size : 1;

            ulong szMask = ulong.MaxValue >> (64 - (8 << op.Size));

            for (int index = 0; index < elems; index++)
            {
                Operand ne = EmitVectorExtractSx(context, op.Rn, index, op.Size);
                Operand me;

                if (op is OpCodeSimdReg binOp)
                {
                    me = EmitVectorExtractSx(context, binOp.Rm, index, op.Size);
                }
                else
                {
                    me = Const(0L);
                }

                Operand isTrue = emitCmp(ne, me);

                Operand mask = context.ConditionalSelect(isTrue, Const(szMask), Const(0L));

                res = EmitVectorInsert(context, res, mask, index, op.Size);
            }

            context.Copy(GetVec(op.Rd), res);
        }

        private static void EmitCmtstOp(ArmEmitterContext context, bool scalar)
        {
            OpCodeSimdReg op = (OpCodeSimdReg)context.CurrOp;

            Operand res = context.VectorZero();

            int elems = !scalar ? op.GetBytesCount() >> op.Size : 1;

            ulong szMask = ulong.MaxValue >> (64 - (8 << op.Size));

            for (int index = 0; index < elems; index++)
            {
                Operand ne = EmitVectorExtractZx(context, op.Rn, index, op.Size);
                Operand me = EmitVectorExtractZx(context, op.Rm, index, op.Size);

                Operand test = context.BitwiseAnd(ne, me);

                Operand isTrue = context.ICompareNotEqual(test, Const(0L));

                Operand mask = context.ConditionalSelect(isTrue, Const(szMask), Const(0L));

                res = EmitVectorInsert(context, res, mask, index, op.Size);
            }

            context.Copy(GetVec(op.Rd), res);
        }

        private static void EmitCmpOpF(
            ArmEmitterContext context,
            _F32_F32_F32 f32,
            _F64_F64_F64 f64,
            bool scalar)
        {
            OpCodeSimd op = (OpCodeSimd)context.CurrOp;

            Operand res = context.VectorZero();

            int sizeF = op.Size & 1;

            OperandType type = sizeF != 0 ? OperandType.FP64 : OperandType.FP32;

            int elems = !scalar ? op.GetBytesCount() >> sizeF + 2 : 1;

            for (int index = 0; index < elems; index++)
            {
                Operand ne = context.VectorExtract(type, GetVec(op.Rn), index);
                Operand me;

                if (op is OpCodeSimdReg binOp)
                {
                    me = context.VectorExtract(type, GetVec(binOp.Rm), index);
                }
                else
                {
                    me = sizeF == 0 ? ConstF(0f) : ConstF(0d);
                }

                Operand e = EmitSoftFloatCall(context, f32, f64, ne, me);

                res = context.VectorInsert(res, e, index);
            }

            context.Copy(GetVec(op.Rd), res);
        }
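
        // Values correspond to the x86 CMPPS/CMPPD/CMPSS/CMPSD immediate predicates
        // (0 = EQ, 5 = NLT, i.e. >=, 6 = NLE, i.e. >).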
        private enum CmpCondition
        {
            Equal = 0,
            GreaterThanOrEqual = 5,
            GreaterThan = 6
        }

        private static void EmitCmpSseOrSse2OpF(
            ArmEmitterContext context,
            CmpCondition cond,
            bool scalar,
            bool isLeOrLt = false)
        {
            OpCodeSimd op = (OpCodeSimd)context.CurrOp;

            Operand n = GetVec(op.Rn);
            Operand m = op is OpCodeSimdReg binOp ? GetVec(binOp.Rm) : context.VectorZero();

            int sizeF = op.Size & 1;

            if (sizeF == 0)
            {
                Intrinsic inst = scalar ? Intrinsic.X86Cmpss : Intrinsic.X86Cmpps;
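
                // For the LE/LT forms the caller passes the GE/GT condition with isLeOrLt set,
                // and the operands are swapped here: n <= m is emitted as m >= n.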
                Operand res = isLeOrLt
                    ? context.AddIntrinsic(inst, m, n, Const((int)cond))
                    : context.AddIntrinsic(inst, n, m, Const((int)cond));

                if (scalar)
                {
                    res = context.VectorZeroUpper96(res);
                }
                else if (op.RegisterSize == RegisterSize.Simd64)
                {
                    res = context.VectorZeroUpper64(res);
                }

                context.Copy(GetVec(op.Rd), res);
            }
            else /* if (sizeF == 1) */
            {
                Intrinsic inst = scalar ? Intrinsic.X86Cmpsd : Intrinsic.X86Cmppd;

                Operand res = isLeOrLt
                    ? context.AddIntrinsic(inst, m, n, Const((int)cond))
                    : context.AddIntrinsic(inst, n, m, Const((int)cond));

                if (scalar)
                {
                    res = context.VectorZeroUpper64(res);
                }

                context.Copy(GetVec(op.Rd), res);
            }
        }
    }
}