2015-07-23 05:25:30 +02:00
|
|
|
// Copyright 2015 Citra Emulator Project
|
|
|
|
// Licensed under GPLv2 or any later version
|
|
|
|
// Refer to the license.txt file included.
|
|
|
|
|
2016-04-13 05:24:34 +02:00
|
|
|
#include <algorithm>
|
2015-07-23 05:25:30 +02:00
|
|
|
#include <smmintrin.h>
|
|
|
|
|
2015-08-12 06:00:44 +02:00
|
|
|
#include "common/x64/abi.h"
|
2015-08-12 06:19:20 +02:00
|
|
|
#include "common/x64/cpu_detect.h"
|
2015-08-12 06:00:44 +02:00
|
|
|
#include "common/x64/emitter.h"
|
2015-07-23 05:25:30 +02:00
|
|
|
|
|
|
|
#include "shader.h"
|
2015-08-12 06:00:44 +02:00
|
|
|
#include "shader_jit_x64.h"
|
2015-07-23 05:25:30 +02:00
|
|
|
|
2016-03-03 04:16:38 +01:00
|
|
|
#include "video_core/pica_state.h"
|
|
|
|
|
2015-07-23 05:25:30 +02:00
|
|
|
namespace Pica {
|
|
|
|
|
|
|
|
namespace Shader {
|
|
|
|
|
|
|
|
using namespace Gen;
|
|
|
|
|
2016-04-13 05:34:03 +02:00
|
|
|
/// Member-function pointer type for the per-opcode compilation handlers below
typedef void (JitShader::*JitFunction)(Instruction instr);

/// Dispatch table mapping each of the 64 possible PICA200 shader opcodes to the
/// JitShader handler that emits x64 code for it; nullptr marks opcodes that are
/// unknown or not handled by this JIT
const JitFunction instr_table[64] = {
    &JitShader::Compile_ADD,   // add
    &JitShader::Compile_DP3,   // dp3
    &JitShader::Compile_DP4,   // dp4
    &JitShader::Compile_DPH,   // dph
    nullptr,                   // unknown
    &JitShader::Compile_EX2,   // ex2
    &JitShader::Compile_LG2,   // lg2
    nullptr,                   // unknown
    &JitShader::Compile_MUL,   // mul
    &JitShader::Compile_SGE,   // sge
    &JitShader::Compile_SLT,   // slt
    &JitShader::Compile_FLR,   // flr
    &JitShader::Compile_MAX,   // max
    &JitShader::Compile_MIN,   // min
    &JitShader::Compile_RCP,   // rcp
    &JitShader::Compile_RSQ,   // rsq
    nullptr,                   // unknown
    nullptr,                   // unknown
    &JitShader::Compile_MOVA,  // mova
    &JitShader::Compile_MOV,   // mov
    nullptr,                   // unknown
    nullptr,                   // unknown
    nullptr,                   // unknown
    nullptr,                   // unknown
    &JitShader::Compile_DPH,   // dphi
    nullptr,                   // unknown
    &JitShader::Compile_SGE,   // sgei
    &JitShader::Compile_SLT,   // slti
    nullptr,                   // unknown
    nullptr,                   // unknown
    nullptr,                   // unknown
    nullptr,                   // unknown
    nullptr,                   // unknown
    &JitShader::Compile_NOP,   // nop
    &JitShader::Compile_END,   // end
    nullptr,                   // break
    &JitShader::Compile_CALL,  // call
    &JitShader::Compile_CALLC, // callc
    &JitShader::Compile_CALLU, // callu
    &JitShader::Compile_IF,    // ifu
    &JitShader::Compile_IF,    // ifc
    &JitShader::Compile_LOOP,  // loop
    nullptr,                   // emit
    nullptr,                   // sete
    &JitShader::Compile_JMP,   // jmpc
    &JitShader::Compile_JMP,   // jmpu
    &JitShader::Compile_CMP,   // cmp
    &JitShader::Compile_CMP,   // cmp
    &JitShader::Compile_MAD,   // madi
    &JitShader::Compile_MAD,   // madi
    &JitShader::Compile_MAD,   // madi
    &JitShader::Compile_MAD,   // madi
    &JitShader::Compile_MAD,   // madi
    &JitShader::Compile_MAD,   // madi
    &JitShader::Compile_MAD,   // madi
    &JitShader::Compile_MAD,   // madi
    &JitShader::Compile_MAD,   // mad
    &JitShader::Compile_MAD,   // mad
    &JitShader::Compile_MAD,   // mad
    &JitShader::Compile_MAD,   // mad
    &JitShader::Compile_MAD,   // mad
    &JitShader::Compile_MAD,   // mad
    &JitShader::Compile_MAD,   // mad
    &JitShader::Compile_MAD,   // mad
};
|
|
|
|
|
|
|
|
// The following is used to alias some commonly used registers. Generally, RAX-RDX and XMM0-XMM3 can
// be used as scratch registers within a compiler function. The other registers have designated
// purposes, as documented below:

/// Pointer to the uniform memory
static const X64Reg UNIFORMS = R9;
/// The two 32-bit VS address offset registers set by the MOVA instruction
static const X64Reg ADDROFFS_REG_0 = R10;
static const X64Reg ADDROFFS_REG_1 = R11;
/// VS loop count register
static const X64Reg LOOPCOUNT_REG = R12;
/// Current VS loop iteration number (we could probably use LOOPCOUNT_REG, but this quicker)
static const X64Reg LOOPCOUNT = RSI;
/// Number to increment LOOPCOUNT_REG by on each loop iteration
static const X64Reg LOOPINC = RDI;
/// Result of the previous CMP instruction for the X-component comparison
static const X64Reg COND0 = R13;
/// Result of the previous CMP instruction for the Y-component comparison
static const X64Reg COND1 = R14;
/// Pointer to the UnitState instance for the current VS unit
static const X64Reg REGISTERS = R15;
/// SIMD scratch register
static const X64Reg SCRATCH = XMM0;
/// Loaded with the first swizzled source register, otherwise can be used as a scratch register
static const X64Reg SRC1 = XMM1;
/// Loaded with the second swizzled source register, otherwise can be used as a scratch register
static const X64Reg SRC2 = XMM2;
/// Loaded with the third swizzled source register, otherwise can be used as a scratch register
static const X64Reg SRC3 = XMM3;
/// Additional scratch register
static const X64Reg SCRATCH2 = XMM4;
/// Constant vector of [1.0f, 1.0f, 1.0f, 1.0f], used to efficiently set a vector to one
static const X64Reg ONE = XMM14;
/// Constant vector of [-0.f, -0.f, -0.f, -0.f], used to efficiently negate a vector with XOR
static const X64Reg NEGBIT = XMM15;

// State registers that must not be modified by external functions calls
// Scratch registers, e.g., SRC1 and SCRATCH, have to be saved on the side if needed
static const BitSet32 persistent_regs = {
    UNIFORMS, REGISTERS, // Pointers to register blocks
    ADDROFFS_REG_0, ADDROFFS_REG_1, LOOPCOUNT_REG, COND0, COND1, // Cached registers
    // XMM registers occupy bit positions 16-31 in the set, hence the +16
    ONE+16, NEGBIT+16, // Constants
};

/// Raw constant for the source register selector that indicates no swizzling is performed
static const u8 NO_SRC_REG_SWIZZLE = 0x1b;
/// Raw constant for the destination register enable mask that indicates all components are enabled
static const u8 NO_DEST_REG_MASK = 0xf;
|
|
|
|
|
2016-03-18 00:45:09 +01:00
|
|
|
/**
|
|
|
|
* Get the vertex shader instruction for a given offset in the current shader program
|
|
|
|
* @param offset Offset in the current shader program of the instruction
|
|
|
|
* @return Instruction at the specified offset
|
|
|
|
*/
|
|
|
|
static Instruction GetVertexShaderInstruction(size_t offset) {
|
|
|
|
return { g_state.vs.program_code[offset] };
|
|
|
|
}
|
|
|
|
|
2016-04-02 06:02:03 +02:00
|
|
|
/// Helper invoked from JIT-compiled code (via Compile_Assert) to log a critical message
static void LogCritical(const char* msg) {
    LOG_CRITICAL(HW_GPU, "%s", msg);
}
|
|
|
|
|
2016-04-13 05:35:36 +02:00
|
|
|
void JitShader::Compile_Assert(bool condition, const char* msg) {
|
2016-04-02 06:02:03 +02:00
|
|
|
if (!condition) {
|
|
|
|
ABI_CallFunctionP(reinterpret_cast<const void*>(LogCritical), const_cast<char*>(msg));
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2015-07-23 05:25:30 +02:00
|
|
|
/**
 * Loads and swizzles a source register into the specified XMM register.
 * @param instr VS instruction, used for determining how to load the source register
 * @param src_num Number indicating which source register to load (1 = src1, 2 = src2, 3 = src3)
 * @param src_reg SourceRegister object corresponding to the source register to load
 * @param dest Destination XMM register to store the loaded, swizzled source register
 */
void JitShader::Compile_SwizzleSrc(Instruction instr, unsigned src_num, SourceRegister src_reg, X64Reg dest) {
    X64Reg src_ptr;
    size_t src_offset;

    // Float uniforms live in a separate block from the unit's input/temporary registers
    if (src_reg.GetRegisterType() == RegisterType::FloatUniform) {
        src_ptr = UNIFORMS;
        src_offset = src_reg.GetIndex() * sizeof(float24) * 4;
    } else {
        src_ptr = REGISTERS;
        src_offset = UnitState<false>::InputOffset(src_reg);
    }

    // The addressing forms below only take a 32-bit displacement
    int src_offset_disp = (int)src_offset;
    ASSERT_MSG(src_offset == src_offset_disp, "Source register offset too large for int type");

    unsigned operand_desc_id;

    const bool is_inverted = (0 != (instr.opcode.Value().GetInfo().subtype & OpCode::Info::SrcInversed));

    unsigned address_register_index;
    unsigned offset_src;

    // MAD/MADI use a different encoding for the operand descriptor and address register
    // than the common instruction format; the "inverted" variants also move which source
    // operand may be dynamically addressed
    if (instr.opcode.Value().EffectiveOpCode() == OpCode::Id::MAD ||
        instr.opcode.Value().EffectiveOpCode() == OpCode::Id::MADI) {
        operand_desc_id = instr.mad.operand_desc_id;
        offset_src = is_inverted ? 3 : 2;
        address_register_index = instr.mad.address_register_index;
    } else {
        operand_desc_id = instr.common.operand_desc_id;
        offset_src = is_inverted ? 2 : 1;
        address_register_index = instr.common.address_register_index;
    }

    // Only the offset_src operand may use dynamic addressing via one of the address registers
    if (src_num == offset_src && address_register_index != 0) {
        switch (address_register_index) {
        case 1: // address offset 1
            MOVAPS(dest, MComplex(src_ptr, ADDROFFS_REG_0, SCALE_1, src_offset_disp));
            break;
        case 2: // address offset 2
            MOVAPS(dest, MComplex(src_ptr, ADDROFFS_REG_1, SCALE_1, src_offset_disp));
            break;
        case 3: // address offset 3
            MOVAPS(dest, MComplex(src_ptr, LOOPCOUNT_REG, SCALE_1, src_offset_disp));
            break;
        default:
            UNREACHABLE();
            break;
        }
    } else {
        // Load the source
        MOVAPS(dest, MDisp(src_ptr, src_offset_disp));
    }

    SwizzlePattern swiz = { g_state.vs.swizzle_data[operand_desc_id] };

    // Generate instructions for source register swizzling as needed
    u8 sel = swiz.GetRawSelector(src_num);
    if (sel != NO_SRC_REG_SWIZZLE) {
        // Selector component order needs to be reversed for the SHUFPS instruction
        sel = ((sel & 0xc0) >> 6) | ((sel & 3) << 6) | ((sel & 0xc) << 2) | ((sel & 0x30) >> 2);

        // Shuffle inputs for swizzle
        SHUFPS(dest, R(dest), sel);
    }

    // If the source register should be negated, flip the negative bit using XOR
    const bool negate[] = { swiz.negate_src1, swiz.negate_src2, swiz.negate_src3 };
    if (negate[src_num - 1]) {
        XORPS(dest, R(NEGBIT));
    }
}
|
|
|
|
|
2016-04-13 05:34:03 +02:00
|
|
|
/**
 * Stores the result of an instruction back to its destination register, honoring the
 * destination component enable mask encoded in the operand's swizzle pattern.
 * @param instr VS instruction, used to determine the destination register and enable mask
 * @param src XMM register holding the result to write back
 */
void JitShader::Compile_DestEnable(Instruction instr, X64Reg src) {
    DestRegister dest;
    unsigned operand_desc_id;
    // MAD/MADI encode the destination and operand descriptor differently from the
    // common instruction format
    if (instr.opcode.Value().EffectiveOpCode() == OpCode::Id::MAD ||
        instr.opcode.Value().EffectiveOpCode() == OpCode::Id::MADI) {
        operand_desc_id = instr.mad.operand_desc_id;
        dest = instr.mad.dest.Value();
    } else {
        operand_desc_id = instr.common.operand_desc_id;
        dest = instr.common.dest.Value();
    }

    SwizzlePattern swiz = { g_state.vs.swizzle_data[operand_desc_id] };

    // The addressing forms below only take a 32-bit displacement
    int dest_offset_disp = (int)UnitState<false>::OutputOffset(dest);
    ASSERT_MSG(dest_offset_disp == UnitState<false>::OutputOffset(dest), "Destination offset too large for int type");

    // If all components are enabled, write the result to the destination register
    if (swiz.dest_mask == NO_DEST_REG_MASK) {
        // Store dest back to memory
        MOVAPS(MDisp(REGISTERS, dest_offset_disp), src);

    } else {
        // Not all components are enabled, so mask the result when storing to the destination register...
        MOVAPS(SCRATCH, MDisp(REGISTERS, dest_offset_disp));

        if (Common::GetCPUCaps().sse4_1) {
            // BLENDPS selects components from src per an immediate mask; the PICA mask
            // bit order has to be rearranged for the instruction encoding
            u8 mask = ((swiz.dest_mask & 1) << 3) | ((swiz.dest_mask & 8) >> 3) | ((swiz.dest_mask & 2) << 1) | ((swiz.dest_mask & 4) >> 1);
            BLENDPS(SCRATCH, R(src), mask);
        } else {
            // SSE2 fallback: interleave source and destination components, then shuffle
            // the desired component of each lane back into place
            MOVAPS(SCRATCH2, R(src));
            UNPCKHPS(SCRATCH2, R(SCRATCH)); // Unpack X/Y components of source and destination
            UNPCKLPS(SCRATCH, R(src)); // Unpack Z/W components of source and destination

            // Compute selector to selectively copy source components to destination for SHUFPS instruction
            u8 sel = ((swiz.DestComponentEnabled(0) ? 1 : 0) << 0) |
                     ((swiz.DestComponentEnabled(1) ? 3 : 2) << 2) |
                     ((swiz.DestComponentEnabled(2) ? 0 : 1) << 4) |
                     ((swiz.DestComponentEnabled(3) ? 2 : 3) << 6);
            SHUFPS(SCRATCH, R(SCRATCH2), sel);
        }

        // Store dest back to memory
        MOVAPS(MDisp(REGISTERS, dest_offset_disp), SCRATCH);
    }
}
|
|
|
|
|
2016-04-13 05:34:03 +02:00
|
|
|
/**
 * Multiplies src1 by src2 component-wise while sanitizing the result for PICA200
 * semantics: lanes where both inputs are numbers but the raw product is NaN
 * (i.e. 0 * inf) are forced to zero, while a NaN already present in an input
 * propagates through unchanged.
 * @param src1 First multiplicand; receives the sanitized product
 * @param src2 Second multiplicand (clobbered)
 * @param scratch Scratch register (clobbered)
 */
void JitShader::Compile_SanitizedMul(Gen::X64Reg src1, Gen::X64Reg src2, Gen::X64Reg scratch) {
    // scratch = all-ones mask in each lane where both inputs are ordered (non-NaN)
    MOVAPS(scratch, R(src1));
    CMPPS(scratch, R(src2), CMP_ORD);

    MULPS(src1, R(src2));

    // src2 = all-ones mask in each lane where the product is NaN
    MOVAPS(src2, R(src1));
    CMPPS(src2, R(src2), CMP_UNORD);

    // ordered-inputs XOR nan-product: zero only where numeric inputs produced NaN
    // (0 * inf); lanes whose inputs were already NaN keep their NaN result
    XORPS(scratch, R(src2));
    ANDPS(src1, R(scratch));
}
|
|
|
|
|
2016-04-13 05:34:03 +02:00
|
|
|
/**
 * Emits code that evaluates the instruction's conditional-flow condition against the
 * cached COND0/COND1 comparison results, combining them per the flow-control operation.
 * The boolean outcome ends up in EAX, and the final XOR/OR/AND leaves the flags set so
 * callers can branch with J_CC directly afterwards.
 * @param instr Instruction whose flow_control fields select the operation and reference values
 */
void JitShader::Compile_EvaluateCondition(Instruction instr) {
    // Note: NXOR is used below to check for equality
    switch (instr.flow_control.op) {
    case Instruction::FlowControlType::Or:
        MOV(32, R(RAX), R(COND0));
        MOV(32, R(RBX), R(COND1));
        XOR(32, R(RAX), Imm32(instr.flow_control.refx.Value() ^ 1));
        XOR(32, R(RBX), Imm32(instr.flow_control.refy.Value() ^ 1));
        OR(32, R(RAX), R(RBX));
        break;

    case Instruction::FlowControlType::And:
        MOV(32, R(RAX), R(COND0));
        MOV(32, R(RBX), R(COND1));
        XOR(32, R(RAX), Imm32(instr.flow_control.refx.Value() ^ 1));
        XOR(32, R(RBX), Imm32(instr.flow_control.refy.Value() ^ 1));
        AND(32, R(RAX), R(RBX));
        break;

    case Instruction::FlowControlType::JustX:
        MOV(32, R(RAX), R(COND0));
        XOR(32, R(RAX), Imm32(instr.flow_control.refx.Value() ^ 1));
        break;

    case Instruction::FlowControlType::JustY:
        MOV(32, R(RAX), R(COND1));
        XOR(32, R(RAX), Imm32(instr.flow_control.refy.Value() ^ 1));
        break;
    }
}
|
|
|
|
|
2016-04-13 05:34:03 +02:00
|
|
|
/**
 * Emits a compare of the boolean uniform selected by the instruction against zero,
 * setting the zero flag so a following J_CC(CC_Z, ...) skips when the uniform is false.
 * @param instr Instruction whose flow_control.bool_uniform_id selects the uniform
 */
void JitShader::Compile_UniformCondition(Instruction instr) {
    int offset = offsetof(decltype(g_state.vs.uniforms), b) + (instr.flow_control.bool_uniform_id * sizeof(bool));
    CMP(sizeof(bool) * 8, MDisp(UNIFORMS, offset), Imm8(0));
}
|
|
|
|
|
2016-04-13 05:34:03 +02:00
|
|
|
/// Returns the persistent state registers that are caller-saved under the current ABI
/// and must therefore be preserved around calls to external functions
BitSet32 JitShader::PersistentCallerSavedRegs() {
    return persistent_regs & ABI_ALL_CALLER_SAVED;
}
|
|
|
|
|
2016-04-13 05:34:03 +02:00
|
|
|
/// Emits code for the ADD instruction: component-wise addition of src1 and src2
void JitShader::Compile_ADD(Instruction instr) {
    Compile_SwizzleSrc(instr, 1, instr.common.src1, SRC1);
    Compile_SwizzleSrc(instr, 2, instr.common.src2, SRC2);
    ADDPS(SRC1, R(SRC2));
    Compile_DestEnable(instr, SRC1);
}
|
|
|
|
|
2016-04-13 05:34:03 +02:00
|
|
|
/// Emits code for the DP3 instruction: 3-component dot product of src1 and src2
/// with sanitized multiplication, result broadcast to all destination components
void JitShader::Compile_DP3(Instruction instr) {
    Compile_SwizzleSrc(instr, 1, instr.common.src1, SRC1);
    Compile_SwizzleSrc(instr, 2, instr.common.src2, SRC2);

    Compile_SanitizedMul(SRC1, SRC2, SCRATCH);

    // Broadcast the Y component of the product into SRC2
    MOVAPS(SRC2, R(SRC1));
    SHUFPS(SRC2, R(SRC2), _MM_SHUFFLE(1, 1, 1, 1));

    // Broadcast the Z component of the product into SRC3
    MOVAPS(SRC3, R(SRC1));
    SHUFPS(SRC3, R(SRC3), _MM_SHUFFLE(2, 2, 2, 2));

    // Broadcast X, then sum: every lane now holds x + y + z
    SHUFPS(SRC1, R(SRC1), _MM_SHUFFLE(0, 0, 0, 0));
    ADDPS(SRC1, R(SRC2));
    ADDPS(SRC1, R(SRC3));

    Compile_DestEnable(instr, SRC1);
}
|
|
|
|
|
2016-04-13 05:34:03 +02:00
|
|
|
/// Emits code for the DP4 instruction: 4-component dot product of src1 and src2
/// with sanitized multiplication, result broadcast to all destination components
void JitShader::Compile_DP4(Instruction instr) {
    Compile_SwizzleSrc(instr, 1, instr.common.src1, SRC1);
    Compile_SwizzleSrc(instr, 2, instr.common.src2, SRC2);

    Compile_SanitizedMul(SRC1, SRC2, SCRATCH);

    // Horizontal add: after the two shuffle+add rounds every lane holds x+y+z+w
    MOVAPS(SRC2, R(SRC1));
    SHUFPS(SRC1, R(SRC1), _MM_SHUFFLE(2, 3, 0, 1)); // XYZW -> ZWXY
    ADDPS(SRC1, R(SRC2));

    MOVAPS(SRC2, R(SRC1));
    SHUFPS(SRC1, R(SRC1), _MM_SHUFFLE(0, 1, 2, 3)); // XYZW -> WZYX
    ADDPS(SRC1, R(SRC2));

    Compile_DestEnable(instr, SRC1);
}
|
|
|
|
|
2016-04-13 05:34:03 +02:00
|
|
|
/// Emits code for the DPH/DPHI instructions: homogeneous dot product — src1's 4th
/// component is forced to 1.0 before a 4-component dot product with src2
void JitShader::Compile_DPH(Instruction instr) {
    // DPHI uses the "inverted" source encodings
    if (instr.opcode.Value().EffectiveOpCode() == OpCode::Id::DPHI) {
        Compile_SwizzleSrc(instr, 1, instr.common.src1i, SRC1);
        Compile_SwizzleSrc(instr, 2, instr.common.src2i, SRC2);
    } else {
        Compile_SwizzleSrc(instr, 1, instr.common.src1, SRC1);
        Compile_SwizzleSrc(instr, 2, instr.common.src2, SRC2);
    }

    if (Common::GetCPUCaps().sse4_1) {
        // Set 4th component to 1.0
        BLENDPS(SRC1, R(ONE), 0x8); // 0b1000
    } else {
        // Set 4th component to 1.0 using only SSE2 unpacks
        MOVAPS(SCRATCH, R(SRC1));
        UNPCKHPS(SCRATCH, R(ONE)); // XYZW, 1111 -> Z1__
        UNPCKLPD(SRC1, R(SCRATCH)); // XYZW, Z1__ -> XYZ1
    }

    Compile_SanitizedMul(SRC1, SRC2, SCRATCH);

    // Horizontal add across all four components (same pattern as DP4)
    MOVAPS(SRC2, R(SRC1));
    SHUFPS(SRC1, R(SRC1), _MM_SHUFFLE(2, 3, 0, 1)); // XYZW -> ZWXY
    ADDPS(SRC1, R(SRC2));

    MOVAPS(SRC2, R(SRC1));
    SHUFPS(SRC1, R(SRC1), _MM_SHUFFLE(0, 1, 2, 3)); // XYZW -> WZYX
    ADDPS(SRC1, R(SRC2));

    Compile_DestEnable(instr, SRC1);
}
|
|
|
|
|
2016-04-13 05:34:03 +02:00
|
|
|
/// Emits code for the EX2 instruction: computes 2^x of the source's X component via a
/// call to exp2f and broadcasts the result to all destination components
void JitShader::Compile_EX2(Instruction instr) {
    Compile_SwizzleSrc(instr, 1, instr.common.src1, SRC1);
    MOVSS(XMM0, R(SRC1));

    // The external call may clobber caller-saved registers, so preserve persistent state
    ABI_PushRegistersAndAdjustStack(PersistentCallerSavedRegs(), 0);
    ABI_CallFunction(reinterpret_cast<const void*>(exp2f));
    ABI_PopRegistersAndAdjustStack(PersistentCallerSavedRegs(), 0);

    // Broadcast the scalar result to all four components
    SHUFPS(XMM0, R(XMM0), _MM_SHUFFLE(0, 0, 0, 0));
    MOVAPS(SRC1, R(XMM0));
    Compile_DestEnable(instr, SRC1);
}
|
|
|
|
|
2016-04-13 05:34:03 +02:00
|
|
|
/// Emits code for the LG2 instruction: computes log2(x) of the source's X component via
/// a call to log2f and broadcasts the result to all destination components
void JitShader::Compile_LG2(Instruction instr) {
    Compile_SwizzleSrc(instr, 1, instr.common.src1, SRC1);
    MOVSS(XMM0, R(SRC1));

    // The external call may clobber caller-saved registers, so preserve persistent state
    ABI_PushRegistersAndAdjustStack(PersistentCallerSavedRegs(), 0);
    ABI_CallFunction(reinterpret_cast<const void*>(log2f));
    ABI_PopRegistersAndAdjustStack(PersistentCallerSavedRegs(), 0);

    // Broadcast the scalar result to all four components
    SHUFPS(XMM0, R(XMM0), _MM_SHUFFLE(0, 0, 0, 0));
    MOVAPS(SRC1, R(XMM0));
    Compile_DestEnable(instr, SRC1);
}
|
|
|
|
|
2016-04-13 05:34:03 +02:00
|
|
|
/// Emits code for the MUL instruction: component-wise multiply with PICA NaN semantics
void JitShader::Compile_MUL(Instruction instr) {
    Compile_SwizzleSrc(instr, 1, instr.common.src1, SRC1);
    Compile_SwizzleSrc(instr, 2, instr.common.src2, SRC2);
    Compile_SanitizedMul(SRC1, SRC2, SCRATCH);
    Compile_DestEnable(instr, SRC1);
}
|
|
|
|
|
2016-04-13 05:34:03 +02:00
|
|
|
/// Emits code for the SGE/SGEI instructions: each destination component becomes 1.0
/// where src1 >= src2, otherwise 0.0
void JitShader::Compile_SGE(Instruction instr) {
    // SGEI uses the "inverted" source encodings
    if (instr.opcode.Value().EffectiveOpCode() == OpCode::Id::SGEI) {
        Compile_SwizzleSrc(instr, 1, instr.common.src1i, SRC1);
        Compile_SwizzleSrc(instr, 2, instr.common.src2i, SRC2);
    } else {
        Compile_SwizzleSrc(instr, 1, instr.common.src1, SRC1);
        Compile_SwizzleSrc(instr, 2, instr.common.src2, SRC2);
    }

    // src2 <= src1 yields an all-ones lane mask; AND with 1.0 converts it to 1.0f/0.0f
    CMPPS(SRC2, R(SRC1), CMP_LE);
    ANDPS(SRC2, R(ONE));

    Compile_DestEnable(instr, SRC2);
}
|
|
|
|
|
2016-04-13 05:34:03 +02:00
|
|
|
/// Emits code for the SLT/SLTI instructions: each destination component becomes 1.0
/// where src1 < src2, otherwise 0.0
void JitShader::Compile_SLT(Instruction instr) {
    // SLTI uses the "inverted" source encodings
    if (instr.opcode.Value().EffectiveOpCode() == OpCode::Id::SLTI) {
        Compile_SwizzleSrc(instr, 1, instr.common.src1i, SRC1);
        Compile_SwizzleSrc(instr, 2, instr.common.src2i, SRC2);
    } else {
        Compile_SwizzleSrc(instr, 1, instr.common.src1, SRC1);
        Compile_SwizzleSrc(instr, 2, instr.common.src2, SRC2);
    }

    // src1 < src2 yields an all-ones lane mask; AND with 1.0 converts it to 1.0f/0.0f
    CMPPS(SRC1, R(SRC2), CMP_LT);
    ANDPS(SRC1, R(ONE));

    Compile_DestEnable(instr, SRC1);
}
|
|
|
|
|
2016-04-13 05:34:03 +02:00
|
|
|
/// Emits code for the FLR instruction: component-wise floor of the source
void JitShader::Compile_FLR(Instruction instr) {
    Compile_SwizzleSrc(instr, 1, instr.common.src1, SRC1);

    if (Common::GetCPUCaps().sse4_1) {
        ROUNDFLOORPS(SRC1, R(SRC1));
    } else {
        // NOTE(review): this SSE2 fallback rounds via the float->int->float conversion
        // path, which follows the current MXCSR rounding mode (round-to-nearest by
        // default) rather than a true floor — confirm the inaccuracy is intended
        CVTPS2DQ(SRC1, R(SRC1));
        CVTDQ2PS(SRC1, R(SRC1));
    }

    Compile_DestEnable(instr, SRC1);
}
|
|
|
|
|
2016-04-13 05:34:03 +02:00
|
|
|
/// Emits code for the MAX instruction: component-wise maximum of src1 and src2
void JitShader::Compile_MAX(Instruction instr) {
    Compile_SwizzleSrc(instr, 1, instr.common.src1, SRC1);
    Compile_SwizzleSrc(instr, 2, instr.common.src2, SRC2);
    // SSE semantics match PICA200 ones: In case of NaN, SRC2 is returned.
    MAXPS(SRC1, R(SRC2));
    Compile_DestEnable(instr, SRC1);
}
|
|
|
|
|
2016-04-13 05:34:03 +02:00
|
|
|
/// Emits code for the MIN instruction: component-wise minimum of src1 and src2
void JitShader::Compile_MIN(Instruction instr) {
    Compile_SwizzleSrc(instr, 1, instr.common.src1, SRC1);
    Compile_SwizzleSrc(instr, 2, instr.common.src2, SRC2);
    // SSE semantics match PICA200 ones: In case of NaN, SRC2 is returned.
    MINPS(SRC1, R(SRC2));
    Compile_DestEnable(instr, SRC1);
}
|
|
|
|
|
2016-04-13 05:34:03 +02:00
|
|
|
/// Emits code for the MOVA instruction: truncates the X/Y components of the source to
/// integers and moves them into the enabled address offset registers, pre-multiplied by
/// 16 so they can later be used directly as byte offsets into the register file
void JitShader::Compile_MOVA(Instruction instr) {
    SwizzlePattern swiz = { g_state.vs.swizzle_data[instr.common.operand_desc_id] };

    // Only the X and Y destination components are meaningful for MOVA
    if (!swiz.DestComponentEnabled(0) && !swiz.DestComponentEnabled(1)) {
        return; // NoOp
    }

    Compile_SwizzleSrc(instr, 1, instr.common.src1, SRC1);

    // Convert floats to integers using truncation (only care about X and Y components)
    CVTTPS2DQ(SRC1, R(SRC1));

    // Get result: X in the low 32 bits of RAX, Y in the high 32 bits
    MOVQ_xmm(R(RAX), SRC1);

    // Handle destination enable
    if (swiz.DestComponentEnabled(0) && swiz.DestComponentEnabled(1)) {
        // Move and sign-extend low 32 bits
        MOVSX(64, 32, ADDROFFS_REG_0, R(RAX));

        // Move and sign-extend high 32 bits
        SHR(64, R(RAX), Imm8(32));
        MOVSX(64, 32, ADDROFFS_REG_1, R(RAX));

        // Multiply by 16 to be used as an offset later
        SHL(64, R(ADDROFFS_REG_0), Imm8(4));
        SHL(64, R(ADDROFFS_REG_1), Imm8(4));
    } else {
        if (swiz.DestComponentEnabled(0)) {
            // Move and sign-extend low 32 bits
            MOVSX(64, 32, ADDROFFS_REG_0, R(RAX));

            // Multiply by 16 to be used as an offset later
            SHL(64, R(ADDROFFS_REG_0), Imm8(4));
        } else if (swiz.DestComponentEnabled(1)) {
            // Move and sign-extend high 32 bits
            SHR(64, R(RAX), Imm8(32));
            MOVSX(64, 32, ADDROFFS_REG_1, R(RAX));

            // Multiply by 16 to be used as an offset later
            SHL(64, R(ADDROFFS_REG_1), Imm8(4));
        }
    }
}
|
|
|
|
|
2016-04-13 05:34:03 +02:00
|
|
|
/// Emits code for the MOV instruction: copies the swizzled source to the destination
void JitShader::Compile_MOV(Instruction instr) {
    Compile_SwizzleSrc(instr, 1, instr.common.src1, SRC1);
    Compile_DestEnable(instr, SRC1);
}
|
|
|
|
|
2016-04-13 05:34:03 +02:00
|
|
|
/// Emits code for the RCP instruction: approximate reciprocal of the source's X
/// component, broadcast to all destination components
void JitShader::Compile_RCP(Instruction instr) {
    Compile_SwizzleSrc(instr, 1, instr.common.src1, SRC1);

    // TODO(bunnei): RCPSS is a pretty rough approximation, this might cause problems if Pica
    // performs this operation more accurately. This should be checked on hardware.
    RCPSS(SRC1, R(SRC1));
    SHUFPS(SRC1, R(SRC1), _MM_SHUFFLE(0, 0, 0, 0)); // XYZW -> XXXX

    Compile_DestEnable(instr, SRC1);
}
|
|
|
|
|
2016-04-13 05:34:03 +02:00
|
|
|
/// Emits code for the RSQ instruction: approximate reciprocal square root of the
/// source's X component, broadcast to all destination components
void JitShader::Compile_RSQ(Instruction instr) {
    Compile_SwizzleSrc(instr, 1, instr.common.src1, SRC1);

    // TODO(bunnei): RSQRTSS is a pretty rough approximation, this might cause problems if Pica
    // performs this operation more accurately. This should be checked on hardware.
    RSQRTSS(SRC1, R(SRC1));
    SHUFPS(SRC1, R(SRC1), _MM_SHUFFLE(0, 0, 0, 0)); // XYZW -> XXXX

    Compile_DestEnable(instr, SRC1);
}
|
|
|
|
|
2016-04-13 05:34:03 +02:00
|
|
|
/// NOP emits no code
void JitShader::Compile_NOP(Instruction instr) {
}
|
|
|
|
|
2016-04-13 05:34:03 +02:00
|
|
|
/// Emits code for the END instruction: pops the callee-saved registers (presumably
/// pushed by the shader prologue, which is not visible in this chunk) and returns
/// control to the caller of the compiled shader
void JitShader::Compile_END(Instruction instr) {
    ABI_PopRegistersAndAdjustStack(ABI_ALL_CALLEE_SAVED, 8);
    RET();
}
|
|
|
|
|
2016-04-13 05:34:03 +02:00
|
|
|
/// Emits code for the CALL instruction: pushes the shader-program return offset,
/// calls the subroutine (target patched later via fixup_branches), then discards
/// the pushed offset from the stack
void JitShader::Compile_CALL(Instruction instr) {
    // Push offset of the return
    PUSH(64, Imm32(instr.flow_control.dest_offset + instr.flow_control.num_instructions));

    // Call the subroutine
    FixupBranch b = CALL();
    // The target's host address is not known yet; record the branch for later patching
    fixup_branches.push_back({ b, instr.flow_control.dest_offset });

    // Skip over the return offset that's on the stack
    ADD(64, R(RSP), Imm32(8));
}
|
|
|
|
|
2016-04-13 05:34:03 +02:00
|
|
|
/// Emits code for the CALLC instruction: performs the CALL only when the condition
/// evaluated from the cached COND0/COND1 comparison results holds
void JitShader::Compile_CALLC(Instruction instr) {
    Compile_EvaluateCondition(instr);
    FixupBranch b = J_CC(CC_Z, true); // skip the call when the condition is false
    Compile_CALL(instr);
    SetJumpTarget(b);
}
|
|
|
|
|
2016-04-13 05:34:03 +02:00
|
|
|
/// Emits code for the CALLU instruction: performs the CALL only when the selected
/// boolean uniform is non-zero
void JitShader::Compile_CALLU(Instruction instr) {
    Compile_UniformCondition(instr);
    FixupBranch b = J_CC(CC_Z, true); // skip the call when the uniform is false
    Compile_CALL(instr);
    SetJumpTarget(b);
}
|
|
|
|
|
2016-04-13 05:34:03 +02:00
|
|
|
/// Emits code for the CMP instruction: compares the X and Y components of src1 and
/// src2 using the per-component compare operations and stores the 0/1 results in the
/// COND0/COND1 registers for use by later conditional flow-control instructions
void JitShader::Compile_CMP(Instruction instr) {
    using Op = Instruction::Common::CompareOpType::Op;
    Op op_x = instr.common.compare_op.x;
    Op op_y = instr.common.compare_op.y;

    Compile_SwizzleSrc(instr, 1, instr.common.src1, SRC1);
    Compile_SwizzleSrc(instr, 2, instr.common.src2, SRC2);

    // SSE doesn't have greater-than (GT) or greater-equal (GE) comparison operators. You need to
    // emulate them by swapping the lhs and rhs and using LT and LE. NLT and NLE can't be used here
    // because they don't match when used with NaNs.
    static const u8 cmp[] = { CMP_EQ, CMP_NEQ, CMP_LT, CMP_LE, CMP_LT, CMP_LE };

    bool invert_op_x = (op_x == Op::GreaterThan || op_x == Op::GreaterEqual);
    Gen::X64Reg lhs_x = invert_op_x ? SRC2 : SRC1;
    Gen::X64Reg rhs_x = invert_op_x ? SRC1 : SRC2;

    if (op_x == op_y) {
        // Compare X-component and Y-component together
        CMPPS(lhs_x, R(rhs_x), cmp[op_x]);
        MOVQ_xmm(R(COND0), lhs_x);

        MOV(64, R(COND1), R(COND0));
    } else {
        bool invert_op_y = (op_y == Op::GreaterThan || op_y == Op::GreaterEqual);
        Gen::X64Reg lhs_y = invert_op_y ? SRC2 : SRC1;
        Gen::X64Reg rhs_y = invert_op_y ? SRC1 : SRC2;

        // Compare X-component
        MOVAPS(SCRATCH, R(lhs_x));
        CMPSS(SCRATCH, R(rhs_x), cmp[op_x]);

        // Compare Y-component
        CMPPS(lhs_y, R(rhs_y), cmp[op_y]);

        MOVQ_xmm(R(COND0), SCRATCH);
        MOVQ_xmm(R(COND1), lhs_y);
    }

    // Reduce the all-ones/zero comparison masks to 0/1 booleans: the X result lives in
    // the sign bit of the low lane (bit 31), the Y result in bit 63 of the packed pair
    SHR(32, R(COND0), Imm8(31));
    SHR(64, R(COND1), Imm8(63));
}
|
|
|
|
|
2016-04-13 05:34:03 +02:00
|
|
|
/**
 * Compiles the MAD/MADI (multiply-add) instruction: dest = src1 * src2 + src3.
 * @param instr the decoded PICA instruction to emit code for
 */
void JitShader::Compile_MAD(Instruction instr) {
    Compile_SwizzleSrc(instr, 1, instr.mad.src1, SRC1);

    // MADI selects its 2nd/3rd operands from the alternate encoding fields
    // (src2i/src3i) of the mad instruction format.
    if (instr.opcode.Value().EffectiveOpCode() == OpCode::Id::MADI) {
        Compile_SwizzleSrc(instr, 2, instr.mad.src2i, SRC2);
        Compile_SwizzleSrc(instr, 3, instr.mad.src3i, SRC3);
    } else {
        Compile_SwizzleSrc(instr, 2, instr.mad.src2, SRC2);
        Compile_SwizzleSrc(instr, 3, instr.mad.src3, SRC3);
    }

    // SRC1 = SRC1 * SRC2, routed through the sanitized-multiply helper
    // (presumably to handle special float values — see Compile_SanitizedMul),
    // then SRC1 += SRC3.
    Compile_SanitizedMul(SRC1, SRC2, SCRATCH);
    ADDPS(SRC1, R(SRC3));

    Compile_DestEnable(instr, SRC1);
}
|
|
|
|
|
2016-04-13 05:34:03 +02:00
|
|
|
/**
 * Compiles the IFU/IFC (conditional) instruction. Emits the condition
 * evaluation followed by the "true" block, and optionally an "else" block
 * (present when num_instructions != 0), wiring up the jumps between them.
 * @param instr the decoded PICA instruction to emit code for
 */
void JitShader::Compile_IF(Instruction instr) {
    Compile_Assert(instr.flow_control.dest_offset >= program_counter, "Backwards if-statements not supported");

    // Evaluate the "IF" condition
    if (instr.opcode.Value() == OpCode::Id::IFU) {
        Compile_UniformCondition(instr);
    } else if (instr.opcode.Value() == OpCode::Id::IFC) {
        Compile_EvaluateCondition(instr);
    }
    // Skip the "true" block when the condition evaluated to zero
    FixupBranch b = J_CC(CC_Z, true);

    // Compile the code that corresponds to the condition evaluating as true
    Compile_Block(instr.flow_control.dest_offset);

    // If there isn't an "ELSE" condition, we are done here
    if (instr.flow_control.num_instructions == 0) {
        SetJumpTarget(b);
        return;
    }

    // Unconditional jump over the "else" block for the "true" path
    FixupBranch b2 = J(true);

    SetJumpTarget(b);

    // This code corresponds to the "ELSE" condition
    // Compile the code that corresponds to the condition evaluating as false
    Compile_Block(instr.flow_control.dest_offset + instr.flow_control.num_instructions);

    SetJumpTarget(b2);
}
|
|
|
|
|
2016-04-13 05:34:03 +02:00
|
|
|
/**
 * Compiles the LOOP instruction. The loop is parameterized by an integer
 * uniform whose byte components encode: X = iteration count - 1,
 * Y = initial value of the loop counter register, Z = per-iteration increment.
 * @param instr the decoded PICA instruction to emit code for
 */
void JitShader::Compile_LOOP(Instruction instr) {
    Compile_Assert(instr.flow_control.dest_offset >= program_counter, "Backwards loops not supported");
    Compile_Assert(!looping, "Nested loops not supported");

    looping = true;

    // Load the referenced integer uniform and unpack its components.
    int offset = offsetof(decltype(g_state.vs.uniforms), i) + (instr.flow_control.int_uniform_id * sizeof(Math::Vec4<u8>));
    MOV(32, R(LOOPCOUNT), MDisp(UNIFORMS, offset));
    MOV(32, R(LOOPCOUNT_REG), R(LOOPCOUNT));
    SHR(32, R(LOOPCOUNT_REG), Imm8(8));
    AND(32, R(LOOPCOUNT_REG), Imm32(0xff)); // Y-component is the start
    MOV(32, R(LOOPINC), R(LOOPCOUNT));
    SHR(32, R(LOOPINC), Imm8(16));
    MOVZX(32, 8, LOOPINC, R(LOOPINC)); // Z-component is the incrementer
    MOVZX(32, 8, LOOPCOUNT, R(LOOPCOUNT)); // X-component is iteration count
    ADD(32, R(LOOPCOUNT), Imm8(1)); // Iteration count is X-component + 1

    auto loop_start = GetCodePtr();

    // Loop body spans up to and including dest_offset.
    Compile_Block(instr.flow_control.dest_offset + 1);

    ADD(32, R(LOOPCOUNT_REG), R(LOOPINC)); // Increment LOOPCOUNT_REG by Z-component
    SUB(32, R(LOOPCOUNT), Imm8(1)); // Decrement the remaining iteration count
    J_CC(CC_NZ, loop_start); // Loop if not equal

    looping = false;
}
|
|
|
|
|
2016-04-13 05:34:03 +02:00
|
|
|
/**
 * Compiles the JMPC/JMPU (conditional jump) instruction. Emits the condition
 * evaluation and records a forward branch to be resolved once the destination
 * offset has been compiled.
 * @param instr the decoded PICA instruction to emit code for
 */
void JitShader::Compile_JMP(Instruction instr) {
    const OpCode::Id opcode = instr.opcode.Value();

    // Emit the flag computation for the jump's condition.
    switch (opcode) {
    case OpCode::Id::JMPC:
        Compile_EvaluateCondition(instr);
        break;
    case OpCode::Id::JMPU:
        Compile_UniformCondition(instr);
        break;
    default:
        UNREACHABLE();
    }

    // For JMPU, bit 0 of num_instructions selects jumping on the *negated* condition.
    const bool jump_if_zero =
        (opcode == OpCode::Id::JMPU) && (instr.flow_control.num_instructions & 1) != 0;

    // The destination may not be compiled yet; queue the branch for later fixup.
    FixupBranch branch = J_CC(jump_if_zero ? CC_Z : CC_NZ, true);
    fixup_branches.push_back({ branch, instr.flow_control.dest_offset });
}
|
|
|
|
|
2016-04-13 05:34:03 +02:00
|
|
|
void JitShader::Compile_Block(unsigned end) {
|
2016-03-18 00:45:09 +01:00
|
|
|
while (program_counter < end) {
|
|
|
|
Compile_NextInstr();
|
|
|
|
}
|
|
|
|
}
|
2015-07-23 05:25:30 +02:00
|
|
|
|
2016-04-13 05:34:03 +02:00
|
|
|
/**
 * Emits the return check placed at recorded subroutine-return offsets:
 * if the return offset stored on the stack matches the current offset,
 * execution returns to the instruction following the CALL.
 */
void JitShader::Compile_Return() {
    // Peek return offset on the stack and check if we're at that offset
    MOV(64, R(RAX), MDisp(RSP, 8));
    CMP(32, R(RAX), Imm32(program_counter));

    // If so, jump back to before CALL
    FixupBranch b = J_CC(CC_NZ, true);
    RET();
    SetJumpTarget(b);
}
|
|
|
|
|
2016-04-13 05:34:03 +02:00
|
|
|
/**
 * Compiles the single shader instruction at program_counter, advancing it.
 * Records the host code address for the offset (used for branch fixups) and
 * dispatches to the per-opcode compile routine via instr_table.
 */
void JitShader::Compile_NextInstr() {
    // If this offset is a recorded subroutine-return target, emit the return
    // check first so an active CALL can resume here (see FindReturnOffsets()).
    if (std::binary_search(return_offsets.begin(), return_offsets.end(), program_counter)) {
        Compile_Return();
    }

    ASSERT_MSG(code_ptr[program_counter] == nullptr, "Tried to compile already compiled shader location!");
    // Remember where this shader offset begins in host code.
    code_ptr[program_counter] = GetCodePtr();

    Instruction instr = GetVertexShaderInstruction(program_counter++);

    OpCode::Id opcode = instr.opcode.Value();
    auto instr_func = instr_table[static_cast<unsigned>(opcode)];

    if (instr_func) {
        // JIT the instruction!
        ((*this).*instr_func)(instr);
    } else {
        // Unhandled instruction
        LOG_CRITICAL(HW_GPU, "Unhandled instruction: 0x%02x (0x%08x)",
                     instr.opcode.Value().EffectiveOpCode(), instr.hex);
    }
}
|
|
|
|
|
2016-04-13 05:34:03 +02:00
|
|
|
void JitShader::FindReturnOffsets() {
|
2016-03-18 00:45:09 +01:00
|
|
|
return_offsets.clear();
|
|
|
|
|
|
|
|
for (size_t offset = 0; offset < g_state.vs.program_code.size(); ++offset) {
|
|
|
|
Instruction instr = GetVertexShaderInstruction(offset);
|
|
|
|
|
|
|
|
switch (instr.opcode.Value()) {
|
|
|
|
case OpCode::Id::CALL:
|
|
|
|
case OpCode::Id::CALLC:
|
|
|
|
case OpCode::Id::CALLU:
|
2016-04-13 05:24:34 +02:00
|
|
|
return_offsets.push_back(instr.flow_control.dest_offset + instr.flow_control.num_instructions);
|
2016-03-18 00:45:09 +01:00
|
|
|
break;
|
|
|
|
}
|
|
|
|
}
|
2016-04-13 05:24:34 +02:00
|
|
|
|
|
|
|
// Sort for efficient binary search later
|
|
|
|
std::sort(return_offsets.begin(), return_offsets.end());
|
2016-03-18 00:45:09 +01:00
|
|
|
}
|
|
|
|
|
2016-04-13 05:34:03 +02:00
|
|
|
void JitShader::Compile() {
|
2016-04-09 17:39:56 +02:00
|
|
|
// Reset flow control state
|
2016-03-20 05:37:05 +01:00
|
|
|
program = (CompiledShader*)GetCodePtr();
|
2016-04-09 17:39:56 +02:00
|
|
|
program_counter = 0;
|
|
|
|
looping = false;
|
|
|
|
code_ptr.fill(nullptr);
|
|
|
|
fixup_branches.clear();
|
|
|
|
|
|
|
|
// Find all `CALL` instructions and identify return locations
|
|
|
|
FindReturnOffsets();
|
2015-07-23 05:25:30 +02:00
|
|
|
|
2015-08-26 09:12:14 +02:00
|
|
|
// The stack pointer is 8 modulo 16 at the entry of a procedure
|
|
|
|
ABI_PushRegistersAndAdjustStack(ABI_ALL_CALLEE_SAVED, 8);
|
2015-07-23 05:25:30 +02:00
|
|
|
|
2015-08-15 22:51:32 +02:00
|
|
|
MOV(PTRBITS, R(REGISTERS), R(ABI_PARAM1));
|
2015-07-23 05:25:30 +02:00
|
|
|
MOV(PTRBITS, R(UNIFORMS), ImmPtr(&g_state.vs.uniforms));
|
|
|
|
|
|
|
|
// Zero address/loop registers
|
|
|
|
XOR(64, R(ADDROFFS_REG_0), R(ADDROFFS_REG_0));
|
|
|
|
XOR(64, R(ADDROFFS_REG_1), R(ADDROFFS_REG_1));
|
|
|
|
XOR(64, R(LOOPCOUNT_REG), R(LOOPCOUNT_REG));
|
|
|
|
|
|
|
|
// Used to set a register to one
|
|
|
|
static const __m128 one = { 1.f, 1.f, 1.f, 1.f };
|
|
|
|
MOV(PTRBITS, R(RAX), ImmPtr(&one));
|
2015-08-24 06:39:50 +02:00
|
|
|
MOVAPS(ONE, MatR(RAX));
|
2015-07-23 05:25:30 +02:00
|
|
|
|
|
|
|
// Used to negate registers
|
|
|
|
static const __m128 neg = { -0.f, -0.f, -0.f, -0.f };
|
|
|
|
MOV(PTRBITS, R(RAX), ImmPtr(&neg));
|
2015-08-24 06:39:50 +02:00
|
|
|
MOVAPS(NEGBIT, MatR(RAX));
|
2015-07-23 05:25:30 +02:00
|
|
|
|
2016-03-18 00:45:09 +01:00
|
|
|
// Jump to start of the shader program
|
2016-03-27 03:02:15 +02:00
|
|
|
JMPptr(R(ABI_PARAM2));
|
2016-03-18 00:45:09 +01:00
|
|
|
|
|
|
|
// Compile entire program
|
|
|
|
Compile_Block(static_cast<unsigned>(g_state.vs.program_code.size()));
|
2015-08-12 06:00:44 +02:00
|
|
|
|
2016-03-18 00:45:09 +01:00
|
|
|
// Set the target for any incomplete branches now that the entire shader program has been emitted
|
|
|
|
for (const auto& branch : fixup_branches) {
|
|
|
|
SetJumpTarget(branch.first, code_ptr[branch.second]);
|
2015-07-23 05:25:30 +02:00
|
|
|
}
|
|
|
|
|
2016-04-13 05:29:25 +02:00
|
|
|
// Free memory that's no longer needed
|
|
|
|
return_offsets.clear();
|
|
|
|
return_offsets.shrink_to_fit();
|
|
|
|
fixup_branches.clear();
|
|
|
|
fixup_branches.shrink_to_fit();
|
|
|
|
|
2016-03-20 05:37:05 +01:00
|
|
|
uintptr_t size = reinterpret_cast<uintptr_t>(GetCodePtr()) - reinterpret_cast<uintptr_t>(program);
|
|
|
|
ASSERT_MSG(size <= MAX_SHADER_SIZE, "Compiled a shader that exceeds the allocated size!");
|
2015-07-23 05:25:30 +02:00
|
|
|
|
2016-03-20 05:37:05 +01:00
|
|
|
LOG_DEBUG(HW_GPU, "Compiled shader size=%d", size);
|
2015-08-12 06:00:44 +02:00
|
|
|
}
|
|
|
|
|
2016-04-13 05:34:03 +02:00
|
|
|
JitShader::JitShader() {
    // Reserve the fixed-size executable buffer this shader is emitted into;
    // Compile() asserts the generated code stays within MAX_SHADER_SIZE.
    AllocCodeSpace(MAX_SHADER_SIZE);
}
|
|
|
|
|
2015-07-23 05:25:30 +02:00
|
|
|
} // namespace Shader
|
|
|
|
|
|
|
|
} // namespace Pica
|