tests/skyeye_interpreter: Update Skyeye (22-08-2016)

Matches the version of Skyeye in citra commit
7b4dcacbb2006de6483e982b21956a8f3098aa1d
MerryMage 2016-08-22 14:07:54 +01:00
parent 1abe881921
commit 20e253ece2
14 changed files with 3160 additions and 2994 deletions

View file

@ -10,6 +10,7 @@ set(SRCS
skyeye_interpreter/dyncom/arm_dyncom_dec.cpp
skyeye_interpreter/dyncom/arm_dyncom_interpreter.cpp
skyeye_interpreter/dyncom/arm_dyncom_thumb.cpp
skyeye_interpreter/dyncom/arm_dyncom_trans.cpp
skyeye_interpreter/skyeye_common/armstate.cpp
skyeye_interpreter/skyeye_common/armsupp.cpp
skyeye_interpreter/skyeye_common/vfp/vfp.cpp
@ -23,6 +24,7 @@ set(HEADERS
skyeye_interpreter/dyncom/arm_dyncom_interpreter.h
skyeye_interpreter/dyncom/arm_dyncom_run.h
skyeye_interpreter/dyncom/arm_dyncom_thumb.h
skyeye_interpreter/dyncom/arm_dyncom_trans.h
skyeye_interpreter/skyeye_common/arm_regformat.h
skyeye_interpreter/skyeye_common/armstate.h
skyeye_interpreter/skyeye_common/armsupp.h

View file

@ -422,6 +422,10 @@ ARMDecodeStatus DecodeARMInstruction(u32 instr, s32* idx) {
n = arm_instruction[i].attribute_value;
base = 0;
// 3DS has no VFP3 support
if (arm_instruction[i].version == ARMVFP3)
continue;
while (n) {
if (arm_instruction[i].content[base + 1] == 31 && arm_instruction[i].content[base] == 0) {
// clrex

File diff suppressed because it is too large

View file

@ -2,6 +2,8 @@
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
#include <cstddef>
// We can provide simple Thumb simulation by decoding the Thumb instruction into its corresponding
// ARM instruction, and using the existing ARM simulator.
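// Illustrative sketch of that idea (hypothetical helper, not the actual arm_dyncom_thumb
// API): a 16-bit Thumb "MOV Rd, #imm8" can be rewritten as the equivalent always-executed
// ARM "MOVS Rd, #imm8" encoding and handed to the existing ARM decoder/interpreter.
static inline u32 ThumbMovImm8ToArm(u16 thumb_inst)
{
    const u32 rd   = (thumb_inst >> 8) & 0x7; // Thumb encoding: 001 00 Rd(3) imm8(8)
    const u32 imm8 = thumb_inst & 0xFF;
    return 0xE3B00000 | (rd << 12) | imm8;    // ARM: cond=AL, MOVS Rd, #imm8
}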

File diff suppressed because it is too large

View file

@ -0,0 +1,496 @@
struct ARMul_State;
typedef unsigned int (*shtop_fp_t)(ARMul_State* cpu, unsigned int sht_oper);
enum class TransExtData {
COND = (1 << 0),
NON_BRANCH = (1 << 1),
DIRECT_BRANCH = (1 << 2),
INDIRECT_BRANCH = (1 << 3),
CALL = (1 << 4),
RET = (1 << 5),
END_OF_PAGE = (1 << 6),
THUMB = (1 << 7),
SINGLE_STEP = (1 << 8)
};
struct arm_inst {
unsigned int idx;
unsigned int cond;
TransExtData br;
#ifdef __GNUC__
__extension__
#endif
char component[0];
};
struct generic_arm_inst {
u32 Ra;
u32 Rm;
u32 Rn;
u32 Rd;
u8 op1;
u8 op2;
};
struct adc_inst {
unsigned int I;
unsigned int S;
unsigned int Rn;
unsigned int Rd;
unsigned int shifter_operand;
shtop_fp_t shtop_func;
};
struct add_inst {
unsigned int I;
unsigned int S;
unsigned int Rn;
unsigned int Rd;
unsigned int shifter_operand;
shtop_fp_t shtop_func;
};
struct orr_inst {
unsigned int I;
unsigned int S;
unsigned int Rn;
unsigned int Rd;
unsigned int shifter_operand;
shtop_fp_t shtop_func;
};
struct and_inst {
unsigned int I;
unsigned int S;
unsigned int Rn;
unsigned int Rd;
unsigned int shifter_operand;
shtop_fp_t shtop_func;
};
struct eor_inst {
unsigned int I;
unsigned int S;
unsigned int Rn;
unsigned int Rd;
unsigned int shifter_operand;
shtop_fp_t shtop_func;
};
struct bbl_inst {
unsigned int L;
int signed_immed_24;
unsigned int next_addr;
unsigned int jmp_addr;
};
struct bx_inst {
unsigned int Rm;
};
struct blx_inst {
union {
s32 signed_immed_24;
u32 Rm;
} val;
unsigned int inst;
};
struct clz_inst {
unsigned int Rm;
unsigned int Rd;
};
struct cps_inst {
unsigned int imod0;
unsigned int imod1;
unsigned int mmod;
unsigned int A, I, F;
unsigned int mode;
};
struct clrex_inst {
};
struct cpy_inst {
unsigned int Rm;
unsigned int Rd;
};
struct bic_inst {
unsigned int I;
unsigned int S;
unsigned int Rn;
unsigned int Rd;
unsigned int shifter_operand;
shtop_fp_t shtop_func;
};
struct sub_inst {
unsigned int I;
unsigned int S;
unsigned int Rn;
unsigned int Rd;
unsigned int shifter_operand;
shtop_fp_t shtop_func;
};
struct tst_inst {
unsigned int I;
unsigned int S;
unsigned int Rn;
unsigned int Rd;
unsigned int shifter_operand;
shtop_fp_t shtop_func;
};
struct cmn_inst {
unsigned int I;
unsigned int Rn;
unsigned int shifter_operand;
shtop_fp_t shtop_func;
};
struct teq_inst {
unsigned int I;
unsigned int Rn;
unsigned int shifter_operand;
shtop_fp_t shtop_func;
};
struct stm_inst {
unsigned int inst;
};
struct bkpt_inst {
u32 imm;
};
struct stc_inst {
};
struct ldc_inst {
};
struct swi_inst {
unsigned int num;
};
struct cmp_inst {
unsigned int I;
unsigned int Rn;
unsigned int shifter_operand;
shtop_fp_t shtop_func;
};
struct mov_inst {
unsigned int I;
unsigned int S;
unsigned int Rd;
unsigned int shifter_operand;
shtop_fp_t shtop_func;
};
struct mvn_inst {
unsigned int I;
unsigned int S;
unsigned int Rd;
unsigned int shifter_operand;
shtop_fp_t shtop_func;
};
struct rev_inst {
unsigned int Rd;
unsigned int Rm;
unsigned int op1;
unsigned int op2;
};
struct rsb_inst {
unsigned int I;
unsigned int S;
unsigned int Rn;
unsigned int Rd;
unsigned int shifter_operand;
shtop_fp_t shtop_func;
};
struct rsc_inst {
unsigned int I;
unsigned int S;
unsigned int Rn;
unsigned int Rd;
unsigned int shifter_operand;
shtop_fp_t shtop_func;
};
struct sbc_inst {
unsigned int I;
unsigned int S;
unsigned int Rn;
unsigned int Rd;
unsigned int shifter_operand;
shtop_fp_t shtop_func;
};
struct mul_inst {
unsigned int S;
unsigned int Rd;
unsigned int Rs;
unsigned int Rm;
};
struct smul_inst {
unsigned int Rd;
unsigned int Rs;
unsigned int Rm;
unsigned int x;
unsigned int y;
};
struct umull_inst {
unsigned int S;
unsigned int RdHi;
unsigned int RdLo;
unsigned int Rs;
unsigned int Rm;
};
struct smlad_inst {
unsigned int m;
unsigned int Rm;
unsigned int Rd;
unsigned int Ra;
unsigned int Rn;
unsigned int op1;
unsigned int op2;
};
struct smla_inst {
unsigned int x;
unsigned int y;
unsigned int Rm;
unsigned int Rd;
unsigned int Rs;
unsigned int Rn;
};
struct smlalxy_inst {
unsigned int x;
unsigned int y;
unsigned int RdLo;
unsigned int RdHi;
unsigned int Rm;
unsigned int Rn;
};
struct ssat_inst {
unsigned int Rn;
unsigned int Rd;
unsigned int imm5;
unsigned int sat_imm;
unsigned int shift_type;
};
struct umaal_inst {
unsigned int Rn;
unsigned int Rm;
unsigned int RdHi;
unsigned int RdLo;
};
struct umlal_inst {
unsigned int S;
unsigned int Rm;
unsigned int Rs;
unsigned int RdHi;
unsigned int RdLo;
};
struct smlal_inst {
unsigned int S;
unsigned int Rm;
unsigned int Rs;
unsigned int RdHi;
unsigned int RdLo;
};
struct smlald_inst {
unsigned int RdLo;
unsigned int RdHi;
unsigned int Rm;
unsigned int Rn;
unsigned int swap;
unsigned int op1;
unsigned int op2;
};
struct mla_inst {
unsigned int S;
unsigned int Rn;
unsigned int Rd;
unsigned int Rs;
unsigned int Rm;
};
struct mrc_inst {
unsigned int opcode_1;
unsigned int opcode_2;
unsigned int cp_num;
unsigned int crn;
unsigned int crm;
unsigned int Rd;
unsigned int inst;
};
struct mcr_inst {
unsigned int opcode_1;
unsigned int opcode_2;
unsigned int cp_num;
unsigned int crn;
unsigned int crm;
unsigned int Rd;
unsigned int inst;
};
struct mcrr_inst {
unsigned int opcode_1;
unsigned int cp_num;
unsigned int crm;
unsigned int rt;
unsigned int rt2;
};
struct mrs_inst {
unsigned int R;
unsigned int Rd;
};
struct msr_inst {
unsigned int field_mask;
unsigned int R;
unsigned int inst;
};
struct pld_inst {
};
struct sxtb_inst {
unsigned int Rd;
unsigned int Rm;
unsigned int rotate;
};
struct sxtab_inst {
unsigned int Rd;
unsigned int Rn;
unsigned int Rm;
unsigned rotate;
};
struct sxtah_inst {
unsigned int Rd;
unsigned int Rn;
unsigned int Rm;
unsigned int rotate;
};
struct sxth_inst {
unsigned int Rd;
unsigned int Rm;
unsigned int rotate;
};
struct uxtab_inst {
unsigned int Rn;
unsigned int Rd;
unsigned int rotate;
unsigned int Rm;
};
struct uxtah_inst {
unsigned int Rn;
unsigned int Rd;
unsigned int rotate;
unsigned int Rm;
};
struct uxth_inst {
unsigned int Rd;
unsigned int Rm;
unsigned int rotate;
};
struct cdp_inst {
unsigned int opcode_1;
unsigned int CRn;
unsigned int CRd;
unsigned int cp_num;
unsigned int opcode_2;
unsigned int CRm;
unsigned int inst;
};
struct uxtb_inst {
unsigned int Rd;
unsigned int Rm;
unsigned int rotate;
};
struct swp_inst {
unsigned int Rn;
unsigned int Rd;
unsigned int Rm;
};
struct setend_inst {
unsigned int set_bigend;
};
struct b_2_thumb {
unsigned int imm;
};
struct b_cond_thumb {
unsigned int imm;
unsigned int cond;
};
struct bl_1_thumb {
unsigned int imm;
};
struct bl_2_thumb {
unsigned int imm;
};
struct blx_1_thumb {
unsigned int imm;
unsigned int instr;
};
struct pkh_inst {
unsigned int Rm;
unsigned int Rn;
unsigned int Rd;
unsigned char imm;
};
// Floating point VFPv3 structures
#define VFP_INTERPRETER_STRUCT
#include "skyeye_interpreter/skyeye_common/vfp/vfpinstr.cpp"
#undef VFP_INTERPRETER_STRUCT
typedef void (*get_addr_fp_t)(ARMul_State *cpu, unsigned int inst, unsigned int &virt_addr);
struct ldst_inst {
unsigned int inst;
get_addr_fp_t get_addr;
};
typedef arm_inst* ARM_INST_PTR;
typedef ARM_INST_PTR (*transop_fp_t)(unsigned int, int);
extern const transop_fp_t arm_instruction_trans[];
extern const size_t arm_instruction_trans_len;
#define TRANS_CACHE_SIZE (64 * 1024 * 2000)
extern char trans_cache_buf[TRANS_CACHE_SIZE];
extern size_t trans_cache_buf_top;
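// Sketch of how a translated instruction could be carved out of the cache (assumption:
// records are bump-allocated; the helper name is hypothetical, not the allocator actually
// used by arm_dyncom_trans.cpp). The per-opcode payload, e.g. an adc_inst, lives in the
// trailing flexible array member `component` directly after the arm_inst header.
inline arm_inst* ExampleAllocInst(size_t payload_size)
{
    const size_t total = sizeof(arm_inst) + payload_size;
    if (trans_cache_buf_top + total > TRANS_CACHE_SIZE)
        return nullptr; // caller would have to flush and rebuild the translation cache
    arm_inst* inst = reinterpret_cast<arm_inst*>(&trans_cache_buf[trans_cache_buf_top]);
    trans_cache_buf_top += total;
    return inst;
}
// e.g.: arm_inst* base = ExampleAllocInst(sizeof(adc_inst));
//       adc_inst* cream = reinterpret_cast<adc_inst*>(base->component);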

View file

@ -188,13 +188,25 @@ void ARMul_State::ResetMPCoreCP15Registers()
CP15[CP15_TLB_DEBUG_CONTROL] = 0x00000000;
}
//static void CheckMemoryBreakpoint(u32 address, GDBStub::BreakpointType type)
//{
// if (GDBStub::g_server_enabled && GDBStub::CheckBreakpoint(address, type)) {
// LOG_DEBUG(Debug, "Found memory breakpoint @ %08x", address);
// GDBStub::Break(true);
// }
//}
u8 ARMul_State::ReadMemory8(u32 address) const
{
// CheckMemoryBreakpoint(address, GDBStub::BreakpointType::Read);
return (*user_callbacks.MemoryRead8)(address);
}
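// Sketch of the callback-based access pattern (the struct below is illustrative only; the
// real user_callbacks member is defined elsewhere in the interpreter's state): every width
// goes through a user-provided function pointer, and the 16/32/64-bit readers additionally
// byte-swap the result in big-endian mode, as ReadMemory16/32/64 below do.
struct ExampleMemoryCallbacks {
    u8 (*MemoryRead8)(u32 vaddr);
    u16 (*MemoryRead16)(u32 vaddr);
};
static u16 ExampleRead16(const ExampleMemoryCallbacks& cb, bool big_endian, u32 vaddr)
{
    u16 data = (*cb.MemoryRead16)(vaddr);
    if (big_endian)
        data = static_cast<u16>((data << 8) | (data >> 8)); // same effect as Common::swap16
    return data;
}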
u16 ARMul_State::ReadMemory16(u32 address) const
{
// CheckMemoryBreakpoint(address, GDBStub::BreakpointType::Read);
u16 data = (*user_callbacks.MemoryRead16)(address);
if (InBigEndianMode())
@ -205,6 +217,8 @@ u16 ARMul_State::ReadMemory16(u32 address) const
u32 ARMul_State::ReadMemory32(u32 address) const
{
// CheckMemoryBreakpoint(address, GDBStub::BreakpointType::Read);
u32 data = (*user_callbacks.MemoryRead32)(address);
if (InBigEndianMode())
@ -215,6 +229,8 @@ u32 ARMul_State::ReadMemory32(u32 address) const
u64 ARMul_State::ReadMemory64(u32 address) const
{
// CheckMemoryBreakpoint(address, GDBStub::BreakpointType::Read);
u64 data = (*user_callbacks.MemoryRead64)(address);
if (InBigEndianMode())
@ -225,11 +241,15 @@ u64 ARMul_State::ReadMemory64(u32 address) const
void ARMul_State::WriteMemory8(u32 address, u8 data)
{
// CheckMemoryBreakpoint(address, GDBStub::BreakpointType::Write);
(*user_callbacks.MemoryWrite8)(address, data);
}
void ARMul_State::WriteMemory16(u32 address, u16 data)
{
// CheckMemoryBreakpoint(address, GDBStub::BreakpointType::Write);
if (InBigEndianMode())
data = Common::swap16(data);
@ -238,6 +258,8 @@ void ARMul_State::WriteMemory16(u32 address, u16 data)
void ARMul_State::WriteMemory32(u32 address, u32 data)
{
// CheckMemoryBreakpoint(address, GDBStub::BreakpointType::Write);
if (InBigEndianMode())
data = Common::swap32(data);
@ -246,6 +268,8 @@ void ARMul_State::WriteMemory32(u32 address, u32 data)
void ARMul_State::WriteMemory64(u32 address, u64 data)
{
// CheckMemoryBreakpoint(address, GDBStub::BreakpointType::Write);
if (InBigEndianMode())
data = Common::swap64(data);
@ -440,6 +464,7 @@ u32 ARMul_State::ReadCP15Register(u32 crn, u32 opcode_1, u32 crm, u32 opcode_2)
}
}
// LOG_ERROR(Core_ARM11, "MRC CRn=%u, CRm=%u, OP1=%u OP2=%u is not implemented. Returning zero.", crn, crm, opcode_1, opcode_2);
ASSERT_MSG(false, "MRC CRn=%u, CRm=%u, OP1=%u OP2=%u is not implemented. Returning zero.", crn, crm, opcode_1, opcode_2);
return 0;
}
@ -496,7 +521,6 @@ void ARMul_State::WriteCP15Register(u32 value, u32 crn, u32 opcode_1, u32 crm, u
else if (crm == 4 && opcode_2 == 0)
{
// NOTE: Not entirely accurate. This should do permission checks.
// TODO: Implement this maybe.
//CP15[CP15_PHYS_ADDRESS] = Memory::VirtualToPhysicalAddress(value);
}
else if (crm == 5)

View file

@ -15,6 +15,8 @@
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */
//#include "common/logging/log.h"
#include "skyeye_interpreter/skyeye_common/arm_regformat.h"
#include "skyeye_interpreter/skyeye_common/armstate.h"
#include "skyeye_interpreter/skyeye_common/armsupp.h"

View file

@ -21,12 +21,17 @@
/* Note: this file handles interface with arm core and vfp registers */
#include "common/assert.h"
//#include "common/common_funcs.h"
#include "common/common_types.h"
//#include "common/logging/log.h"
#include "skyeye_interpreter/skyeye_common/armstate.h"
#include "skyeye_interpreter/skyeye_common/vfp/asm_vfp.h"
#include "skyeye_interpreter/skyeye_common/vfp/vfp.h"
#define LOG_INFO(...) do{}while(0)
#define LOG_TRACE(...) do{}while(0)
void VFPInit(ARMul_State* state)
{
state->VFP[VFP_FPSID] = VFP_FPSID_IMPLMEN<<24 | VFP_FPSID_SW<<23 | VFP_FPSID_SUBARCH<<16 |
@ -112,26 +117,26 @@ void VMOVR(ARMul_State* state, u32 single, u32 d, u32 m)
/* Miscellaneous functions */
s32 vfp_get_float(ARMul_State* state, unsigned int reg)
{
//LOG_TRACE(Core_ARM11, "VFP get float: s%d=[%08x]", reg, state->ExtReg[reg]);
LOG_TRACE(Core_ARM11, "VFP get float: s%d=[%08x]", reg, state->ExtReg[reg]);
return state->ExtReg[reg];
}
void vfp_put_float(ARMul_State* state, s32 val, unsigned int reg)
{
//LOG_TRACE(Core_ARM11, "VFP put float: s%d <= [%08x]", reg, val);
LOG_TRACE(Core_ARM11, "VFP put float: s%d <= [%08x]", reg, val);
state->ExtReg[reg] = val;
}
u64 vfp_get_double(ARMul_State* state, unsigned int reg)
{
u64 result = ((u64) state->ExtReg[reg*2+1])<<32 | state->ExtReg[reg*2];
//LOG_TRACE(Core_ARM11, "VFP get double: s[%d-%d]=[%016llx]", reg * 2 + 1, reg * 2, result);
LOG_TRACE(Core_ARM11, "VFP get double: s[%d-%d]=[%016llx]", reg * 2 + 1, reg * 2, result);
return result;
}
void vfp_put_double(ARMul_State* state, u64 val, unsigned int reg)
{
//LOG_TRACE(Core_ARM11, "VFP put double: s[%d-%d] <= [%08x-%08x]", reg * 2 + 1, reg * 2, (u32)(val >> 32), (u32)(val & 0xffffffff));
LOG_TRACE(Core_ARM11, "VFP put double: s[%d-%d] <= [%08x-%08x]", reg * 2 + 1, reg * 2, (u32)(val >> 32), (u32)(val & 0xffffffff));
state->ExtReg[reg*2] = (u32) (val & 0xffffffff);
state->ExtReg[reg*2+1] = (u32) (val>>32);
}
@ -141,11 +146,12 @@ void vfp_put_double(ARMul_State* state, u64 val, unsigned int reg)
*/
void vfp_raise_exceptions(ARMul_State* state, u32 exceptions, u32 inst, u32 fpscr)
{
//LOG_TRACE(Core_ARM11, "VFP: raising exceptions %08x", exceptions);
LOG_TRACE(Core_ARM11, "VFP: raising exceptions %08x", exceptions);
if (exceptions == VFP_EXCEPTION_ERROR) {
// LOG_CRITICAL(Core_ARM11, "unhandled bounce %x", inst);
// Crash();
ASSERT_MSG(false, "unhandled bounce %x", inst);
exit(-1);
}
/*

View file

@ -22,7 +22,10 @@
#include "skyeye_interpreter/skyeye_common/vfp/vfp_helper.h" /* for references to cdp SoftFloat functions */
#define VFP_DEBUG_UNTESTED(x) //LOG_TRACE(Core_ARM11, "in func %s, " #x " untested", __FUNCTION__);
#define LOG_INFO(...) do{}while(0)
#define LOG_TRACE(...) do{}while(0)
#define VFP_DEBUG_UNTESTED(x) LOG_TRACE(Core_ARM11, "in func %s, " #x " untested", __FUNCTION__);
#define CHECK_VFP_ENABLED
#define CHECK_VFP_CDP_RET vfp_raise_exceptions(cpu, ret, inst_cream->instr, cpu->VFP[VFP_FPSCR]);

View file

@ -37,6 +37,9 @@
#include "skyeye_interpreter/skyeye_common/armstate.h"
#include "skyeye_interpreter/skyeye_common/vfp/asm_vfp.h"
#define LOG_INFO(...) do{}while(0)
#define LOG_TRACE(...) do{}while(0)
#define do_div(n, base) {n/=base;}
enum : u32 {
@ -271,8 +274,9 @@ inline int vfp_single_type(const vfp_single* s)
// Unpack a single-precision float. Note that this returns the magnitude
// of the single-precision float mantissa with the 1. if necessary,
// aligned to bit 30.
inline void vfp_single_unpack(vfp_single* s, s32 val, u32* fpscr)
inline u32 vfp_single_unpack(vfp_single* s, s32 val, u32 fpscr)
{
u32 exceptions = 0;
s->sign = vfp_single_packed_sign(val) >> 16,
s->exponent = vfp_single_packed_exponent(val);
@ -283,12 +287,13 @@ inline void vfp_single_unpack(vfp_single* s, s32 val, u32* fpscr)
// If flush-to-zero mode is enabled, turn the denormal into zero.
// On a VFPv2 architecture, the sign of the zero is always positive.
if ((*fpscr & FPSCR_FLUSH_TO_ZERO) != 0 && (vfp_single_type(s) & VFP_DENORMAL) != 0) {
if ((fpscr & FPSCR_FLUSH_TO_ZERO) != 0 && (vfp_single_type(s) & VFP_DENORMAL) != 0) {
s->sign = 0;
s->exponent = 0;
s->significand = 0;
*fpscr |= FPSCR_IDC;
exceptions |= FPSCR_IDC;
}
return exceptions;
}
// Re-pack a single-precision float. This assumes that the float is
@ -302,7 +307,7 @@ inline s32 vfp_single_pack(const vfp_single* s)
}
u32 vfp_single_normaliseround(ARMul_State* state, int sd, vfp_single* vs, u32 fpscr, u32 exceptions, const char* func);
u32 vfp_single_normaliseround(ARMul_State* state, int sd, vfp_single* vs, u32 fpscr, const char* func);
// Double-precision
struct vfp_double {
@ -357,8 +362,9 @@ inline int vfp_double_type(const vfp_double* s)
// Unpack a double-precision float. Note that this returns the magnitude
// of the double-precision float mantissa with the 1. if necessary,
// aligned to bit 62.
inline void vfp_double_unpack(vfp_double* s, s64 val, u32* fpscr)
inline u32 vfp_double_unpack(vfp_double* s, s64 val, u32 fpscr)
{
u32 exceptions = 0;
s->sign = vfp_double_packed_sign(val) >> 48;
s->exponent = vfp_double_packed_exponent(val);
@ -369,12 +375,13 @@ inline void vfp_double_unpack(vfp_double* s, s64 val, u32* fpscr)
// If flush-to-zero mode is enabled, turn the denormal into zero.
// On a VFPv2 architecture, the sign of the zero is always positive.
if ((*fpscr & FPSCR_FLUSH_TO_ZERO) != 0 && (vfp_double_type(s) & VFP_DENORMAL) != 0) {
if ((fpscr & FPSCR_FLUSH_TO_ZERO) != 0 && (vfp_double_type(s) & VFP_DENORMAL) != 0) {
s->sign = 0;
s->exponent = 0;
s->significand = 0;
*fpscr |= FPSCR_IDC;
exceptions |= FPSCR_IDC;
}
return exceptions;
}
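// The calling convention after this change, sketched as a standalone example (hypothetical
// function name; the real callers are the fadd/fsub/fmul/fdiv routines in vfpdouble.cpp):
// every helper returns its exception bits and the caller accumulates them with |= instead
// of mutating fpscr through a pointer. Denormal normalisation is omitted for brevity.
inline u32 example_vfp_double_fadd(ARMul_State* state, int dd, int dn, int dm, u32 fpscr)
{
    vfp_double vdn, vdm, vdd;
    u32 exceptions = 0;
    exceptions |= vfp_double_unpack(&vdn, vfp_get_double(state, dn), fpscr);
    exceptions |= vfp_double_unpack(&vdm, vfp_get_double(state, dm), fpscr);
    exceptions |= vfp_double_add(&vdd, &vdn, &vdm, fpscr);
    exceptions |= vfp_double_normaliseround(state, dd, &vdd, fpscr, "example_fadd");
    return exceptions;
}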
// Re-pack a double-precision float. This assumes that the float is
@ -447,4 +454,4 @@ inline u32 fls(u32 x)
u32 vfp_double_multiply(vfp_double* vdd, vfp_double* vdn, vfp_double* vdm, u32 fpscr);
u32 vfp_double_add(vfp_double* vdd, vfp_double* vdn, vfp_double *vdm, u32 fpscr);
u32 vfp_double_normaliseround(ARMul_State* state, int dd, vfp_double* vd, u32 fpscr, u32 exceptions, const char* func);
u32 vfp_double_normaliseround(ARMul_State* state, int dd, vfp_double* vd, u32 fpscr, const char* func);

View file

@ -52,10 +52,14 @@
*/
#include <algorithm>
//#include "common/logging/log.h"
#include "skyeye_interpreter/skyeye_common/vfp/vfp.h"
#include "skyeye_interpreter/skyeye_common/vfp/vfp_helper.h"
#include "skyeye_interpreter/skyeye_common/vfp/asm_vfp.h"
#define LOG_INFO(...) do{}while(0)
#define LOG_TRACE(...) do{}while(0)
static struct vfp_double vfp_double_default_qnan = {
2047,
0,
@ -64,8 +68,8 @@ static struct vfp_double vfp_double_default_qnan = {
static void vfp_double_dump(const char *str, struct vfp_double *d)
{
//LOG_TRACE(Core_ARM11, "VFP: %s: sign=%d exponent=%d significand=%016llx",
// str, d->sign != 0, d->exponent, d->significand);
LOG_TRACE(Core_ARM11, "VFP: %s: sign=%d exponent=%d significand=%016llx",
str, d->sign != 0, d->exponent, d->significand);
}
static void vfp_double_normalise_denormal(struct vfp_double *vd)
@ -84,11 +88,12 @@ static void vfp_double_normalise_denormal(struct vfp_double *vd)
vfp_double_dump("normalise_denormal: out", vd);
}
u32 vfp_double_normaliseround(ARMul_State* state, int dd, struct vfp_double *vd, u32 fpscr, u32 exceptions, const char *func)
u32 vfp_double_normaliseround(ARMul_State* state, int dd, struct vfp_double *vd, u32 fpscr, const char *func)
{
u64 significand, incr;
int exponent, shift, underflow;
u32 rmode;
u32 exceptions = 0;
vfp_double_dump("pack: in", vd);
@ -154,7 +159,7 @@ u32 vfp_double_normaliseround(ARMul_State* state, int dd, struct vfp_double *vd,
} else if ((rmode == FPSCR_ROUND_PLUSINF) ^ (vd->sign != 0))
incr = (1ULL << (VFP_DOUBLE_LOW_BITS + 1)) - 1;
//LOG_TRACE(Core_ARM11, "VFP: rounding increment = 0x%08llx", incr);
LOG_TRACE(Core_ARM11, "VFP: rounding increment = 0x%08llx", incr);
/*
* Is our rounding going to overflow?
@ -209,8 +214,8 @@ pack:
vfp_double_dump("pack: final", vd);
{
s64 d = vfp_double_pack(vd);
//LOG_TRACE(Core_ARM11, "VFP: %s: d(d%d)=%016llx exceptions=%08x", func,
// dd, d, exceptions);
LOG_TRACE(Core_ARM11, "VFP: %s: d(d%d)=%016llx exceptions=%08x", func,
dd, d, exceptions);
vfp_put_double(state, d, dd);
}
return exceptions;
@ -258,7 +263,7 @@ vfp_propagate_nan(struct vfp_double *vdd, struct vfp_double *vdn,
/*
* If one was a signalling NAN, raise invalid operation.
*/
return (tn == VFP_SNAN || tm == VFP_SNAN) ? (u32)FPSCR_IOC : (u32)VFP_NAN_FLAG;
return (tn == VFP_SNAN || tm == VFP_SNAN) ? u32(FPSCR_IOC) : u32(VFP_NAN_FLAG);
}
/*
@ -266,32 +271,33 @@ vfp_propagate_nan(struct vfp_double *vdd, struct vfp_double *vdn,
*/
static u32 vfp_double_fabs(ARMul_State* state, int dd, int unused, int dm, u32 fpscr)
{
//LOG_TRACE(Core_ARM11, "In %s", __FUNCTION__);
LOG_TRACE(Core_ARM11, "In %s", __FUNCTION__);
vfp_put_double(state, vfp_double_packed_abs(vfp_get_double(state, dm)), dd);
return 0;
}
static u32 vfp_double_fcpy(ARMul_State* state, int dd, int unused, int dm, u32 fpscr)
{
//LOG_TRACE(Core_ARM11, "In %s", __FUNCTION__);
LOG_TRACE(Core_ARM11, "In %s", __FUNCTION__);
vfp_put_double(state, vfp_get_double(state, dm), dd);
return 0;
}
static u32 vfp_double_fneg(ARMul_State* state, int dd, int unused, int dm, u32 fpscr)
{
//LOG_TRACE(Core_ARM11, "In %s", __FUNCTION__);
LOG_TRACE(Core_ARM11, "In %s", __FUNCTION__);
vfp_put_double(state, vfp_double_packed_negate(vfp_get_double(state, dm)), dd);
return 0;
}
static u32 vfp_double_fsqrt(ARMul_State* state, int dd, int unused, int dm, u32 fpscr)
{
//LOG_TRACE(Core_ARM11, "In %s", __FUNCTION__);
LOG_TRACE(Core_ARM11, "In %s", __FUNCTION__);
vfp_double vdm, vdd, *vdp;
int ret, tm;
u32 exceptions = 0;
vfp_double_unpack(&vdm, vfp_get_double(state, dm), &fpscr);
exceptions |= vfp_double_unpack(&vdm, vfp_get_double(state, dm), fpscr);
tm = vfp_double_type(&vdm);
if (tm & (VFP_NAN|VFP_INFINITY)) {
@ -368,7 +374,8 @@ sqrt_invalid:
}
vdd.significand = vfp_shiftright64jamming(vdd.significand, 1);
return vfp_double_normaliseround(state, dd, &vdd, fpscr, 0, "fsqrt");
exceptions |= vfp_double_normaliseround(state, dd, &vdd, fpscr, "fsqrt");
return exceptions;
}
/*
@ -382,7 +389,7 @@ static u32 vfp_compare(ARMul_State* state, int dd, int signal_on_qnan, int dm, u
s64 d, m;
u32 ret = 0;
//LOG_TRACE(Core_ARM11, "In %s, state=0x%p, fpscr=0x%x", __FUNCTION__, state, fpscr);
LOG_TRACE(Core_ARM11, "In %s, state=0x%p, fpscr=0x%x", __FUNCTION__, state, fpscr);
m = vfp_get_double(state, dm);
if (vfp_double_packed_exponent(m) == 2047 && vfp_double_packed_mantissa(m)) {
ret |= FPSCR_CFLAG | FPSCR_VFLAG;
@ -437,32 +444,32 @@ static u32 vfp_compare(ARMul_State* state, int dd, int signal_on_qnan, int dm, u
ret |= FPSCR_CFLAG;
}
}
//LOG_TRACE(Core_ARM11, "In %s, state=0x%p, ret=0x%x", __FUNCTION__, state, ret);
LOG_TRACE(Core_ARM11, "In %s, state=0x%p, ret=0x%x", __FUNCTION__, state, ret);
return ret;
}
static u32 vfp_double_fcmp(ARMul_State* state, int dd, int unused, int dm, u32 fpscr)
{
//LOG_TRACE(Core_ARM11, "In %s", __FUNCTION__);
LOG_TRACE(Core_ARM11, "In %s", __FUNCTION__);
return vfp_compare(state, dd, 0, dm, fpscr);
}
static u32 vfp_double_fcmpe(ARMul_State* state, int dd, int unused, int dm, u32 fpscr)
{
//LOG_TRACE(Core_ARM11, "In %s", __FUNCTION__);
LOG_TRACE(Core_ARM11, "In %s", __FUNCTION__);
return vfp_compare(state, dd, 1, dm, fpscr);
}
static u32 vfp_double_fcmpz(ARMul_State* state, int dd, int unused, int dm, u32 fpscr)
{
//LOG_TRACE(Core_ARM11, "In %s", __FUNCTION__);
LOG_TRACE(Core_ARM11, "In %s", __FUNCTION__);
return vfp_compare(state, dd, 0, VFP_REG_ZERO, fpscr);
}
static u32 vfp_double_fcmpez(ARMul_State* state, int dd, int unused, int dm, u32 fpscr)
{
//LOG_TRACE(Core_ARM11, "In %s", __FUNCTION__);
LOG_TRACE(Core_ARM11, "In %s", __FUNCTION__);
return vfp_compare(state, dd, 1, VFP_REG_ZERO, fpscr);
}
@ -473,8 +480,8 @@ static u32 vfp_double_fcvts(ARMul_State* state, int sd, int unused, int dm, u32
int tm;
u32 exceptions = 0;
//LOG_TRACE(Core_ARM11, "In %s", __FUNCTION__);
vfp_double_unpack(&vdm, vfp_get_double(state, dm), &fpscr);
LOG_TRACE(Core_ARM11, "In %s", __FUNCTION__);
exceptions |= vfp_double_unpack(&vdm, vfp_get_double(state, dm), fpscr);
tm = vfp_double_type(&vdm);
@ -503,7 +510,8 @@ static u32 vfp_double_fcvts(ARMul_State* state, int sd, int unused, int dm, u32
else
vsd.exponent = vdm.exponent - (1023 - 127);
return vfp_single_normaliseround(state, sd, &vsd, fpscr, exceptions, "fcvts");
exceptions |= vfp_single_normaliseround(state, sd, &vsd, fpscr, "fcvts");
return exceptions;
pack_nan:
vfp_put_float(state, vfp_single_pack(&vsd), sd);
@ -513,27 +521,31 @@ pack_nan:
static u32 vfp_double_fuito(ARMul_State* state, int dd, int unused, int dm, u32 fpscr)
{
struct vfp_double vdm;
u32 exceptions = 0;
u32 m = vfp_get_float(state, dm);
//LOG_TRACE(Core_ARM11, "In %s", __FUNCTION__);
LOG_TRACE(Core_ARM11, "In %s", __FUNCTION__);
vdm.sign = 0;
vdm.exponent = 1023 + 63 - 1;
vdm.significand = (u64)m;
return vfp_double_normaliseround(state, dd, &vdm, fpscr, 0, "fuito");
exceptions |= vfp_double_normaliseround(state, dd, &vdm, fpscr, "fuito");
return exceptions;
}
static u32 vfp_double_fsito(ARMul_State* state, int dd, int unused, int dm, u32 fpscr)
{
struct vfp_double vdm;
u32 exceptions = 0;
u32 m = vfp_get_float(state, dm);
//LOG_TRACE(Core_ARM11, "In %s", __FUNCTION__);
LOG_TRACE(Core_ARM11, "In %s", __FUNCTION__);
vdm.sign = (m & 0x80000000) >> 16;
vdm.exponent = 1023 + 63 - 1;
vdm.significand = vdm.sign ? (~m + 1) : m;
return vfp_double_normaliseround(state, dd, &vdm, fpscr, 0, "fsito");
exceptions |= vfp_double_normaliseround(state, dd, &vdm, fpscr, "fsito");
return exceptions;
}
static u32 vfp_double_ftoui(ARMul_State* state, int sd, int unused, int dm, u32 fpscr)
@ -543,8 +555,8 @@ static u32 vfp_double_ftoui(ARMul_State* state, int sd, int unused, int dm, u32
int rmode = fpscr & FPSCR_RMODE_MASK;
int tm;
//LOG_TRACE(Core_ARM11, "In %s", __FUNCTION__);
vfp_double_unpack(&vdm, vfp_get_double(state, dm), &fpscr);
LOG_TRACE(Core_ARM11, "In %s", __FUNCTION__);
exceptions |= vfp_double_unpack(&vdm, vfp_get_double(state, dm), fpscr);
/*
* Do we have a denormalised number?
@ -559,7 +571,7 @@ static u32 vfp_double_ftoui(ARMul_State* state, int sd, int unused, int dm, u32
if (vdm.exponent >= 1023 + 32) {
d = vdm.sign ? 0 : 0xffffffff;
exceptions = FPSCR_IOC;
} else if (vdm.exponent >= 1023 - 1) {
} else if (vdm.exponent >= 1023) {
int shift = 1023 + 63 - vdm.exponent;
u64 rem, incr = 0;
@ -594,17 +606,25 @@ static u32 vfp_double_ftoui(ARMul_State* state, int sd, int unused, int dm, u32
} else {
d = 0;
if (vdm.exponent | vdm.significand) {
if (rmode == FPSCR_ROUND_NEAREST) {
if (vdm.exponent >= 1022) {
d = vdm.sign ? 0 : 1;
exceptions |= vdm.sign ? FPSCR_IOC : FPSCR_IXC;
} else {
exceptions |= FPSCR_IXC;
if (rmode == FPSCR_ROUND_PLUSINF && vdm.sign == 0)
}
} else if (rmode == FPSCR_ROUND_PLUSINF && vdm.sign == 0) {
d = 1;
else if (rmode == FPSCR_ROUND_MINUSINF && vdm.sign) {
d = 0;
exceptions |= FPSCR_IOC;
exceptions |= FPSCR_IXC;
} else if (rmode == FPSCR_ROUND_MINUSINF) {
exceptions |= vdm.sign ? FPSCR_IOC : FPSCR_IXC;
} else {
exceptions |= FPSCR_IXC;
}
}
}
//LOG_TRACE(Core_ARM11, "VFP: ftoui: d(s%d)=%08x exceptions=%08x", sd, d, exceptions);
LOG_TRACE(Core_ARM11, "VFP: ftoui: d(s%d)=%08x exceptions=%08x", sd, d, exceptions);
vfp_put_float(state, d, sd);
@ -613,8 +633,8 @@ static u32 vfp_double_ftoui(ARMul_State* state, int sd, int unused, int dm, u32
static u32 vfp_double_ftouiz(ARMul_State* state, int sd, int unused, int dm, u32 fpscr)
{
//LOG_TRACE(Core_ARM11, "In %s", __FUNCTION__);
return vfp_double_ftoui(state, sd, unused, dm, FPSCR_ROUND_TOZERO);
LOG_TRACE(Core_ARM11, "In %s", __FUNCTION__);
return vfp_double_ftoui(state, sd, unused, dm, (fpscr & ~FPSCR_RMODE_MASK) | FPSCR_ROUND_TOZERO);
}
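// Why the argument changed (sketch, hypothetical helper name): passing FPSCR_ROUND_TOZERO
// on its own replaced the whole FPSCR word, discarding flush-to-zero, default-NaN and the
// accumulated flag bits. Masking out only the two rounding-mode bits keeps the rest intact:
static inline u32 fpscr_with_round_to_zero(u32 fpscr)
{
    return (fpscr & ~FPSCR_RMODE_MASK) | FPSCR_ROUND_TOZERO; // replace only the RMODE field
}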
static u32 vfp_double_ftosi(ARMul_State* state, int sd, int unused, int dm, u32 fpscr)
@ -624,8 +644,8 @@ static u32 vfp_double_ftosi(ARMul_State* state, int sd, int unused, int dm, u32
int rmode = fpscr & FPSCR_RMODE_MASK;
int tm;
//LOG_TRACE(Core_ARM11, "In %s", __FUNCTION__);
vfp_double_unpack(&vdm, vfp_get_double(state, dm), &fpscr);
LOG_TRACE(Core_ARM11, "In %s", __FUNCTION__);
exceptions |= vfp_double_unpack(&vdm, vfp_get_double(state, dm), fpscr);
vfp_double_dump("VDM", &vdm);
/*
@ -638,12 +658,12 @@ static u32 vfp_double_ftosi(ARMul_State* state, int sd, int unused, int dm, u32
if (tm & VFP_NAN) {
d = 0;
exceptions |= FPSCR_IOC;
} else if (vdm.exponent >= 1023 + 32) {
} else if (vdm.exponent >= 1023 + 31) {
d = 0x7fffffff;
if (vdm.sign)
d = ~d;
exceptions |= FPSCR_IOC;
} else if (vdm.exponent >= 1023 - 1) {
} else if (vdm.exponent >= 1023) {
int shift = 1023 + 63 - vdm.exponent; /* 58 */
u64 rem, incr = 0;
@ -674,14 +694,21 @@ static u32 vfp_double_ftosi(ARMul_State* state, int sd, int unused, int dm, u32
d = 0;
if (vdm.exponent | vdm.significand) {
exceptions |= FPSCR_IXC;
if (rmode == FPSCR_ROUND_PLUSINF && vdm.sign == 0)
if (rmode == FPSCR_ROUND_NEAREST) {
if (vdm.exponent >= 1022) {
d = vdm.sign ? 0xffffffff : 1;
} else {
d = 0;
}
} else if (rmode == FPSCR_ROUND_PLUSINF && vdm.sign == 0) {
d = 1;
else if (rmode == FPSCR_ROUND_MINUSINF && vdm.sign)
d = -1;
} else if (rmode == FPSCR_ROUND_MINUSINF && vdm.sign) {
d = 0xffffffff;
}
}
}
//LOG_TRACE(Core_ARM11, "VFP: ftosi: d(s%d)=%08x exceptions=%08x", sd, d, exceptions);
LOG_TRACE(Core_ARM11, "VFP: ftosi: d(s%d)=%08x exceptions=%08x", sd, d, exceptions);
vfp_put_float(state, (s32)d, sd);
@ -690,8 +717,8 @@ static u32 vfp_double_ftosi(ARMul_State* state, int sd, int unused, int dm, u32
static u32 vfp_double_ftosiz(ARMul_State* state, int dd, int unused, int dm, u32 fpscr)
{
//LOG_TRACE(Core_ARM11, "In %s", __FUNCTION__);
return vfp_double_ftosi(state, dd, unused, dm, FPSCR_ROUND_TOZERO);
LOG_TRACE(Core_ARM11, "In %s", __FUNCTION__);
return vfp_double_ftosi(state, dd, unused, dm, (fpscr & ~FPSCR_RMODE_MASK) | FPSCR_ROUND_TOZERO);
}
static struct op fops_ext[] = {
@ -774,7 +801,7 @@ u32 vfp_double_add(struct vfp_double *vdd, struct vfp_double *vdn,struct vfp_dou
if (vdn->significand & (1ULL << 63) ||
vdm->significand & (1ULL << 63)) {
// LOG_INFO(Core_ARM11, "VFP: bad FP values in %s", __func__);
LOG_INFO(Core_ARM11, "VFP: bad FP values in %s", __func__);
vfp_double_dump("VDN", vdn);
vfp_double_dump("VDM", vdm);
}
@ -842,7 +869,7 @@ vfp_double_multiply(struct vfp_double *vdd, struct vfp_double *vdn,
*/
if (vdn->exponent < vdm->exponent) {
std::swap(vdm, vdn);
//LOG_TRACE(Core_ARM11, "VFP: swapping M <-> N");
LOG_TRACE(Core_ARM11, "VFP: swapping M <-> N");
}
vdd->sign = vdn->sign ^ vdm->sign;
@ -891,21 +918,21 @@ static u32
vfp_double_multiply_accumulate(ARMul_State* state, int dd, int dn, int dm, u32 fpscr, u32 negate, const char *func)
{
struct vfp_double vdd, vdp, vdn, vdm;
u32 exceptions;
u32 exceptions = 0;
vfp_double_unpack(&vdn, vfp_get_double(state, dn), &fpscr);
exceptions |= vfp_double_unpack(&vdn, vfp_get_double(state, dn), fpscr);
if (vdn.exponent == 0 && vdn.significand)
vfp_double_normalise_denormal(&vdn);
vfp_double_unpack(&vdm, vfp_get_double(state, dm), &fpscr);
exceptions |= vfp_double_unpack(&vdm, vfp_get_double(state, dm), fpscr);
if (vdm.exponent == 0 && vdm.significand)
vfp_double_normalise_denormal(&vdm);
exceptions = vfp_double_multiply(&vdp, &vdn, &vdm, fpscr);
exceptions |= vfp_double_multiply(&vdp, &vdn, &vdm, fpscr);
if (negate & NEG_MULTIPLY)
vdp.sign = vfp_sign_negate(vdp.sign);
vfp_double_unpack(&vdn, vfp_get_double(state, dd), &fpscr);
exceptions |= vfp_double_unpack(&vdn, vfp_get_double(state, dd), fpscr);
if (vdn.exponent == 0 && vdn.significand != 0)
vfp_double_normalise_denormal(&vdn);
@ -914,7 +941,8 @@ vfp_double_multiply_accumulate(ARMul_State* state, int dd, int dn, int dm, u32 f
exceptions |= vfp_double_add(&vdd, &vdn, &vdp, fpscr);
return vfp_double_normaliseround(state, dd, &vdd, fpscr, exceptions, func);
exceptions |= vfp_double_normaliseround(state, dd, &vdd, fpscr, func);
return exceptions;
}
/*
@ -926,7 +954,7 @@ vfp_double_multiply_accumulate(ARMul_State* state, int dd, int dn, int dm, u32 f
*/
static u32 vfp_double_fmac(ARMul_State* state, int dd, int dn, int dm, u32 fpscr)
{
//LOG_TRACE(Core_ARM11, "In %s", __FUNCTION__);
LOG_TRACE(Core_ARM11, "In %s", __FUNCTION__);
return vfp_double_multiply_accumulate(state, dd, dn, dm, fpscr, 0, "fmac");
}
@ -935,7 +963,7 @@ static u32 vfp_double_fmac(ARMul_State* state, int dd, int dn, int dm, u32 fpscr
*/
static u32 vfp_double_fnmac(ARMul_State* state, int dd, int dn, int dm, u32 fpscr)
{
//LOG_TRACE(Core_ARM11, "In %s", __FUNCTION__);
LOG_TRACE(Core_ARM11, "In %s", __FUNCTION__);
return vfp_double_multiply_accumulate(state, dd, dn, dm, fpscr, NEG_MULTIPLY, "fnmac");
}
@ -944,7 +972,7 @@ static u32 vfp_double_fnmac(ARMul_State* state, int dd, int dn, int dm, u32 fpsc
*/
static u32 vfp_double_fmsc(ARMul_State* state, int dd, int dn, int dm, u32 fpscr)
{
//LOG_TRACE(Core_ARM11, "In %s", __FUNCTION__);
LOG_TRACE(Core_ARM11, "In %s", __FUNCTION__);
return vfp_double_multiply_accumulate(state, dd, dn, dm, fpscr, NEG_SUBTRACT, "fmsc");
}
@ -953,7 +981,7 @@ static u32 vfp_double_fmsc(ARMul_State* state, int dd, int dn, int dm, u32 fpscr
*/
static u32 vfp_double_fnmsc(ARMul_State* state, int dd, int dn, int dm, u32 fpscr)
{
//LOG_TRACE(Core_ARM11, "In %s", __FUNCTION__);
LOG_TRACE(Core_ARM11, "In %s", __FUNCTION__);
return vfp_double_multiply_accumulate(state, dd, dn, dm, fpscr, NEG_SUBTRACT | NEG_MULTIPLY, "fnmsc");
}
@ -963,19 +991,21 @@ static u32 vfp_double_fnmsc(ARMul_State* state, int dd, int dn, int dm, u32 fpsc
static u32 vfp_double_fmul(ARMul_State* state, int dd, int dn, int dm, u32 fpscr)
{
struct vfp_double vdd, vdn, vdm;
u32 exceptions;
u32 exceptions = 0;
//LOG_TRACE(Core_ARM11, "In %s", __FUNCTION__);
vfp_double_unpack(&vdn, vfp_get_double(state, dn), &fpscr);
LOG_TRACE(Core_ARM11, "In %s", __FUNCTION__);
exceptions |= vfp_double_unpack(&vdn, vfp_get_double(state, dn), fpscr);
if (vdn.exponent == 0 && vdn.significand)
vfp_double_normalise_denormal(&vdn);
vfp_double_unpack(&vdm, vfp_get_double(state, dm), &fpscr);
exceptions |= vfp_double_unpack(&vdm, vfp_get_double(state, dm), fpscr);
if (vdm.exponent == 0 && vdm.significand)
vfp_double_normalise_denormal(&vdm);
exceptions = vfp_double_multiply(&vdd, &vdn, &vdm, fpscr);
return vfp_double_normaliseround(state, dd, &vdd, fpscr, exceptions, "fmul");
exceptions |= vfp_double_multiply(&vdd, &vdn, &vdm, fpscr);
exceptions |= vfp_double_normaliseround(state, dd, &vdd, fpscr, "fmul");
return exceptions;
}
/*
@ -984,21 +1014,22 @@ static u32 vfp_double_fmul(ARMul_State* state, int dd, int dn, int dm, u32 fpscr
static u32 vfp_double_fnmul(ARMul_State* state, int dd, int dn, int dm, u32 fpscr)
{
struct vfp_double vdd, vdn, vdm;
u32 exceptions;
u32 exceptions = 0;
//LOG_TRACE(Core_ARM11, "In %s", __FUNCTION__);
vfp_double_unpack(&vdn, vfp_get_double(state, dn), &fpscr);
LOG_TRACE(Core_ARM11, "In %s", __FUNCTION__);
exceptions |= vfp_double_unpack(&vdn, vfp_get_double(state, dn), fpscr);
if (vdn.exponent == 0 && vdn.significand)
vfp_double_normalise_denormal(&vdn);
vfp_double_unpack(&vdm, vfp_get_double(state, dm), &fpscr);
exceptions |= vfp_double_unpack(&vdm, vfp_get_double(state, dm), fpscr);
if (vdm.exponent == 0 && vdm.significand)
vfp_double_normalise_denormal(&vdm);
exceptions = vfp_double_multiply(&vdd, &vdn, &vdm, fpscr);
exceptions |= vfp_double_multiply(&vdd, &vdn, &vdm, fpscr);
vdd.sign = vfp_sign_negate(vdd.sign);
return vfp_double_normaliseround(state, dd, &vdd, fpscr, exceptions, "fnmul");
exceptions |= vfp_double_normaliseround(state, dd, &vdd, fpscr, "fnmul");
return exceptions;
}
/*
@ -1007,20 +1038,21 @@ static u32 vfp_double_fnmul(ARMul_State* state, int dd, int dn, int dm, u32 fpsc
static u32 vfp_double_fadd(ARMul_State* state, int dd, int dn, int dm, u32 fpscr)
{
struct vfp_double vdd, vdn, vdm;
u32 exceptions;
u32 exceptions = 0;
//LOG_TRACE(Core_ARM11, "In %s", __FUNCTION__);
vfp_double_unpack(&vdn, vfp_get_double(state, dn), &fpscr);
LOG_TRACE(Core_ARM11, "In %s", __FUNCTION__);
exceptions |= vfp_double_unpack(&vdn, vfp_get_double(state, dn), fpscr);
if (vdn.exponent == 0 && vdn.significand)
vfp_double_normalise_denormal(&vdn);
vfp_double_unpack(&vdm, vfp_get_double(state, dm), &fpscr);
exceptions |= vfp_double_unpack(&vdm, vfp_get_double(state, dm), fpscr);
if (vdm.exponent == 0 && vdm.significand)
vfp_double_normalise_denormal(&vdm);
exceptions = vfp_double_add(&vdd, &vdn, &vdm, fpscr);
exceptions |= vfp_double_add(&vdd, &vdn, &vdm, fpscr);
return vfp_double_normaliseround(state, dd, &vdd, fpscr, exceptions, "fadd");
exceptions |= vfp_double_normaliseround(state, dd, &vdd, fpscr, "fadd");
return exceptions;
}
/*
@ -1029,14 +1061,14 @@ static u32 vfp_double_fadd(ARMul_State* state, int dd, int dn, int dm, u32 fpscr
static u32 vfp_double_fsub(ARMul_State* state, int dd, int dn, int dm, u32 fpscr)
{
struct vfp_double vdd, vdn, vdm;
u32 exceptions;
u32 exceptions = 0;
//LOG_TRACE(Core_ARM11, "In %s", __FUNCTION__);
vfp_double_unpack(&vdn, vfp_get_double(state, dn), &fpscr);
LOG_TRACE(Core_ARM11, "In %s", __FUNCTION__);
exceptions |= vfp_double_unpack(&vdn, vfp_get_double(state, dn), fpscr);
if (vdn.exponent == 0 && vdn.significand)
vfp_double_normalise_denormal(&vdn);
vfp_double_unpack(&vdm, vfp_get_double(state, dm), &fpscr);
exceptions |= vfp_double_unpack(&vdm, vfp_get_double(state, dm), fpscr);
if (vdm.exponent == 0 && vdm.significand)
vfp_double_normalise_denormal(&vdm);
@ -1045,9 +1077,10 @@ static u32 vfp_double_fsub(ARMul_State* state, int dd, int dn, int dm, u32 fpscr
*/
vdm.sign = vfp_sign_negate(vdm.sign);
exceptions = vfp_double_add(&vdd, &vdn, &vdm, fpscr);
exceptions |= vfp_double_add(&vdd, &vdn, &vdm, fpscr);
return vfp_double_normaliseround(state, dd, &vdd, fpscr, exceptions, "fsub");
exceptions |= vfp_double_normaliseround(state, dd, &vdd, fpscr, "fsub");
return exceptions;
}
/*
@ -1059,9 +1092,9 @@ static u32 vfp_double_fdiv(ARMul_State* state, int dd, int dn, int dm, u32 fpscr
u32 exceptions = 0;
int tm, tn;
//LOG_TRACE(Core_ARM11, "In %s", __FUNCTION__);
vfp_double_unpack(&vdn, vfp_get_double(state, dn), &fpscr);
vfp_double_unpack(&vdm, vfp_get_double(state, dm), &fpscr);
LOG_TRACE(Core_ARM11, "In %s", __FUNCTION__);
exceptions |= vfp_double_unpack(&vdn, vfp_get_double(state, dn), fpscr);
exceptions |= vfp_double_unpack(&vdm, vfp_get_double(state, dm), fpscr);
vdd.sign = vdn.sign ^ vdm.sign;
@ -1130,16 +1163,18 @@ static u32 vfp_double_fdiv(ARMul_State* state, int dd, int dn, int dm, u32 fpscr
}
vdd.significand |= (reml != 0);
}
return vfp_double_normaliseround(state, dd, &vdd, fpscr, 0, "fdiv");
exceptions |= vfp_double_normaliseround(state, dd, &vdd, fpscr, "fdiv");
return exceptions;
vdn_nan:
exceptions = vfp_propagate_nan(&vdd, &vdn, &vdm, fpscr);
exceptions |= vfp_propagate_nan(&vdd, &vdn, &vdm, fpscr);
pack:
vfp_put_double(state, vfp_double_pack(&vdd), dd);
return exceptions;
vdm_nan:
exceptions = vfp_propagate_nan(&vdd, &vdm, &vdn, fpscr);
exceptions |= vfp_propagate_nan(&vdd, &vdm, &vdn, fpscr);
goto pack;
zero:
@ -1148,7 +1183,7 @@ zero:
goto pack;
divzero:
exceptions = FPSCR_DZC;
exceptions |= FPSCR_DZC;
infinity:
vdd.exponent = 2047;
vdd.significand = 0;
@ -1156,7 +1191,8 @@ infinity:
invalid:
vfp_put_double(state, vfp_double_pack(&vfp_double_default_qnan), dd);
return FPSCR_IOC;
exceptions |= FPSCR_IOC;
return exceptions;
}
static struct op fops[] = {
@ -1184,7 +1220,7 @@ u32 vfp_double_cpdo(ARMul_State* state, u32 inst, u32 fpscr)
unsigned int vecitr, veclen, vecstride;
struct op *fop;
//LOG_TRACE(Core_ARM11, "In %s", __FUNCTION__);
LOG_TRACE(Core_ARM11, "In %s", __FUNCTION__);
vecstride = (1 + ((fpscr & FPSCR_STRIDE_MASK) == FPSCR_STRIDE_MASK));
fop = (op == FOP_EXT) ? &fops_ext[FEXT_TO_IDX(inst)] : &fops[FOP_TO_IDX(op)];
@ -1215,8 +1251,8 @@ u32 vfp_double_cpdo(ARMul_State* state, u32 inst, u32 fpscr)
else
veclen = fpscr & FPSCR_LENGTH_MASK;
//LOG_TRACE(Core_ARM11, "VFP: vecstride=%u veclen=%u", vecstride,
// (veclen >> FPSCR_LENGTH_BIT) + 1);
LOG_TRACE(Core_ARM11, "VFP: vecstride=%u veclen=%u", vecstride,
(veclen >> FPSCR_LENGTH_BIT) + 1);
if (!fop->fn) {
printf("VFP: could not find double op %d\n", FEXT_TO_IDX(inst));
@ -1225,23 +1261,21 @@ u32 vfp_double_cpdo(ARMul_State* state, u32 inst, u32 fpscr)
for (vecitr = 0; vecitr <= veclen; vecitr += 1 << FPSCR_LENGTH_BIT) {
u32 except;
char type;
// char type;
type = (fop->flags & OP_SD) ? 's' : 'd';
(void)type;
//if (op == FOP_EXT)
// LOG_TRACE(Core_ARM11, "VFP: itr%d (%c%u) = op[%u] (d%u)",
// vecitr >> FPSCR_LENGTH_BIT,
// type, dest, dn, dm);
//else
// LOG_TRACE(Core_ARM11, "VFP: itr%d (%c%u) = (d%u) op[%u] (d%u)",
// vecitr >> FPSCR_LENGTH_BIT,
// type, dest, dn, FOP_TO_IDX(op), dm);
// type = (fop->flags & OP_SD) ? 's' : 'd';
if (op == FOP_EXT)
LOG_TRACE(Core_ARM11, "VFP: itr%d (%c%u) = op[%u] (d%u)",
vecitr >> FPSCR_LENGTH_BIT,
type, dest, dn, dm);
else
LOG_TRACE(Core_ARM11, "VFP: itr%d (%c%u) = (d%u) op[%u] (d%u)",
vecitr >> FPSCR_LENGTH_BIT,
type, dest, dn, FOP_TO_IDX(op), dm);
except = fop->fn(state, dest, dn, dm, fpscr);
//LOG_TRACE(Core_ARM11, "VFP: itr%d: exceptions=%08x",
// vecitr >> FPSCR_LENGTH_BIT, except);
LOG_TRACE(Core_ARM11, "VFP: itr%d: exceptions=%08x",
vecitr >> FPSCR_LENGTH_BIT, except);
exceptions |= except;

View file

@ -26,7 +26,7 @@ static ARM_INST_PTR INTERPRETER_TRANSLATE(vmla)(unsigned int inst, int index)
inst_base->cond = BITS(inst, 28, 31);
inst_base->idx = index;
inst_base->br = NON_BRANCH;
inst_base->br = TransExtData::NON_BRANCH;
inst_cream->dp_operation = BIT(inst, 8);
inst_cream->instr = inst;
@ -75,7 +75,7 @@ static ARM_INST_PTR INTERPRETER_TRANSLATE(vmls)(unsigned int inst, int index)
inst_base->cond = BITS(inst, 28, 31);
inst_base->idx = index;
inst_base->br = NON_BRANCH;
inst_base->br = TransExtData::NON_BRANCH;
inst_cream->dp_operation = BIT(inst, 8);
inst_cream->instr = inst;
@ -124,7 +124,7 @@ static ARM_INST_PTR INTERPRETER_TRANSLATE(vnmla)(unsigned int inst, int index)
inst_base->cond = BITS(inst, 28, 31);
inst_base->idx = index;
inst_base->br = NON_BRANCH;
inst_base->br = TransExtData::NON_BRANCH;
inst_cream->dp_operation = BIT(inst, 8);
inst_cream->instr = inst;
@ -174,7 +174,7 @@ static ARM_INST_PTR INTERPRETER_TRANSLATE(vnmls)(unsigned int inst, int index)
inst_base->cond = BITS(inst, 28, 31);
inst_base->idx = index;
inst_base->br = NON_BRANCH;
inst_base->br = TransExtData::NON_BRANCH;
inst_cream->dp_operation = BIT(inst, 8);
inst_cream->instr = inst;
@ -223,7 +223,7 @@ static ARM_INST_PTR INTERPRETER_TRANSLATE(vnmul)(unsigned int inst, int index)
inst_base->cond = BITS(inst, 28, 31);
inst_base->idx = index;
inst_base->br = NON_BRANCH;
inst_base->br = TransExtData::NON_BRANCH;
inst_cream->dp_operation = BIT(inst, 8);
inst_cream->instr = inst;
@ -272,7 +272,7 @@ static ARM_INST_PTR INTERPRETER_TRANSLATE(vmul)(unsigned int inst, int index)
inst_base->cond = BITS(inst, 28, 31);
inst_base->idx = index;
inst_base->br = NON_BRANCH;
inst_base->br = TransExtData::NON_BRANCH;
inst_cream->dp_operation = BIT(inst, 8);
inst_cream->instr = inst;
@ -321,7 +321,7 @@ static ARM_INST_PTR INTERPRETER_TRANSLATE(vadd)(unsigned int inst, int index)
inst_base->cond = BITS(inst, 28, 31);
inst_base->idx = index;
inst_base->br = NON_BRANCH;
inst_base->br = TransExtData::NON_BRANCH;
inst_cream->dp_operation = BIT(inst, 8);
inst_cream->instr = inst;
@ -370,7 +370,7 @@ static ARM_INST_PTR INTERPRETER_TRANSLATE(vsub)(unsigned int inst, int index)
inst_base->cond = BITS(inst, 28, 31);
inst_base->idx = index;
inst_base->br = NON_BRANCH;
inst_base->br = TransExtData::NON_BRANCH;
inst_cream->dp_operation = BIT(inst, 8);
inst_cream->instr = inst;
@ -419,7 +419,7 @@ static ARM_INST_PTR INTERPRETER_TRANSLATE(vdiv)(unsigned int inst, int index)
inst_base->cond = BITS(inst, 28, 31);
inst_base->idx = index;
inst_base->br = NON_BRANCH;
inst_base->br = TransExtData::NON_BRANCH;
inst_cream->dp_operation = BIT(inst, 8);
inst_cream->instr = inst;
@ -470,7 +470,7 @@ static ARM_INST_PTR INTERPRETER_TRANSLATE(vmovi)(unsigned int inst, int index)
inst_base->cond = BITS(inst, 28, 31);
inst_base->idx = index;
inst_base->br = NON_BRANCH;
inst_base->br = TransExtData::NON_BRANCH;
inst_cream->single = BIT(inst, 8) == 0;
inst_cream->d = (inst_cream->single ? BITS(inst,12,15)<<1 | BIT(inst,22) : BITS(inst,12,15) | BIT(inst,22)<<4);
@ -518,7 +518,7 @@ static ARM_INST_PTR INTERPRETER_TRANSLATE(vmovr)(unsigned int inst, int index)
inst_base->cond = BITS(inst, 28, 31);
inst_base->idx = index;
inst_base->br = NON_BRANCH;
inst_base->br = TransExtData::NON_BRANCH;
inst_cream->single = BIT(inst, 8) == 0;
inst_cream->d = (inst_cream->single ? BITS(inst,12,15)<<1 | BIT(inst,22) : BITS(inst,12,15) | BIT(inst,22)<<4);
@ -560,7 +560,7 @@ static ARM_INST_PTR INTERPRETER_TRANSLATE(vabs)(unsigned int inst, int index)
inst_base->cond = BITS(inst, 28, 31);
inst_base->idx = index;
inst_base->br = NON_BRANCH;
inst_base->br = TransExtData::NON_BRANCH;
inst_cream->dp_operation = BIT(inst, 8);
inst_cream->instr = inst;
@ -610,7 +610,7 @@ static ARM_INST_PTR INTERPRETER_TRANSLATE(vneg)(unsigned int inst, int index)
inst_base->cond = BITS(inst, 28, 31);
inst_base->idx = index;
inst_base->br = NON_BRANCH;
inst_base->br = TransExtData::NON_BRANCH;
inst_cream->dp_operation = BIT(inst, 8);
inst_cream->instr = inst;
@ -659,7 +659,7 @@ static ARM_INST_PTR INTERPRETER_TRANSLATE(vsqrt)(unsigned int inst, int index)
inst_base->cond = BITS(inst, 28, 31);
inst_base->idx = index;
inst_base->br = NON_BRANCH;
inst_base->br = TransExtData::NON_BRANCH;
inst_cream->dp_operation = BIT(inst, 8);
inst_cream->instr = inst;
@ -708,7 +708,7 @@ static ARM_INST_PTR INTERPRETER_TRANSLATE(vcmp)(unsigned int inst, int index)
inst_base->cond = BITS(inst, 28, 31);
inst_base->idx = index;
inst_base->br = NON_BRANCH;
inst_base->br = TransExtData::NON_BRANCH;
inst_cream->dp_operation = BIT(inst, 8);
inst_cream->instr = inst;
@ -757,7 +757,7 @@ static ARM_INST_PTR INTERPRETER_TRANSLATE(vcmp2)(unsigned int inst, int index)
inst_base->cond = BITS(inst, 28, 31);
inst_base->idx = index;
inst_base->br = NON_BRANCH;
inst_base->br = TransExtData::NON_BRANCH;
inst_cream->dp_operation = BIT(inst, 8);
inst_cream->instr = inst;
@ -806,7 +806,7 @@ static ARM_INST_PTR INTERPRETER_TRANSLATE(vcvtbds)(unsigned int inst, int index)
inst_base->cond = BITS(inst, 28, 31);
inst_base->idx = index;
inst_base->br = NON_BRANCH;
inst_base->br = TransExtData::NON_BRANCH;
inst_cream->dp_operation = BIT(inst, 8);
inst_cream->instr = inst;
@ -857,7 +857,7 @@ static ARM_INST_PTR INTERPRETER_TRANSLATE(vcvtbff)(unsigned int inst, int index)
inst_base->cond = BITS(inst, 28, 31);
inst_base->idx = index;
inst_base->br = NON_BRANCH;
inst_base->br = TransExtData::NON_BRANCH;
inst_cream->dp_operation = BIT(inst, 8);
inst_cream->instr = inst;
@ -906,7 +906,7 @@ static ARM_INST_PTR INTERPRETER_TRANSLATE(vcvtbfi)(unsigned int inst, int index)
inst_base->cond = BITS(inst, 28, 31);
inst_base->idx = index;
inst_base->br = NON_BRANCH;
inst_base->br = TransExtData::NON_BRANCH;
inst_cream->dp_operation = BIT(inst, 8);
inst_cream->instr = inst;
@ -962,7 +962,7 @@ static ARM_INST_PTR INTERPRETER_TRANSLATE(vmovbrs)(unsigned int inst, int index)
inst_base->cond = BITS(inst, 28, 31);
inst_base->idx = index;
inst_base->br = NON_BRANCH;
inst_base->br = TransExtData::NON_BRANCH;
inst_cream->to_arm = BIT(inst, 20) == 1;
inst_cream->t = BITS(inst, 12, 15);
@ -1006,7 +1006,7 @@ static ARM_INST_PTR INTERPRETER_TRANSLATE(vmsr)(unsigned int inst, int index)
inst_base->cond = BITS(inst, 28, 31);
inst_base->idx = index;
inst_base->br = NON_BRANCH;
inst_base->br = TransExtData::NON_BRANCH;
inst_cream->reg = BITS(inst, 16, 19);
inst_cream->Rt = BITS(inst, 12, 15);
@ -1069,7 +1069,7 @@ static ARM_INST_PTR INTERPRETER_TRANSLATE(vmovbrc)(unsigned int inst, int index)
inst_base->cond = BITS(inst, 28, 31);
inst_base->idx = index;
inst_base->br = NON_BRANCH;
inst_base->br = TransExtData::NON_BRANCH;
inst_cream->d = BITS(inst, 16, 19)|BIT(inst, 7)<<4;
inst_cream->t = BITS(inst, 12, 15);
@ -1115,7 +1115,7 @@ static ARM_INST_PTR INTERPRETER_TRANSLATE(vmrs)(unsigned int inst, int index)
inst_base->cond = BITS(inst, 28, 31);
inst_base->idx = index;
inst_base->br = NON_BRANCH;
inst_base->br = TransExtData::NON_BRANCH;
inst_cream->reg = BITS(inst, 16, 19);
inst_cream->Rt = BITS(inst, 12, 15);
@ -1200,7 +1200,7 @@ static ARM_INST_PTR INTERPRETER_TRANSLATE(vmovbcr)(unsigned int inst, int index)
inst_base->cond = BITS(inst, 28, 31);
inst_base->idx = index;
inst_base->br = NON_BRANCH;
inst_base->br = TransExtData::NON_BRANCH;
inst_cream->d = BITS(inst, 16, 19)|BIT(inst, 7)<<4;
inst_cream->t = BITS(inst, 12, 15);
@ -1253,7 +1253,7 @@ static ARM_INST_PTR INTERPRETER_TRANSLATE(vmovbrrss)(unsigned int inst, int inde
inst_base->cond = BITS(inst, 28, 31);
inst_base->idx = index;
inst_base->br = NON_BRANCH;
inst_base->br = TransExtData::NON_BRANCH;
inst_cream->to_arm = BIT(inst, 20) == 1;
inst_cream->t = BITS(inst, 12, 15);
@ -1301,7 +1301,7 @@ static ARM_INST_PTR INTERPRETER_TRANSLATE(vmovbrrd)(unsigned int inst, int index
inst_base->cond = BITS(inst, 28, 31);
inst_base->idx = index;
inst_base->br = NON_BRANCH;
inst_base->br = TransExtData::NON_BRANCH;
inst_cream->to_arm = BIT(inst, 20) == 1;
inst_cream->t = BITS(inst, 12, 15);
@ -1354,7 +1354,7 @@ static ARM_INST_PTR INTERPRETER_TRANSLATE(vstr)(unsigned int inst, int index)
inst_base->cond = BITS(inst, 28, 31);
inst_base->idx = index;
inst_base->br = NON_BRANCH;
inst_base->br = TransExtData::NON_BRANCH;
inst_cream->single = BIT(inst, 8) == 0;
inst_cream->add = BIT(inst, 23);
@ -1420,7 +1420,7 @@ static ARM_INST_PTR INTERPRETER_TRANSLATE(vpush)(unsigned int inst, int index)
inst_base->cond = BITS(inst, 28, 31);
inst_base->idx = index;
inst_base->br = NON_BRANCH;
inst_base->br = TransExtData::NON_BRANCH;
inst_cream->single = BIT(inst, 8) == 0;
inst_cream->d = (inst_cream->single ? BITS(inst, 12, 15)<<1|BIT(inst, 22) : BITS(inst, 12, 15)|BIT(inst, 22)<<4);
@ -1495,7 +1495,7 @@ static ARM_INST_PTR INTERPRETER_TRANSLATE(vstm)(unsigned int inst, int index)
inst_base->cond = BITS(inst, 28, 31);
inst_base->idx = index;
inst_base->br = NON_BRANCH;
inst_base->br = TransExtData::NON_BRANCH;
inst_cream->single = BIT(inst, 8) == 0;
inst_cream->add = BIT(inst, 23);
@ -1580,7 +1580,7 @@ static ARM_INST_PTR INTERPRETER_TRANSLATE(vpop)(unsigned int inst, int index)
inst_base->cond = BITS(inst, 28, 31);
inst_base->idx = index;
inst_base->br = NON_BRANCH;
inst_base->br = TransExtData::NON_BRANCH;
inst_cream->single = BIT(inst, 8) == 0;
inst_cream->d = (inst_cream->single ? (BITS(inst, 12, 15)<<1)|BIT(inst, 22) : BITS(inst, 12, 15)|(BIT(inst, 22)<<4));
@ -1653,7 +1653,7 @@ static ARM_INST_PTR INTERPRETER_TRANSLATE(vldr)(unsigned int inst, int index)
inst_base->cond = BITS(inst, 28, 31);
inst_base->idx = index;
inst_base->br = NON_BRANCH;
inst_base->br = TransExtData::NON_BRANCH;
inst_cream->single = BIT(inst, 8) == 0;
inst_cream->add = BIT(inst, 23);
@ -1722,7 +1722,7 @@ static ARM_INST_PTR INTERPRETER_TRANSLATE(vldm)(unsigned int inst, int index)
inst_base->cond = BITS(inst, 28, 31);
inst_base->idx = index;
inst_base->br = NON_BRANCH;
inst_base->br = TransExtData::NON_BRANCH;
inst_cream->single = BIT(inst, 8) == 0;
inst_cream->add = BIT(inst, 23);

View file

@ -53,14 +53,20 @@
#include <algorithm>
#include <cinttypes>
#include <common/assert.h>
#include "common/assert.h"
//#include "common/common_funcs.h"
#include "common/common_types.h"
//#include "common/logging/log.h"
#include "skyeye_interpreter/skyeye_common/vfp/vfp_helper.h"
#include "skyeye_interpreter/skyeye_common/vfp/asm_vfp.h"
#include "skyeye_interpreter/skyeye_common/vfp/vfp.h"
#define LOG_INFO(...) do{}while(0)
#define LOG_TRACE(...) do{}while(0)
#define LOG_WARNING(...) do{}while(0)
static struct vfp_single vfp_single_default_qnan = {
255,
0,
@ -69,8 +75,8 @@ static struct vfp_single vfp_single_default_qnan = {
static void vfp_single_dump(const char *str, struct vfp_single *s)
{
//LOG_TRACE(Core_ARM11, "%s: sign=%d exponent=%d significand=%08x",
// str, s->sign != 0, s->exponent, s->significand);
LOG_TRACE(Core_ARM11, "%s: sign=%d exponent=%d significand=%08x",
str, s->sign != 0, s->exponent, s->significand);
}
static void vfp_single_normalise_denormal(struct vfp_single *vs)
@ -88,10 +94,11 @@ static void vfp_single_normalise_denormal(struct vfp_single *vs)
}
u32 vfp_single_normaliseround(ARMul_State* state, int sd, struct vfp_single *vs, u32 fpscr, u32 exceptions, const char *func)
u32 vfp_single_normaliseround(ARMul_State* state, int sd, struct vfp_single *vs, u32 fpscr, const char *func)
{
u32 significand, incr, rmode;
int exponent, shift, underflow;
u32 exceptions = 0;
vfp_single_dump("pack: in", vs);
@ -160,7 +167,7 @@ u32 vfp_single_normaliseround(ARMul_State* state, int sd, struct vfp_single *vs,
} else if ((rmode == FPSCR_ROUND_PLUSINF) ^ (vs->sign != 0))
incr = (1 << (VFP_SINGLE_LOW_BITS + 1)) - 1;
//LOG_TRACE(Core_ARM11, "rounding increment = 0x%08x", incr);
LOG_TRACE(Core_ARM11, "rounding increment = 0x%08x", incr);
/*
* Is our rounding going to overflow?
@ -215,8 +222,8 @@ pack:
vfp_single_dump("pack: final", vs);
{
s32 d = vfp_single_pack(vs);
//LOG_TRACE(Core_ARM11, "%s: d(s%d)=%08x exceptions=%08x", func,
// sd, d, exceptions);
LOG_TRACE(Core_ARM11, "%s: d(s%d)=%08x exceptions=%08x", func,
sd, d, exceptions);
vfp_put_float(state, d, sd);
}
@ -265,7 +272,7 @@ vfp_propagate_nan(struct vfp_single *vsd, struct vfp_single *vsn,
/*
* If one was a signalling NAN, raise invalid operation.
*/
return tn == VFP_SNAN || tm == VFP_SNAN ? (u32)FPSCR_IOC : (u32)VFP_NAN_FLAG;
return (tn == VFP_SNAN || tm == VFP_SNAN) ? u32(FPSCR_IOC) : u32(VFP_NAN_FLAG);
}
@ -306,7 +313,7 @@ u32 vfp_estimate_sqrt_significand(u32 exponent, u32 significand)
u32 z, a;
if ((significand & 0xc0000000) != 0x40000000) {
//LOG_TRACE(Core_ARM11, "invalid significand");
LOG_TRACE(Core_ARM11, "invalid significand");
}
a = significand << 1;
@ -333,8 +340,9 @@ static u32 vfp_single_fsqrt(ARMul_State* state, int sd, int unused, s32 m, u32 f
{
struct vfp_single vsm, vsd, *vsp;
int ret, tm;
u32 exceptions = 0;
vfp_single_unpack(&vsm, m, &fpscr);
exceptions |= vfp_single_unpack(&vsm, m, fpscr);
tm = vfp_single_type(&vsm);
if (tm & (VFP_NAN|VFP_INFINITY)) {
vsp = &vsd;
@ -396,7 +404,7 @@ sqrt_invalid:
term = (u64)vsd.significand * vsd.significand;
rem = ((u64)vsm.significand << 32) - term;
//LOG_TRACE(Core_ARM11, "term=%016" PRIx64 "rem=%016" PRIx64, term, rem);
LOG_TRACE(Core_ARM11, "term=%016" PRIx64 "rem=%016" PRIx64, term, rem);
while (rem < 0) {
vsd.significand -= 1;
@ -407,7 +415,8 @@ sqrt_invalid:
}
vsd.significand = vfp_shiftright32jamming(vsd.significand, 1);
return vfp_single_normaliseround(state, sd, &vsd, fpscr, 0, "fsqrt");
exceptions |= vfp_single_normaliseround(state, sd, &vsd, fpscr, "fsqrt");
return exceptions;
}
/*
@ -502,7 +511,7 @@ static u32 vfp_single_fcvtd(ARMul_State* state, int dd, int unused, s32 m, u32 f
int tm;
u32 exceptions = 0;
vfp_single_unpack(&vsm, m, &fpscr);
exceptions |= vfp_single_unpack(&vsm, m, fpscr);
tm = vfp_single_type(&vsm);
@ -510,7 +519,7 @@ static u32 vfp_single_fcvtd(ARMul_State* state, int dd, int unused, s32 m, u32 f
* If we have a signalling NaN, signal invalid operation.
*/
if (tm == VFP_SNAN)
exceptions = FPSCR_IOC;
exceptions |= FPSCR_IOC;
if (tm & VFP_DENORMAL)
vfp_single_normalise_denormal(&vsm);
@ -531,7 +540,8 @@ static u32 vfp_single_fcvtd(ARMul_State* state, int dd, int unused, s32 m, u32 f
else
vdd.exponent = vsm.exponent + (1023 - 127);
return vfp_double_normaliseround(state, dd, &vdd, fpscr, exceptions, "fcvtd");
exceptions |= vfp_double_normaliseround(state, dd, &vdd, fpscr, "fcvtd");
return exceptions;
pack_nan:
vfp_put_double(state, vfp_double_pack(&vdd), dd);
@ -541,23 +551,27 @@ pack_nan:
static u32 vfp_single_fuito(ARMul_State* state, int sd, int unused, s32 m, u32 fpscr)
{
struct vfp_single vs;
u32 exceptions = 0;
vs.sign = 0;
vs.exponent = 127 + 31 - 1;
vs.significand = (u32)m;
return vfp_single_normaliseround(state, sd, &vs, fpscr, 0, "fuito");
exceptions |= vfp_single_normaliseround(state, sd, &vs, fpscr, "fuito");
return exceptions;
}
static u32 vfp_single_fsito(ARMul_State* state, int sd, int unused, s32 m, u32 fpscr)
{
struct vfp_single vs;
u32 exceptions = 0;
vs.sign = (m & 0x80000000) >> 16;
vs.exponent = 127 + 31 - 1;
vs.significand = vs.sign ? -m : m;
return vfp_single_normaliseround(state, sd, &vs, fpscr, 0, "fsito");
exceptions |= vfp_single_normaliseround(state, sd, &vs, fpscr, "fsito");
return exceptions;
}
static u32 vfp_single_ftoui(ARMul_State* state, int sd, int unused, s32 m, u32 fpscr)
@ -567,7 +581,7 @@ static u32 vfp_single_ftoui(ARMul_State* state, int sd, int unused, s32 m, u32 f
int rmode = fpscr & FPSCR_RMODE_MASK;
int tm;
vfp_single_unpack(&vsm, m, &fpscr);
exceptions |= vfp_single_unpack(&vsm, m, fpscr);
vfp_single_dump("VSM", &vsm);
/*
@ -582,7 +596,7 @@ static u32 vfp_single_ftoui(ARMul_State* state, int sd, int unused, s32 m, u32 f
if (vsm.exponent >= 127 + 32) {
d = vsm.sign ? 0 : 0xffffffff;
exceptions = FPSCR_IOC;
exceptions |= FPSCR_IOC;
} else if (vsm.exponent >= 127) {
int shift = 127 + 31 - vsm.exponent;
u32 rem, incr = 0;
@ -591,7 +605,11 @@ static u32 vfp_single_ftoui(ARMul_State* state, int sd, int unused, s32 m, u32 f
* 2^0 <= m < 2^32-2^8
*/
d = (vsm.significand << 1) >> shift;
rem = vsm.significand << (33 - shift);
if (shift > 0) {
rem = (vsm.significand << 1) << (32 - shift);
} else {
rem = 0;
}
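// With the guard above, the remainder is only computed for shift >= 1, so both shift
// counts (1 and 32 - shift) stay below 32; shift == 0 means the value is already an
// exact 32-bit integer and leaves no remainder.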
if (rmode == FPSCR_ROUND_NEAREST) {
incr = 0x80000000;
@ -618,17 +636,25 @@ static u32 vfp_single_ftoui(ARMul_State* state, int sd, int unused, s32 m, u32 f
} else {
d = 0;
if (vsm.exponent | vsm.significand) {
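// Nonzero inputs with magnitude below 1: whenever the rounded result would be
// negative it cannot be encoded in an unsigned destination, so the code raises
// invalid-operation and leaves d at 0; otherwise the result is 0 or 1 with the
// inexact flag set.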
if (rmode == FPSCR_ROUND_NEAREST) {
if (vsm.exponent >= 126) {
d = vsm.sign ? 0 : 1;
exceptions |= vsm.sign ? FPSCR_IOC : FPSCR_IXC;
} else {
exceptions |= FPSCR_IXC;
if (rmode == FPSCR_ROUND_PLUSINF && vsm.sign == 0)
}
} else if (rmode == FPSCR_ROUND_PLUSINF && vsm.sign == 0) {
d = 1;
else if (rmode == FPSCR_ROUND_MINUSINF && vsm.sign) {
d = 0;
exceptions |= FPSCR_IOC;
exceptions |= FPSCR_IXC;
} else if (rmode == FPSCR_ROUND_MINUSINF) {
exceptions |= vsm.sign ? FPSCR_IOC : FPSCR_IXC;
} else {
exceptions |= FPSCR_IXC;
}
}
}
//LOG_TRACE(Core_ARM11, "ftoui: d(s%d)=%08x exceptions=%08x", sd, d, exceptions);
LOG_TRACE(Core_ARM11, "ftoui: d(s%d)=%08x exceptions=%08x", sd, d, exceptions);
vfp_put_float(state, d, sd);
@ -637,7 +663,7 @@ static u32 vfp_single_ftoui(ARMul_State* state, int sd, int unused, s32 m, u32 f
static u32 vfp_single_ftouiz(ARMul_State* state, int sd, int unused, s32 m, u32 fpscr)
{
return vfp_single_ftoui(state, sd, unused, m, FPSCR_ROUND_TOZERO);
return vfp_single_ftoui(state, sd, unused, m, (fpscr & ~FPSCR_RMODE_MASK) | FPSCR_ROUND_TOZERO);
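// (Only the rounding-mode bits are overridden here; the caller's remaining FPSCR
//  state, e.g. flush-to-zero and exception enables, is passed through unchanged.)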
}
static u32 vfp_single_ftosi(ARMul_State* state, int sd, int unused, s32 m, u32 fpscr)
@ -647,7 +673,7 @@ static u32 vfp_single_ftosi(ARMul_State* state, int sd, int unused, s32 m, u32 f
int rmode = fpscr & FPSCR_RMODE_MASK;
int tm;
vfp_single_unpack(&vsm, m, &fpscr);
exceptions |= vfp_single_unpack(&vsm, m, fpscr);
vfp_single_dump("VSM", &vsm);
/*
@ -660,7 +686,7 @@ static u32 vfp_single_ftosi(ARMul_State* state, int sd, int unused, s32 m, u32 f
if (tm & VFP_NAN) {
d = 0;
exceptions |= FPSCR_IOC;
} else if (vsm.exponent >= 127 + 32) {
} else if (vsm.exponent >= 127 + 31) {
/*
* m >= 2^31-2^7: invalid
*/
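// Exponents of 127 + 31 and above correspond to |m| >= 2^31, which (apart from
// -2^31 itself) cannot be represented in a signed 32-bit result, so those inputs
// are treated as invalid here rather than converted below.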
@ -674,7 +700,7 @@ static u32 vfp_single_ftosi(ARMul_State* state, int sd, int unused, s32 m, u32 f
/* 2^0 <= m <= 2^31-2^7 */
d = (vsm.significand << 1) >> shift;
rem = vsm.significand << (33 - shift);
rem = (vsm.significand << 1) << (32 - shift);
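// shift is at least 1 in this branch, so splitting the shift keeps the count at
// 32 - shift <= 31, whereas a single shift by 33 - shift could reach 32.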
if (rmode == FPSCR_ROUND_NEAREST) {
incr = 0x80000000;
@ -700,14 +726,18 @@ static u32 vfp_single_ftosi(ARMul_State* state, int sd, int unused, s32 m, u32 f
d = 0;
if (vsm.exponent | vsm.significand) {
exceptions |= FPSCR_IXC;
if (rmode == FPSCR_ROUND_PLUSINF && vsm.sign == 0)
if (rmode == FPSCR_ROUND_NEAREST) {
if (vsm.exponent >= 126)
d = vsm.sign ? 0xffffffff : 1;
} else if (rmode == FPSCR_ROUND_PLUSINF && vsm.sign == 0) {
d = 1;
else if (rmode == FPSCR_ROUND_MINUSINF && vsm.sign)
d = -1;
} else if (rmode == FPSCR_ROUND_MINUSINF && vsm.sign) {
d = 0xffffffff;
}
}
}
//LOG_TRACE(Core_ARM11, "ftosi: d(s%d)=%08x exceptions=%08x", sd, d, exceptions);
LOG_TRACE(Core_ARM11, "ftosi: d(s%d)=%08x exceptions=%08x", sd, d, exceptions);
vfp_put_float(state, (s32)d, sd);
@ -716,7 +746,7 @@ static u32 vfp_single_ftosi(ARMul_State* state, int sd, int unused, s32 m, u32 f
static u32 vfp_single_ftosiz(ARMul_State* state, int sd, int unused, s32 m, u32 fpscr)
{
return vfp_single_ftosi(state, sd, unused, m, FPSCR_ROUND_TOZERO);
return vfp_single_ftosi(state, sd, unused, m, (fpscr & ~FPSCR_RMODE_MASK) | FPSCR_ROUND_TOZERO);
}
static struct op fops_ext[] = {
@ -773,7 +803,7 @@ vfp_single_fadd_nonnumber(struct vfp_single *vsd, struct vfp_single *vsn,
/*
* different signs -> invalid
*/
exceptions = FPSCR_IOC;
exceptions |= FPSCR_IOC;
vsp = &vfp_single_default_qnan;
} else {
/*
@ -804,7 +834,7 @@ vfp_single_add(struct vfp_single *vsd, struct vfp_single *vsn,
if (vsn->significand & 0x80000000 ||
vsm->significand & 0x80000000) {
//LOG_WARNING(Core_ARM11, "bad FP values");
LOG_WARNING(Core_ARM11, "bad FP values");
vfp_single_dump("VSN", vsn);
vfp_single_dump("VSM", vsm);
}
@ -871,7 +901,7 @@ vfp_single_multiply(struct vfp_single *vsd, struct vfp_single *vsn, struct vfp_s
*/
if (vsn->exponent < vsm->exponent) {
std::swap(vsm, vsn);
//LOG_TRACE(Core_ARM11, "swapping M <-> N");
LOG_TRACE(Core_ARM11, "swapping M <-> N");
}
vsd->sign = vsn->sign ^ vsm->sign;
@ -920,27 +950,27 @@ static u32
vfp_single_multiply_accumulate(ARMul_State* state, int sd, int sn, s32 m, u32 fpscr, u32 negate, const char *func)
{
vfp_single vsd, vsp, vsn, vsm;
u32 exceptions;
u32 exceptions = 0;
s32 v;
v = vfp_get_float(state, sn);
//LOG_TRACE(Core_ARM11, "s%u = %08x", sn, v);
vfp_single_unpack(&vsn, v, &fpscr);
LOG_TRACE(Core_ARM11, "s%u = %08x", sn, v);
exceptions |= vfp_single_unpack(&vsn, v, fpscr);
if (vsn.exponent == 0 && vsn.significand)
vfp_single_normalise_denormal(&vsn);
vfp_single_unpack(&vsm, m, &fpscr);
exceptions |= vfp_single_unpack(&vsm, m, fpscr);
if (vsm.exponent == 0 && vsm.significand)
vfp_single_normalise_denormal(&vsm);
exceptions = vfp_single_multiply(&vsp, &vsn, &vsm, fpscr);
exceptions |= vfp_single_multiply(&vsp, &vsn, &vsm, fpscr);
if (negate & NEG_MULTIPLY)
vsp.sign = vfp_sign_negate(vsp.sign);
v = vfp_get_float(state, sd);
//LOG_TRACE(Core_ARM11, "s%u = %08x", sd, v);
vfp_single_unpack(&vsn, v, &fpscr);
LOG_TRACE(Core_ARM11, "s%u = %08x", sd, v);
exceptions |= vfp_single_unpack(&vsn, v, fpscr);
if (vsn.exponent == 0 && vsn.significand != 0)
vfp_single_normalise_denormal(&vsn);
@ -949,7 +979,8 @@ vfp_single_multiply_accumulate(ARMul_State* state, int sd, int sn, s32 m, u32 fp
exceptions |= vfp_single_add(&vsd, &vsn, &vsp, fpscr);
return vfp_single_normaliseround(state, sd, &vsd, fpscr, exceptions, func);
exceptions |= vfp_single_normaliseround(state, sd, &vsd, fpscr, func);
return exceptions;
}
/*
@ -961,8 +992,10 @@ vfp_single_multiply_accumulate(ARMul_State* state, int sd, int sn, s32 m, u32 fp
*/
static u32 vfp_single_fmac(ARMul_State* state, int sd, int sn, s32 m, u32 fpscr)
{
//LOG_TRACE(Core_ARM11, "s%u = %08x", sn, sd);
return vfp_single_multiply_accumulate(state, sd, sn, m, fpscr, 0, "fmac");
u32 exceptions = 0;
LOG_TRACE(Core_ARM11, "s%u = %08x", sn, sd);
exceptions |= vfp_single_multiply_accumulate(state, sd, sn, m, fpscr, 0, "fmac");
return exceptions;
}
/*
@ -971,7 +1004,7 @@ static u32 vfp_single_fmac(ARMul_State* state, int sd, int sn, s32 m, u32 fpscr)
static u32 vfp_single_fnmac(ARMul_State* state, int sd, int sn, s32 m, u32 fpscr)
{
// TODO: this one has its arguments inverted, investigate.
//LOG_TRACE(Core_ARM11, "s%u = %08x", sd, sn);
LOG_TRACE(Core_ARM11, "s%u = %08x", sd, sn);
return vfp_single_multiply_accumulate(state, sd, sn, m, fpscr, NEG_MULTIPLY, "fnmac");
}
@ -980,7 +1013,7 @@ static u32 vfp_single_fnmac(ARMul_State* state, int sd, int sn, s32 m, u32 fpscr
*/
static u32 vfp_single_fmsc(ARMul_State* state, int sd, int sn, s32 m, u32 fpscr)
{
//LOG_TRACE(Core_ARM11, "s%u = %08x", sn, sd);
LOG_TRACE(Core_ARM11, "s%u = %08x", sn, sd);
return vfp_single_multiply_accumulate(state, sd, sn, m, fpscr, NEG_SUBTRACT, "fmsc");
}
@ -989,7 +1022,7 @@ static u32 vfp_single_fmsc(ARMul_State* state, int sd, int sn, s32 m, u32 fpscr)
*/
static u32 vfp_single_fnmsc(ARMul_State* state, int sd, int sn, s32 m, u32 fpscr)
{
//LOG_TRACE(Core_ARM11, "s%u = %08x", sn, sd);
LOG_TRACE(Core_ARM11, "s%u = %08x", sn, sd);
return vfp_single_multiply_accumulate(state, sd, sn, m, fpscr, NEG_SUBTRACT | NEG_MULTIPLY, "fnmsc");
}
@ -999,21 +1032,23 @@ static u32 vfp_single_fnmsc(ARMul_State* state, int sd, int sn, s32 m, u32 fpscr
static u32 vfp_single_fmul(ARMul_State* state, int sd, int sn, s32 m, u32 fpscr)
{
struct vfp_single vsd, vsn, vsm;
u32 exceptions;
u32 exceptions = 0;
s32 n = vfp_get_float(state, sn);
//LOG_TRACE(Core_ARM11, "s%u = %08x", sn, n);
LOG_TRACE(Core_ARM11, "s%u = %08x", sn, n);
vfp_single_unpack(&vsn, n, &fpscr);
exceptions |= vfp_single_unpack(&vsn, n, fpscr);
if (vsn.exponent == 0 && vsn.significand)
vfp_single_normalise_denormal(&vsn);
vfp_single_unpack(&vsm, m, &fpscr);
exceptions |= vfp_single_unpack(&vsm, m, fpscr);
if (vsm.exponent == 0 && vsm.significand)
vfp_single_normalise_denormal(&vsm);
exceptions = vfp_single_multiply(&vsd, &vsn, &vsm, fpscr);
return vfp_single_normaliseround(state, sd, &vsd, fpscr, exceptions, "fmul");
exceptions |= vfp_single_multiply(&vsd, &vsn, &vsm, fpscr);
exceptions |= vfp_single_normaliseround(state, sd, &vsd, fpscr, "fmul");
return exceptions;
}
/*
@ -1022,22 +1057,24 @@ static u32 vfp_single_fmul(ARMul_State* state, int sd, int sn, s32 m, u32 fpscr)
static u32 vfp_single_fnmul(ARMul_State* state, int sd, int sn, s32 m, u32 fpscr)
{
struct vfp_single vsd, vsn, vsm;
u32 exceptions;
u32 exceptions = 0;
s32 n = vfp_get_float(state, sn);
//LOG_TRACE(Core_ARM11, "s%u = %08x", sn, n);
LOG_TRACE(Core_ARM11, "s%u = %08x", sn, n);
vfp_single_unpack(&vsn, n, &fpscr);
exceptions |= vfp_single_unpack(&vsn, n, fpscr);
if (vsn.exponent == 0 && vsn.significand)
vfp_single_normalise_denormal(&vsn);
vfp_single_unpack(&vsm, m, &fpscr);
exceptions |= vfp_single_unpack(&vsm, m, fpscr);
if (vsm.exponent == 0 && vsm.significand)
vfp_single_normalise_denormal(&vsm);
exceptions = vfp_single_multiply(&vsd, &vsn, &vsm, fpscr);
exceptions |= vfp_single_multiply(&vsd, &vsn, &vsm, fpscr);
vsd.sign = vfp_sign_negate(vsd.sign);
return vfp_single_normaliseround(state, sd, &vsd, fpscr, exceptions, "fnmul");
exceptions |= vfp_single_normaliseround(state, sd, &vsd, fpscr, "fnmul");
return exceptions;
}
/*
@ -1046,25 +1083,26 @@ static u32 vfp_single_fnmul(ARMul_State* state, int sd, int sn, s32 m, u32 fpscr
static u32 vfp_single_fadd(ARMul_State* state, int sd, int sn, s32 m, u32 fpscr)
{
struct vfp_single vsd, vsn, vsm;
u32 exceptions;
u32 exceptions = 0;
s32 n = vfp_get_float(state, sn);
//LOG_TRACE(Core_ARM11, "s%u = %08x", sn, n);
LOG_TRACE(Core_ARM11, "s%u = %08x", sn, n);
/*
* Unpack and normalise denormals.
*/
vfp_single_unpack(&vsn, n, &fpscr);
exceptions |= vfp_single_unpack(&vsn, n, fpscr);
if (vsn.exponent == 0 && vsn.significand)
vfp_single_normalise_denormal(&vsn);
vfp_single_unpack(&vsm, m, &fpscr);
exceptions |= vfp_single_unpack(&vsm, m, fpscr);
if (vsm.exponent == 0 && vsm.significand)
vfp_single_normalise_denormal(&vsm);
exceptions = vfp_single_add(&vsd, &vsn, &vsm, fpscr);
exceptions |= vfp_single_add(&vsd, &vsn, &vsm, fpscr);
return vfp_single_normaliseround(state, sd, &vsd, fpscr, exceptions, "fadd");
exceptions |= vfp_single_normaliseround(state, sd, &vsd, fpscr, "fadd");
return exceptions;
}
/*
@ -1072,7 +1110,7 @@ static u32 vfp_single_fadd(ARMul_State* state, int sd, int sn, s32 m, u32 fpscr)
*/
static u32 vfp_single_fsub(ARMul_State* state, int sd, int sn, s32 m, u32 fpscr)
{
//LOG_TRACE(Core_ARM11, "s%u = %08x", sn, sd);
LOG_TRACE(Core_ARM11, "s%u = %08x", sn, sd);
/*
* Subtraction is addition with one sign inverted.
*/
@ -1092,10 +1130,10 @@ static u32 vfp_single_fdiv(ARMul_State* state, int sd, int sn, s32 m, u32 fpscr)
s32 n = vfp_get_float(state, sn);
int tm, tn;
//LOG_TRACE(Core_ARM11, "s%u = %08x", sn, n);
LOG_TRACE(Core_ARM11, "s%u = %08x", sn, n);
vfp_single_unpack(&vsn, n, &fpscr);
vfp_single_unpack(&vsm, m, &fpscr);
exceptions |= vfp_single_unpack(&vsn, n, fpscr);
exceptions |= vfp_single_unpack(&vsm, m, fpscr);
vsd.sign = vsn.sign ^ vsm.sign;
@ -1161,16 +1199,17 @@ static u32 vfp_single_fdiv(ARMul_State* state, int sd, int sn, s32 m, u32 fpscr)
if ((vsd.significand & 0x3f) == 0)
vsd.significand |= ((u64)vsm.significand * vsd.significand != (u64)vsn.significand << 32);
return vfp_single_normaliseround(state, sd, &vsd, fpscr, 0, "fdiv");
exceptions |= vfp_single_normaliseround(state, sd, &vsd, fpscr, "fdiv");
return exceptions;
vsn_nan:
exceptions = vfp_propagate_nan(&vsd, &vsn, &vsm, fpscr);
exceptions |= vfp_propagate_nan(&vsd, &vsn, &vsm, fpscr);
pack:
vfp_put_float(state, vfp_single_pack(&vsd), sd);
return exceptions;
vsm_nan:
exceptions = vfp_propagate_nan(&vsd, &vsm, &vsn, fpscr);
exceptions |= vfp_propagate_nan(&vsd, &vsm, &vsn, fpscr);
goto pack;
zero:
@ -1179,7 +1218,7 @@ zero:
goto pack;
divzero:
exceptions = FPSCR_DZC;
exceptions |= FPSCR_DZC;
infinity:
vsd.exponent = 255;
vsd.significand = 0;
@ -1187,7 +1226,8 @@ infinity:
invalid:
vfp_put_float(state, vfp_single_pack(&vfp_single_default_qnan), sd);
return FPSCR_IOC;
exceptions |= FPSCR_IOC;
return exceptions;
}
static struct op fops[] = {
@ -1239,35 +1279,34 @@ u32 vfp_single_cpdo(ARMul_State* state, u32 inst, u32 fpscr)
else
veclen = fpscr & FPSCR_LENGTH_MASK;
//LOG_TRACE(Core_ARM11, "vecstride=%u veclen=%u", vecstride,
// (veclen >> FPSCR_LENGTH_BIT) + 1);
LOG_TRACE(Core_ARM11, "vecstride=%u veclen=%u", vecstride,
(veclen >> FPSCR_LENGTH_BIT) + 1);
if (!fop->fn) {
// LOG_CRITICAL(Core_ARM11, "could not find single op %d, inst=0x%x@0x%x", FEXT_TO_IDX(inst), inst, state->Reg[15]);
// Crash();
ASSERT_MSG(false, "could not find single op %d, inst=0x%x@0x%x", FEXT_TO_IDX(inst), inst, state->Reg[15]);
exit(-1);
goto invalid;
}
for (vecitr = 0; vecitr <= veclen; vecitr += 1 << FPSCR_LENGTH_BIT) {
s32 m = vfp_get_float(state, sm);
u32 except;
char type;
// char type;
type = (fop->flags & OP_DD) ? 'd' : 's';
(void)type;
//if (op == FOP_EXT)
// LOG_TRACE(Core_ARM11, "itr%d (%c%u) = op[%u] (s%u=%08x)",
// vecitr >> FPSCR_LENGTH_BIT, type, dest, sn,
// sm, m);
//else
// LOG_TRACE(Core_ARM11, "itr%d (%c%u) = (s%u) op[%u] (s%u=%08x)",
// vecitr >> FPSCR_LENGTH_BIT, type, dest, sn,
// FOP_TO_IDX(op), sm, m);
// type = (fop->flags & OP_DD) ? 'd' : 's';
if (op == FOP_EXT)
LOG_TRACE(Core_ARM11, "itr%d (%c%u) = op[%u] (s%u=%08x)",
vecitr >> FPSCR_LENGTH_BIT, type, dest, sn,
sm, m);
else
LOG_TRACE(Core_ARM11, "itr%d (%c%u) = (s%u) op[%u] (s%u=%08x)",
vecitr >> FPSCR_LENGTH_BIT, type, dest, sn,
FOP_TO_IDX(op), sm, m);
except = fop->fn(state, dest, sn, m, fpscr);
//LOG_TRACE(Core_ARM11, "itr%d: exceptions=%08x",
// vecitr >> FPSCR_LENGTH_BIT, except);
LOG_TRACE(Core_ARM11, "itr%d: exceptions=%08x",
vecitr >> FPSCR_LENGTH_BIT, except);
exceptions |= except;