backend/rv64: Use biscuit LI()

This commit is contained in:
Yang Liu 2024-01-19 17:57:45 +08:00 committed by Merry
parent f856ac9f33
commit b7cca7c53d
2 changed files with 2 additions and 28 deletions

View file

@@ -21,29 +21,6 @@
namespace Dynarmic::Backend::RV64 { namespace Dynarmic::Backend::RV64 {
// TODO: We should really move this to biscuit.
// Materializes the 64-bit immediate `imm` into GPR `rd`.
// Emits a short LI for immediates that fit in a sign-extended 32-bit value;
// otherwise builds the value recursively, 12 bits at a time, with SLLI/ADDI.
void Mov64(biscuit::Assembler& as, biscuit::GPR rd, u64 imm) {
    // Fast path: the immediate survives sign-extension from 32 bits, so
    // biscuit's LI() can materialize it directly.
    if (mcl::bit::sign_extend<32>(imm) == imm) {
        as.LI(rd, static_cast<u32>(imm));
        return;
    }
    // For 64-bit imm, a sequence of up to 8 instructions (i.e. LUI+ADDIW+SLLI+ADDI+SLLI+ADDI+SLLI+ADDI) is emitted.
    // In the following, imm is processed from LSB to MSB while instruction emission is performed from MSB to LSB by calling Mov64 recursively.
    // In each recursion, the lowest 12 bits are removed from imm and the optimal shift amount is calculated.
    // Then, the remaining part of imm is processed recursively and as.LI() gets called as soon as it fits into 32 bits.
    s32 lo12 = static_cast<s32>(mcl::bit::sign_extend<12>(imm));
    // Add 0x800 to cancel out the sign extension the trailing ADDI performs
    // on lo12 (ADDI's immediate is sign-extended, so a negative lo12 must be
    // compensated by carrying 1 into the upper bits).
    u64 hi52 = (imm + 0x800) >> 12;
    // Fold any extra trailing zero bits of hi52 into the shift so the
    // recursive call sees the smallest possible value; shift >= 12 because
    // the low 12 bits were just peeled off.
    int shift = 12 + std::countr_zero(hi52);
    hi52 = mcl::bit::sign_extend(shift, hi52 >> (shift - 12));
    // Emit the upper portion first, then shift it into place and patch in
    // the low 12 bits (if any).
    Mov64(as, rd, hi52);
    as.SLLI64(rd, rd, shift);
    if (lo12 != 0) {
        as.ADDI(rd, rd, lo12);
    }
}
template<IR::Opcode op> template<IR::Opcode op>
void EmitIR(biscuit::Assembler&, EmitContext&, IR::Inst*) { void EmitIR(biscuit::Assembler&, EmitContext&, IR::Inst*) {
ASSERT_FALSE("Unimplemented opcode {}", op); ASSERT_FALSE("Unimplemented opcode {}", op);
@@ -122,7 +99,7 @@ EmittedBlockInfo EmitRV64(biscuit::Assembler& as, IR::Block block, const EmitCon
const auto term = block.GetTerminal(); const auto term = block.GetTerminal();
const IR::Term::LinkBlock* link_block_term = boost::get<IR::Term::LinkBlock>(&term); const IR::Term::LinkBlock* link_block_term = boost::get<IR::Term::LinkBlock>(&term);
ASSERT(link_block_term); ASSERT(link_block_term);
Mov64(as, Xscratch0, link_block_term->next.Value()); as.LI(Xscratch0, link_block_term->next.Value());
as.SD(Xscratch0, offsetof(A32JitState, regs) + sizeof(u32) * 15, Xstate); as.SD(Xscratch0, offsetof(A32JitState, regs) + sizeof(u32) * 15, Xstate);
ptrdiff_t offset = reinterpret_cast<CodePtr>(as.GetCursorPointer()) - ebi.entry_point; ptrdiff_t offset = reinterpret_cast<CodePtr>(as.GetCursorPointer()) - ebi.entry_point;

View file

@@ -16,9 +16,6 @@
namespace Dynarmic::Backend::RV64 { namespace Dynarmic::Backend::RV64 {
// TODO: We should really move this to biscuit.
void Mov64(biscuit::Assembler& as, biscuit::GPR rd, u64 imm);
constexpr size_t spill_offset = offsetof(StackLayout, spill); constexpr size_t spill_offset = offsetof(StackLayout, spill);
constexpr size_t spill_slot_size = sizeof(decltype(StackLayout::spill)::value_type); constexpr size_t spill_slot_size = sizeof(decltype(StackLayout::spill)::value_type);
@@ -115,7 +112,7 @@ u32 RegAlloc::GenerateImmediate(const IR::Value& value) {
SpillGpr(new_location_index); SpillGpr(new_location_index);
gprs[new_location_index].SetupScratchLocation(); gprs[new_location_index].SetupScratchLocation();
Mov64(as, biscuit::GPR{new_location_index}, value.GetImmediateAsU64()); as.LI(biscuit::GPR{new_location_index}, value.GetImmediateAsU64());
return new_location_index; return new_location_index;
} else if constexpr (kind == HostLoc::Kind::Fpr) { } else if constexpr (kind == HostLoc::Kind::Fpr) {