| author | 2020-07-10 18:57:22 +0700 |
|---|---|
| committer | 2020-07-10 18:57:22 +0700 |
| commit | e60954a61a828de2b5593d549b0caf6138c02756 (patch) |
| tree | 664f6dde1658c5e5594fc5f9445997adc34ccf47 /src/devices/cpu/drcbex86.cpp |
| parent | 81e247ff16f8656b50f23788407325108f093df8 (diff) |
drcbex86: implemented asmjit emitter
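This commit replaces the back-end's hand-rolled x86emit layer (calls such as `emit_mov_r32_m32(dst, REG_EAX, ...)` that append encoded bytes through a raw `x86code *` cursor) with asmjit's `x86::Assembler`: opcode handlers now take an `Assembler &` and emit through it, e.g. `a.mov(ecx, ptr(esp, 4))`. For context, the sketch below shows asmjit's basic emission model using its documented `JitRuntime` path rather than MAME's DRC cache; the `Func` typedef and the constant 42 are illustrative only, and the exact `CodeHolder::init()` arguments can differ between asmjit versions, including the one bundled with MAME.

```cpp
#include <asmjit/x86.h>
using namespace asmjit;

using Func = int (*)();            // illustrative signature for the generated code

int main()
{
    JitRuntime rt;                 // owns executable memory for generated code
    CodeHolder code;               // accumulates encoded instructions and relocations
    code.init(rt.environment());   // target the host environment

    x86::Assembler a(&code);       // instructions are emitted through the assembler
    a.mov(x86::eax, 42);           // mov eax,42
    a.ret();                       // ret

    Func fn = nullptr;
    if (rt.add(&fn, &code))        // relocate and copy the code into executable memory
        return 1;

    int const result = fn();       // run the generated function
    rt.release(fn);
    return (result == 42) ? 0 : 1;
}
```

In the back-end itself the CodeHolder is instead given a `ThrowableErrorHandler` (added in this diff) so that any asmjit emission error raises `emu_fatalerror` rather than being silently ignored.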
Diffstat (limited to 'src/devices/cpu/drcbex86.cpp')

| -rw-r--r-- | src/devices/cpu/drcbex86.cpp | 4639 |

1 file changed, 2024 insertions, 2615 deletions
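The largest structural change in the diff below is that code generation now goes through an asmjit `CodeHolder` initialised at a fixed base address inside the DRC cache, with a new `drcbe_x86::emit(CodeHolder &)` helper copying the flattened code into the cache afterwards. The sketch below shows that flow in isolation; it assumes `dst` points at writable, executable memory large enough for the generated code, omits the cache's `begin_codegen()`/`end_codegen()` bookkeeping and alignment handling, and uses `hostEnvironment()`, `codeSize()` and `copyFlattenedData()` as the diff does, so it assumes the asmjit version bundled with MAME at this commit.

```cpp
#include <asmjit/x86.h>
#include <cstdint>
using namespace asmjit;

// Emit a tiny function at a caller-supplied address and return its size (0 on failure).
// Assumes 'dst' is writable and executable and large enough for the generated code.
size_t emit_at(uint8_t *dst)
{
    CodeHolder ch;
    ch.init(hostEnvironment(), uint64_t(dst));   // generate code for this base address

    x86::Assembler a(&ch);
    a.mov(x86::eax, x86::ptr(x86::esp, 4));      // mov eax,[esp+4]
    a.ret();                                     // ret

    size_t const code_size = ch.codeSize();
    if (ch.copyFlattenedData(dst, code_size, CodeHolder::kCopyWithPadding))
        return 0;                                // asmjit reported an error
    return code_size;
}
```

The real `emit()` additionally accounts for the alignment padding between the cache top and the chosen base address, reserves that space with `m_cache.begin_codegen()`, and returns 0 so that callers such as `generate()` can abort the block when the cache is full.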
diff --git a/src/devices/cpu/drcbex86.cpp b/src/devices/cpu/drcbex86.cpp index 794d1007418..647d3c4c351 100644 --- a/src/devices/cpu/drcbex86.cpp +++ b/src/devices/cpu/drcbex86.cpp @@ -92,16 +92,6 @@ // This is a trick to make it build on Android where the x86 SDK declares ::REG_Exx namespace drc { using namespace uml; -using namespace x86emit; - -using x86emit::REG_EAX; -using x86emit::REG_ECX; -using x86emit::REG_EDX; -using x86emit::REG_EBX; -using x86emit::REG_ESP; -using x86emit::REG_EBP; -using x86emit::REG_ESI; -using x86emit::REG_EDI; //************************************************************************** // DEBUGGING @@ -151,9 +141,9 @@ drcbe_x86::opcode_generate_func drcbe_x86::s_opcode_table[OP_MAX]; //static const uint64_t size_to_mask[] = { 0, 0xff, 0xffff, 0, 0xffffffff, 0, 0, 0, 0xffffffffffffffffU }; // register mapping tables -static const uint8_t int_register_map[REG_I_COUNT] = +static const Gp::Id int_register_map[REG_I_COUNT] = { - REG_EBX, REG_ESI, REG_EDI, REG_EBP + Gp::kIdBx, Gp::kIdSi, Gp::kIdDi, Gp::kIdBp }; // flags mapping tables @@ -161,24 +151,24 @@ static uint8_t flags_map[0x1000]; static uint32_t flags_unmap[0x20]; // condition mapping table -static const uint8_t condition_map[uml::COND_MAX - uml::COND_Z] = -{ - x86emit::COND_Z, // COND_Z = 0x80, requires Z - x86emit::COND_NZ, // COND_NZ, requires Z - x86emit::COND_S, // COND_S, requires S - x86emit::COND_NS, // COND_NS, requires S - x86emit::COND_C, // COND_C, requires C - x86emit::COND_NC, // COND_NC, requires C - x86emit::COND_O, // COND_V, requires V - x86emit::COND_NO, // COND_NV, requires V - x86emit::COND_P, // COND_U, requires U - x86emit::COND_NP, // COND_NU, requires U - x86emit::COND_A, // COND_A, requires CZ - x86emit::COND_BE, // COND_BE, requires CZ - x86emit::COND_G, // COND_G, requires SVZ - x86emit::COND_LE, // COND_LE, requires SVZ - x86emit::COND_L, // COND_L, requires SV - x86emit::COND_GE, // COND_GE, requires SV +static const Condition::Code condition_map[uml::COND_MAX - uml::COND_Z] = +{ + Condition::Code::kZ, // COND_Z = 0x80, requires Z + Condition::Code::kNZ, // COND_NZ, requires Z + Condition::Code::kS, // COND_S, requires S + Condition::Code::kNS, // COND_NS, requires S + Condition::Code::kC, // COND_C, requires C + Condition::Code::kNC, // COND_NC, requires C + Condition::Code::kO, // COND_V, requires V + Condition::Code::kNO, // COND_NV, requires V + Condition::Code::kP, // COND_U, requires U + Condition::Code::kNP, // COND_NU, requires U + Condition::Code::kA, // COND_A, requires CZ + Condition::Code::kBE, // COND_BE, requires CZ + Condition::Code::kG, // COND_G, requires SVZ + Condition::Code::kLE, // COND_LE, requires SVZ + Condition::Code::kL, // COND_L, requires SV + Condition::Code::kGE, // COND_GE, requires SV }; // FPU control register mapping @@ -287,6 +277,14 @@ const drcbe_x86::opcode_table_entry drcbe_x86::s_opcode_table_source[] = { uml::OP_ICOPYF, &drcbe_x86::op_icopyf }, // ICOPYF dst,src }; +class ThrowableErrorHandler : public ErrorHandler +{ +public: + void handleError(Error err, const char *message, BaseEmitter *origin) override + { + throw emu_fatalerror("asmjit error %d: %s", err, message); + } +}; //************************************************************************** @@ -298,7 +296,7 @@ const drcbe_x86::opcode_table_entry drcbe_x86::s_opcode_table_source[] = // into a reduced set //------------------------------------------------- -drcbe_x86::be_parameter::be_parameter(drcbe_x86 &drcbe, const parameter ¶m, uint32_t allowed) 
+drcbe_x86::be_parameter::be_parameter(drcbe_x86 &drcbe, parameter const ¶m, uint32_t allowed) { int regnum; @@ -347,21 +345,28 @@ drcbe_x86::be_parameter::be_parameter(drcbe_x86 &drcbe, const parameter ¶m, // checkparam //------------------------------------------------- -inline int drcbe_x86::be_parameter::select_register(int defreg) const +inline Gp drcbe_x86::be_parameter::select_register(Gp const &defreg) const +{ + if (m_type == PTYPE_INT_REGISTER) + return Gpd(m_value); + return defreg; +} + +inline Xmm drcbe_x86::be_parameter::select_register(Xmm defreg) const { - if (m_type == PTYPE_INT_REGISTER || m_type == PTYPE_FLOAT_REGISTER || m_type == PTYPE_VECTOR_REGISTER) - return m_value; + if (m_type == PTYPE_FLOAT_REGISTER) + return Xmm(m_value); return defreg; } -inline int drcbe_x86::be_parameter::select_register(int defreg, const be_parameter &checkparam) const +template <typename T> T drcbe_x86::be_parameter::select_register(T defreg, be_parameter const &checkparam) const { if (*this == checkparam) return defreg; return select_register(defreg); } -inline int drcbe_x86::be_parameter::select_register(int defreg, const be_parameter &checkparam, const be_parameter &checkparam2) const +template <typename T> T drcbe_x86::be_parameter::select_register(T defreg, be_parameter const &checkparam, be_parameter const &checkparam2) const { if (*this == checkparam || *this == checkparam2) return defreg; @@ -400,16 +405,16 @@ inline void drcbe_x86::normalize_commutative(be_parameter &inner, be_parameter & // two 32-bit operations //------------------------------------------------- -inline void drcbe_x86::emit_combine_z_flags(x86code *&dst) +inline void drcbe_x86::emit_combine_z_flags(Assembler &a) { // this assumes that the flags from the low 32-bit op are on the stack // and the flags from the high 32-bit op are live - emit_pushf(dst); // pushf - emit_mov_r32_m32(dst, REG_ECX, MBD(REG_ESP, 4)); // mov ecx,[esp+4] - emit_or_r32_imm(dst, REG_ECX, ~0x40); // or ecx,~0x40 - emit_and_m32_r32(dst, MBD(REG_ESP, 0), REG_ECX); // and [esp],ecx - emit_popf(dst); // popf - emit_lea_r32_m32(dst, REG_ESP, MBD(REG_ESP, 4)); // lea esp,[esp+4] + a.pushfd(); // pushf + a.mov(ecx, ptr(esp, 4)); // mov ecx,[esp+4] + a.or_(ecx, ~0x40); // or ecx,~0x40 + a.and_(ptr(esp, 0), ecx); // and [esp],ecx + a.popfd(); // popf + a.lea(esp, ptr(esp, 4)); // lea esp,[esp+4] } @@ -418,15 +423,15 @@ inline void drcbe_x86::emit_combine_z_flags(x86code *&dst) // flags from two 32-bit shift left operations //------------------------------------------------- -inline void drcbe_x86::emit_combine_z_shl_flags(x86code *&dst) +inline void drcbe_x86::emit_combine_z_shl_flags(Assembler &a) { // this assumes that the flags from the high 32-bit op are on the stack // and the flags from the low 32-bit op are live - emit_pushf(dst); // pushf - emit_pop_r32(dst, REG_ECX); // pop ecx - emit_or_r32_imm(dst, REG_ECX, ~0x40); // or ecx,~0x40 - emit_and_m32_r32(dst, MBD(REG_ESP, 0), REG_ECX); // and [esp],ecx - emit_popf(dst); // popf + a.pushfd(); // pushf + a.pop(ecx); // pop ecx + a.or_(ecx, ~0x40); // or ecx,~0x40 + a.and_(ptr(esp, 0), ecx); // and [esp],ecx + a.popfd(); // popf } @@ -437,8 +442,8 @@ inline void drcbe_x86::emit_combine_z_shl_flags(x86code *&dst) inline void drcbe_x86::reset_last_upper_lower_reg() { - m_last_lower_reg = REG_NONE; - m_last_upper_reg = REG_NONE; + m_last_lower_reg = Gp(); + m_last_upper_reg = Gp(); } @@ -447,13 +452,13 @@ inline void drcbe_x86::reset_last_upper_lower_reg() // loaded a lower register 
//------------------------------------------------- -inline void drcbe_x86::set_last_lower_reg(x86code *&dst, const be_parameter ¶m, uint8_t reglo) +inline void drcbe_x86::set_last_lower_reg(Assembler &a, be_parameter const ¶m, Gp const ®lo) { if (param.is_memory()) { m_last_lower_reg = reglo; m_last_lower_addr = (uint32_t *)((uintptr_t)param.memory()); - m_last_lower_pc = dst; + m_last_lower_pc = (x86code *)(a.code()->baseAddress() + a.offset()); } } @@ -463,11 +468,11 @@ inline void drcbe_x86::set_last_lower_reg(x86code *&dst, const be_parameter &par // loaded an upper register //------------------------------------------------- -inline void drcbe_x86::set_last_upper_reg(x86code *&dst, const be_parameter ¶m, uint8_t reghi) +inline void drcbe_x86::set_last_upper_reg(Assembler &a, be_parameter const ¶m, Gp const ®hi) { m_last_upper_reg = reghi; m_last_upper_addr = (param.is_int_register()) ? m_reghi[param.ireg()] : (uint32_t *)((uintptr_t)param.memory(4)); - m_last_upper_pc = dst; + m_last_upper_pc = (x86code *)(a.code()->baseAddress() + a.offset()); } @@ -476,9 +481,9 @@ inline void drcbe_x86::set_last_upper_reg(x86code *&dst, const be_parameter &par // skip re-loading a lower half of a register //------------------------------------------------- -inline bool drcbe_x86::can_skip_lower_load(x86code *&dst, uint32_t *memref, uint8_t reglo) +inline bool drcbe_x86::can_skip_lower_load(Assembler &a, uint32_t *memref, Gp const ®lo) { - return (dst == m_last_lower_pc && memref == m_last_lower_addr && reglo == m_last_lower_reg); + return ((x86code *)(a.code()->baseAddress() + a.offset()) == m_last_lower_pc && memref == m_last_lower_addr && reglo == m_last_lower_reg); } @@ -487,32 +492,13 @@ inline bool drcbe_x86::can_skip_lower_load(x86code *&dst, uint32_t *memref, uint // skip re-loading an upper half of a register //------------------------------------------------- -inline bool drcbe_x86::can_skip_upper_load(x86code *&dst, uint32_t *memref, uint8_t reghi) +inline bool drcbe_x86::can_skip_upper_load(Assembler &a, uint32_t *memref, Gp const ®hi) { - return (dst == m_last_upper_pc && memref == m_last_upper_addr && reghi == m_last_upper_reg); + return ((x86code *)(a.code()->baseAddress() + a.offset()) == m_last_upper_pc && memref == m_last_upper_addr && reghi == m_last_upper_reg); } //------------------------------------------------- -// track_resolve_link - wrapper for resolve_link -// that resets all register tracking info -//------------------------------------------------- - -inline void drcbe_x86::track_resolve_link(x86code *&destptr, const emit_link &linkinfo) -{ - reset_last_upper_lower_reg(); - resolve_link(destptr, linkinfo); -} - -#define resolve_link INVALID - - - -//************************************************************************** -// BACKEND CALLBACKS -//************************************************************************** - -//------------------------------------------------- // drcbe_x86 - constructor //------------------------------------------------- @@ -520,19 +506,19 @@ drcbe_x86::drcbe_x86(drcuml_state &drcuml, device_t &device, drc_cache &cache, u : drcbe_interface(drcuml, cache, device), m_hash(cache, modes, addrbits, ignorebits), m_map(cache, 0), - m_labels(cache), m_log(nullptr), + m_log_asmjit(nullptr), m_logged_common(false), - m_sse3(false), + m_sse3(CpuInfo::host().features().as<Features>().hasSSE3()), m_entry(nullptr), m_exit(nullptr), m_nocode(nullptr), m_save(nullptr), m_restore(nullptr), - m_last_lower_reg(REG_NONE), + m_last_lower_reg(Gp()), 
m_last_lower_pc(nullptr), m_last_lower_addr(nullptr), - m_last_upper_reg(REG_NONE), + m_last_upper_reg(Gp()), m_last_upper_pc(nullptr), m_last_upper_addr(nullptr), m_fptemp(0), @@ -541,9 +527,7 @@ drcbe_x86::drcbe_x86(drcuml_state &drcuml, device_t &device, drc_cache &cache, u m_stacksave(nullptr), m_hashstacksave(nullptr), m_reslo(0), - m_reshi(0), - m_fixup_label(&drcbe_x86::fixup_label, this), - m_fixup_exception(&drcbe_x86::fixup_exception, this) + m_reshi(0) { // compute hi pointers for each register for (int regnum = 0; regnum < ARRAY_LENGTH(int_register_map); regnum++) @@ -584,6 +568,7 @@ drcbe_x86::drcbe_x86(drcuml_state &drcuml, device_t &device, drc_cache &cache, u { std::string filename = std::string("drcbex86_").append(device.shortname()).append(".asm"); m_log = x86log_create_context(filename.c_str()); + m_log_asmjit = fopen(std::string("drcbex86_asmjit_").append(device.shortname()).append(".asm").c_str(), "w"); } } @@ -597,8 +582,50 @@ drcbe_x86::~drcbe_x86() // free the log context if (m_log != nullptr) x86log_free_context(m_log); + + if (m_log_asmjit) + fclose(m_log_asmjit); } +size_t drcbe_x86::emit(CodeHolder &ch) +{ + Error err; + + // the following three calls aren't currently required, but may be if + // other asmjist features are used in future + if (false) + { + err = ch.flatten(); + if (err) + throw emu_fatalerror("asmjit::CodeHolder::flatten() error %d", err); + + err = ch.resolveUnresolvedLinks(); + if (err) + throw emu_fatalerror("asmjit::CodeHolder::resolveUnresolvedLinks() error %d", err); + + err = ch.relocateToBase(ch.baseAddress()); + if (err) + throw emu_fatalerror("asmjit::CodeHolder::relocateToBase() error %d", err); + } + + size_t const alignment = ch.baseAddress() - uint64_t(m_cache.top()); + size_t const code_size = ch.codeSize(); + + // test if enough room remains in drc cache + drccodeptr *cachetop = m_cache.begin_codegen(alignment + code_size); + if (cachetop == nullptr) + return 0; + + err = ch.copyFlattenedData(drccodeptr(ch.baseAddress()), code_size, CodeHolder::kCopyWithPadding); + if (err) + throw emu_fatalerror("asmjit::CodeHolder::copyFlattenedData() error %d", err); + + // update the drc cache and end codegen + *cachetop += alignment + code_size; + m_cache.end_codegen(); + + return code_size; +} //------------------------------------------------- // reset - reset back-end specific state @@ -611,139 +638,148 @@ void drcbe_x86::reset() x86log_printf(m_log, "%s", "\n\n===========\nCACHE RESET\n===========\n\n"); // generate a little bit of glue code to set up the environment - drccodeptr *cachetop = m_cache.begin_codegen(500); - if (cachetop == nullptr) - fatalerror("Out of cache space after a reset!\n"); + x86code *dst = (x86code *)m_cache.top(); - x86code *dst = (x86code *)*cachetop; + CodeHolder ch; + ch.init(hostEnvironment(), uint64_t(dst)); - // generate a simple CPUID stub - uint32_t (*cpuid_ecx_stub)(void) = (uint32_t (*)(void))dst; - emit_push_r32(dst, REG_EBX); // push ebx - emit_mov_r32_imm(dst, REG_EAX, 1); // mov eax,1 - emit_cpuid(dst); // cpuid - emit_mov_r32_r32(dst, REG_EAX, REG_ECX); // mov eax,ecx - emit_pop_r32(dst, REG_EBX); // pop ebx - emit_ret(dst); // ret + FileLogger logger(m_log_asmjit); + if (logger.file()) + { + logger.setFlags(FormatOptions::Flags::kFlagHexOffsets | FormatOptions::Flags::kFlagHexImms | FormatOptions::Flags::kFlagMachineCode); + logger.setIndentation(FormatOptions::IndentationType::kIndentationCode, 4); + ch.setLogger(&logger); + } - // call it to determine if we have SSE3 support - m_sse3 = 
(((*cpuid_ecx_stub)() & 1) != 0); + Assembler a(&ch); + if (logger.file()) + a.addValidationOptions(BaseEmitter::kValidationOptionIntermediate); // generate an entry point m_entry = (x86_entry_point_func)dst; - emit_mov_r32_m32(dst, REG_EAX, MBD(REG_ESP, 4)); // mov eax,[esp+4] - emit_push_r32(dst, REG_EBX); // push ebx - emit_push_r32(dst, REG_ESI); // push esi - emit_push_r32(dst, REG_EDI); // push edi - emit_push_r32(dst, REG_EBP); // push ebp - emit_sub_r32_imm(dst, REG_ESP, 24); // sub esp,24 - emit_mov_m32_r32(dst, MABS(&m_hashstacksave), REG_ESP); // mov [hashstacksave],esp - emit_sub_r32_imm(dst, REG_ESP, 4); // sub esp,4 - emit_mov_m32_r32(dst, MABS(&m_stacksave), REG_ESP); // mov [stacksave],esp - emit_fstcw_m16(dst, MABS(&m_fpumode)); // fstcw [fpumode] - emit_jmp_r32(dst, REG_EAX); // jmp eax - if (m_log != nullptr && !m_logged_common) - x86log_disasm_code_range(m_log, "entry_point", (x86code *)m_entry, dst); + a.bind(a.newNamedLabel("entry_point")); + + FuncDetail entry_point; + entry_point.init(FuncSignatureT<uint32_t, x86code *>(CallConv::kIdHost), hostEnvironment()); + + FuncFrame frame; + frame.init(entry_point); + frame.addDirtyRegs(ebx, esi, edi, ebp); + FuncArgsAssignment args(&entry_point); + args.assignAll(eax); + args.updateFuncFrame(frame); + frame.finalize(); + + a.emitProlog(frame); + a.emitArgsAssignment(frame, args); + a.sub(esp, 24); // sub esp,24 + a.mov(MABS(&m_hashstacksave), esp); // mov [hashstacksave],esp + a.sub(esp, 4); // sub esp,4 + a.mov(MABS(&m_stacksave), esp); // mov [stacksave],esp + a.fnstcw(MABS(&m_fpumode)); // fstcw [fpumode] + a.jmp(eax); // jmp eax // generate an exit point - m_exit = dst; - emit_fldcw_m16(dst, MABS(&m_fpumode)); // fldcw [fpumode] - emit_mov_r32_m32(dst, REG_ESP, MABS(&m_hashstacksave)); // mov esp,[hashstacksave] - emit_add_r32_imm(dst, REG_ESP, 24); // add esp,24 - emit_pop_r32(dst, REG_EBP); // pop ebp - emit_pop_r32(dst, REG_EDI); // pop edi - emit_pop_r32(dst, REG_ESI); // pop esi - emit_pop_r32(dst, REG_EBX); // pop ebx - emit_ret(dst); // ret - if (m_log != nullptr && !m_logged_common) - x86log_disasm_code_range(m_log, "exit_point", m_exit, dst); + m_exit = dst + a.offset(); + a.bind(a.newNamedLabel("exit_point")); + a.fldcw(MABS(&m_fpumode)); // fldcw [fpumode] + a.mov(esp, MABS(&m_hashstacksave)); // mov esp,[hashstacksave] + a.add(esp, 24); // add esp,24 + a.emitEpilog(frame); // generate a no code point - m_nocode = dst; - emit_ret(dst); // ret - if (m_log != nullptr && !m_logged_common) - x86log_disasm_code_range(m_log, "nocode", m_nocode, dst); + m_nocode = dst + a.offset(); + a.bind(a.newNamedLabel("nocode_point")); + a.ret(); // ret // generate a save subroutine - m_save = dst; - emit_pushf(dst); // pushf - emit_pop_r32(dst, REG_EAX); // pop eax - emit_and_r32_imm(dst, REG_EAX, 0x8c5); // and eax,0x8c5 - emit_mov_r8_m8(dst, REG_AL, MABSI(flags_map, REG_EAX)); // mov al,[flags_map] - emit_mov_m8_r8(dst, MBD(REG_ECX, offsetof(drcuml_machine_state, flags)), REG_AL); // mov state->flags,al - emit_mov_r8_m8(dst, REG_AL, MABS(&m_state.fmod)); // mov al,[fmod] - emit_mov_m8_r8(dst, MBD(REG_ECX, offsetof(drcuml_machine_state, fmod)), REG_AL); // mov state->fmod,al - emit_mov_r32_m32(dst, REG_EAX, MABS(&m_state.exp)); // mov eax,[exp] - emit_mov_m32_r32(dst, MBD(REG_ECX, offsetof(drcuml_machine_state, exp)), REG_EAX); // mov state->exp,eax + m_save = dst + a.offset(); + a.bind(a.newNamedLabel("save")); + a.pushfd(); // pushf + a.pop(eax); // pop eax + a.and_(eax, 0x8c5); // and eax,0x8c5 + a.mov(al, 
ptr(u64(flags_map), eax)); // mov al,[flags_map] + a.mov(ptr(ecx, offsetof(drcuml_machine_state, flags)), al); // mov state->flags,al + a.mov(al, MABS(&m_state.fmod)); // mov al,[fmod] + a.mov(ptr(ecx, offsetof(drcuml_machine_state, fmod)), al); // mov state->fmod,al + a.mov(eax, MABS(&m_state.exp)); // mov eax,[exp] + a.mov(ptr(ecx, offsetof(drcuml_machine_state, exp)), eax); // mov state->exp,eax for (int regnum = 0; regnum < ARRAY_LENGTH(m_state.r); regnum++) { uintptr_t regoffsl = (uintptr_t)&((drcuml_machine_state *)nullptr)->r[regnum].w.l; uintptr_t regoffsh = (uintptr_t)&((drcuml_machine_state *)nullptr)->r[regnum].w.h; if (int_register_map[regnum] != 0) - emit_mov_m32_r32(dst, MBD(REG_ECX, regoffsl), int_register_map[regnum]); + a.mov(ptr(ecx, regoffsl), Gpd(int_register_map[regnum])); else { - emit_mov_r32_m32(dst, REG_EAX, MABS(&m_state.r[regnum].w.l)); - emit_mov_m32_r32(dst, MBD(REG_ECX, regoffsl), REG_EAX); + a.mov(eax, MABS(&m_state.r[regnum].w.l)); + a.mov(ptr(ecx, regoffsl), eax); } - emit_mov_r32_m32(dst, REG_EAX, MABS(&m_state.r[regnum].w.h)); - emit_mov_m32_r32(dst, MBD(REG_ECX, regoffsh), REG_EAX); + a.mov(eax, MABS(&m_state.r[regnum].w.h)); + a.mov(ptr(ecx, regoffsh), eax); } for (int regnum = 0; regnum < ARRAY_LENGTH(m_state.f); regnum++) { uintptr_t regoffsl = (uintptr_t)&((drcuml_machine_state *)nullptr)->f[regnum].s.l; uintptr_t regoffsh = (uintptr_t)&((drcuml_machine_state *)nullptr)->f[regnum].s.h; - emit_mov_r32_m32(dst, REG_EAX, MABS(&m_state.f[regnum].s.l)); - emit_mov_m32_r32(dst, MBD(REG_ECX, regoffsl), REG_EAX); - emit_mov_r32_m32(dst, REG_EAX, MABS(&m_state.f[regnum].s.h)); - emit_mov_m32_r32(dst, MBD(REG_ECX, regoffsh), REG_EAX); + a.mov(eax, MABS(&m_state.f[regnum].s.l)); + a.mov(ptr(ecx, regoffsl), eax); + a.mov(eax, MABS(&m_state.f[regnum].s.h)); + a.mov(ptr(ecx, regoffsh), eax); } - emit_ret(dst); // ret - if (m_log != nullptr && !m_logged_common) - x86log_disasm_code_range(m_log, "save", m_save, dst); + a.ret(); // ret // generate a restore subroutine - m_restore = dst; + m_restore = dst + a.offset(); + a.bind(a.newNamedLabel("restore")); for (int regnum = 0; regnum < ARRAY_LENGTH(m_state.r); regnum++) { uintptr_t regoffsl = (uintptr_t)&((drcuml_machine_state *)nullptr)->r[regnum].w.l; uintptr_t regoffsh = (uintptr_t)&((drcuml_machine_state *)nullptr)->r[regnum].w.h; if (int_register_map[regnum] != 0) - emit_mov_r32_m32(dst, int_register_map[regnum], MBD(REG_ECX, regoffsl)); + a.mov(Gpd(int_register_map[regnum]), ptr(ecx, regoffsl)); else { - emit_mov_r32_m32(dst, REG_EAX, MBD(REG_ECX, regoffsl)); - emit_mov_m32_r32(dst, MABS(&m_state.r[regnum].w.l), REG_EAX); + a.mov(eax, ptr(ecx, regoffsl)); + a.mov(MABS(&m_state.r[regnum].w.l), eax); } - emit_mov_r32_m32(dst, REG_EAX, MBD(REG_ECX, regoffsh)); - emit_mov_m32_r32(dst, MABS(&m_state.r[regnum].w.h), REG_EAX); + a.mov(eax, ptr(ecx, regoffsh)); + a.mov(MABS(&m_state.r[regnum].w.h), eax); } for (int regnum = 0; regnum < ARRAY_LENGTH(m_state.f); regnum++) { uintptr_t regoffsl = (uintptr_t)&((drcuml_machine_state *)nullptr)->f[regnum].s.l; uintptr_t regoffsh = (uintptr_t)&((drcuml_machine_state *)nullptr)->f[regnum].s.h; - emit_mov_r32_m32(dst, REG_EAX, MBD(REG_ECX, regoffsl)); - emit_mov_m32_r32(dst, MABS(&m_state.f[regnum].s.l), REG_EAX); - emit_mov_r32_m32(dst, REG_EAX, MBD(REG_ECX, regoffsh)); - emit_mov_m32_r32(dst, MABS(&m_state.f[regnum].s.h), REG_EAX); - } - emit_movzx_r32_m8(dst, REG_EAX, MBD(REG_ECX, offsetof(drcuml_machine_state, fmod)));// movzx eax,state->fmod - emit_and_r32_imm(dst, REG_EAX, 
3); // and eax,3 - emit_mov_m8_r8(dst, MABS(&m_state.fmod), REG_AL); // mov [fmod],al - emit_fldcw_m16(dst, MABSI(&fp_control[0], REG_EAX, 2)); // fldcw fp_control[eax] - emit_mov_r32_m32(dst, REG_EAX, MBD(REG_ECX, offsetof(drcuml_machine_state, exp))); // mov eax,state->exp - emit_mov_m32_r32(dst, MABS(&m_state.exp), REG_EAX); // mov [exp],eax - emit_movzx_r32_m8(dst, REG_EAX, MBD(REG_ECX, offsetof(drcuml_machine_state, flags)));// movzx eax,state->flags - emit_push_m32(dst, MABSI(flags_unmap, REG_EAX, 4)); // push flags_unmap[eax*4] - emit_popf(dst); // popf - emit_ret(dst); // ret + a.mov(eax, ptr(ecx, regoffsl)); + a.mov(MABS(&m_state.f[regnum].s.l), eax); + a.mov(eax, ptr(ecx, regoffsh)); + a.mov(MABS(&m_state.f[regnum].s.h), eax); + } + a.movzx(eax, byte_ptr(ecx, offsetof(drcuml_machine_state, fmod))); // movzx eax,state->fmod + a.and_(eax, 3); // and eax,3 + a.mov(MABS(&m_state.fmod), al); // mov [fmod],al + a.fldcw(word_ptr(u64(&fp_control[0]), eax, 1)); // fldcw fp_control[eax*2] + a.mov(eax, ptr(ecx, offsetof(drcuml_machine_state, exp))); // mov eax,state->exp + a.mov(MABS(&m_state.exp), eax); // mov [exp],eax + a.movzx(eax, byte_ptr(ecx, offsetof(drcuml_machine_state, flags))); // movzx eax,state->flags + a.push(dword_ptr(u64(flags_unmap), eax, 2)); // push flags_unmap[eax*4] + a.popfd(); // popf + a.ret(); // ret + + + // emit the generated code + size_t bytes = emit(ch); + if (m_log != nullptr && !m_logged_common) - x86log_disasm_code_range(m_log, "restore", m_restore, dst); + { + x86log_disasm_code_range(m_log, "entry_point", dst, m_exit); + x86log_disasm_code_range(m_log, "exit_point", m_exit, m_nocode); + x86log_disasm_code_range(m_log, "nocode_point", m_nocode, m_save); + x86log_disasm_code_range(m_log, "save", m_save, m_restore); + x86log_disasm_code_range(m_log, "restore", m_restore, dst + bytes); - // finish up codegen - *cachetop = dst; - m_cache.end_codegen(); - m_logged_common = true; + m_logged_common = true; + } // reset our hash tables m_hash.reset(); @@ -771,17 +807,27 @@ void drcbe_x86::generate(drcuml_block &block, const instruction *instlist, uint3 { // tell all of our utility objects that a block is beginning m_hash.block_begin(block, instlist, numinst); - m_labels.block_begin(block); m_map.block_begin(block); - // begin codegen; fail if we can't - drccodeptr *cachetop = m_cache.begin_codegen(numinst * 8 * 4); - if (cachetop == nullptr) - block.abort(); - // compute the base by aligning the cache top to a cache line (assumed to be 64 bytes) - x86code *base = (x86code *)(((uintptr_t)*cachetop + 63) & ~63); - x86code *dst = base; + x86code *dst = (x86code *)(uint64_t(m_cache.top() + 63) & ~63); + + CodeHolder ch; + ch.init(hostEnvironment(), uint64_t(dst)); + ThrowableErrorHandler e; + ch.setErrorHandler(&e); + + FileLogger logger(m_log_asmjit); + if (logger.file()) + { + logger.setFlags(FormatOptions::Flags::kFlagHexOffsets | FormatOptions::Flags::kFlagHexImms | FormatOptions::Flags::kFlagMachineCode); + logger.setIndentation(FormatOptions::IndentationType::kIndentationCode, 4); + ch.setLogger(&logger); + } + + Assembler a(&ch); + if (logger.file()) + a.addValidationOptions(BaseEmitter::kValidationOptionIntermediate); // generate code std::string blockname; @@ -790,11 +836,15 @@ void drcbe_x86::generate(drcuml_block &block, const instruction *instlist, uint3 const instruction &inst = instlist[inum]; assert(inst.opcode() < ARRAY_LENGTH(s_opcode_table)); + // must remain in scope until output + std::string dasm; + // add a comment if (m_log != nullptr) { - 
std::string dasm = inst.disasm(&m_drcuml); - x86log_add_comment(m_log, dst, "%s", dasm.c_str()); + dasm = inst.disasm(&m_drcuml); + x86log_add_comment(m_log, dst + a.offset(), "%s", dasm.c_str()); + a.setInlineComment(dasm.c_str()); } // extract a blockname @@ -807,20 +857,20 @@ void drcbe_x86::generate(drcuml_block &block, const instruction *instlist, uint3 } // generate code - (this->*s_opcode_table[inst.opcode()])(dst, inst); + (this->*s_opcode_table[inst.opcode()])(a, inst); } - // complete codegen - *cachetop = (drccodeptr)dst; - m_cache.end_codegen(); + // emit the generated code + size_t const bytes = emit(ch); + if (!bytes) + block.abort(); // log it if (m_log != nullptr) - x86log_disasm_code_range(m_log, (blockname.empty()) ? "Unknown block" : blockname.c_str(), base, m_cache.top()); + x86log_disasm_code_range(m_log, (blockname.empty()) ? "Unknown block" : blockname.c_str(), dst, dst + bytes); // tell all of our utility objects that the block is finished m_hash.block_end(block); - m_labels.block_end(block); m_map.block_end(block); } @@ -860,21 +910,21 @@ void drcbe_x86::get_info(drcbe_info &info) // into a register //------------------------------------------------- -void drcbe_x86::emit_mov_r32_p32(x86code *&dst, uint8_t reg, const be_parameter ¶m) +void drcbe_x86::emit_mov_r32_p32(Assembler &a, Gp const ®, be_parameter const ¶m) { if (param.is_immediate()) { if (param.immediate() == 0) - emit_xor_r32_r32(dst, reg, reg); // xor reg,reg + a.xor_(reg, reg); // xor reg,reg else - emit_mov_r32_imm(dst, reg, param.immediate()); // mov reg,param + a.mov(reg, param.immediate()); // mov reg,param } else if (param.is_memory()) - emit_mov_r32_m32(dst, reg, MABS(param.memory())); // mov reg,[param] + a.mov(reg, MABS(param.memory())); // mov reg,[param] else if (param.is_int_register()) { - if (reg != param.ireg()) - emit_mov_r32_r32(dst, reg, param.ireg()); // mov reg,param + if (reg.id() != param.ireg()) + a.mov(reg, Gpd(param.ireg())); // mov reg,param } } @@ -885,19 +935,19 @@ void drcbe_x86::emit_mov_r32_p32(x86code *&dst, uint8_t reg, const be_parameter // any flags //------------------------------------------------- -void drcbe_x86::emit_mov_r32_p32_keepflags(x86code *&dst, uint8_t reg, const be_parameter ¶m) +void drcbe_x86::emit_mov_r32_p32_keepflags(Assembler &a, Gp const ®, be_parameter const ¶m) { if (param.is_immediate()) - emit_mov_r32_imm(dst, reg, param.immediate()); // mov reg,param + a.mov(reg, param.immediate()); // mov reg,param else if (param.is_memory()) { - if (!can_skip_lower_load(dst, (uint32_t *)((uintptr_t)param.memory()), reg)) - emit_mov_r32_m32(dst, reg, MABS(param.memory())); // mov reg,[param] + if (!can_skip_lower_load(a, (uint32_t *)((uintptr_t)param.memory()), reg)) + a.mov(reg, MABS(param.memory())); // mov reg,[param] } else if (param.is_int_register()) { - if (reg != param.ireg()) - emit_mov_r32_r32(dst, reg, param.ireg()); // mov reg,param + if (reg.id() != param.ireg()) + a.mov(reg, Gpd(param.ireg())); // mov reg,param } } @@ -907,18 +957,18 @@ void drcbe_x86::emit_mov_r32_p32_keepflags(x86code *&dst, uint8_t reg, const be_ // into a memory location //------------------------------------------------- -void drcbe_x86::emit_mov_m32_p32(x86code *&dst, x86_memref memref, const be_parameter ¶m) +void drcbe_x86::emit_mov_m32_p32(Assembler &a, Mem memref, be_parameter const ¶m) { if (param.is_immediate()) - emit_mov_m32_imm(dst, memref, param.immediate()); // mov [mem],param + a.mov(memref, param.immediate()); // mov [mem],param else if (param.is_memory()) { 
- if (!can_skip_lower_load(dst, (uint32_t *)((uintptr_t)param.memory()), REG_EAX)) - emit_mov_r32_m32(dst, REG_EAX, MABS(param.memory())); // mov eax,[param] - emit_mov_m32_r32(dst, memref, REG_EAX); // mov [mem],eax + if (!can_skip_lower_load(a, (uint32_t *)((uintptr_t)param.memory()), eax)) + a.mov(eax, MABS(param.memory())); // mov eax,[param] + a.mov(memref, eax); // mov [mem],eax } else if (param.is_int_register()) - emit_mov_m32_r32(dst, memref, param.ireg()); // mov [mem],param + a.mov(memref, Gpd(param.ireg())); // mov [mem],param } @@ -927,688 +977,65 @@ void drcbe_x86::emit_mov_m32_p32(x86code *&dst, x86_memref memref, const be_para // 32-bit parameter //------------------------------------------------- -void drcbe_x86::emit_mov_p32_r32(x86code *&dst, const be_parameter ¶m, uint8_t reg) +void drcbe_x86::emit_mov_p32_r32(Assembler &a, be_parameter const ¶m, Gp const ®) { assert(!param.is_immediate()); if (param.is_memory()) { - emit_mov_m32_r32(dst, MABS(param.memory()), reg); // mov [param],reg - set_last_lower_reg(dst, param, reg); - } - else if (param.is_int_register()) - { - if (reg != param.ireg()) - emit_mov_r32_r32(dst, param.ireg(), reg); // mov param,reg - } -} - - -//------------------------------------------------- -// emit_add_r32_p32 - add operation to a 32-bit -// register from a 32-bit parameter -//------------------------------------------------- - -void drcbe_x86::emit_add_r32_p32(x86code *&dst, uint8_t reg, const be_parameter ¶m, const instruction &inst) -{ - if (param.is_immediate()) - { - if (inst.flags() != 0 || param.immediate() != 0) - emit_add_r32_imm(dst, reg, param.immediate()); // add reg,param - } - else if (param.is_memory()) - emit_add_r32_m32(dst, reg, MABS(param.memory())); // add reg,[param] - else if (param.is_int_register()) - emit_add_r32_r32(dst, reg, param.ireg()); // add reg,param -} - - -//------------------------------------------------- -// emit_add_m32_p32 - add operation to a 32-bit -// memory location from a 32-bit parameter -//------------------------------------------------- - -void drcbe_x86::emit_add_m32_p32(x86code *&dst, x86_memref memref, const be_parameter ¶m, const instruction &inst) -{ - if (param.is_immediate()) - { - if (inst.flags() != 0 || param.immediate() != 0) - emit_add_m32_imm(dst, memref, param.immediate()); // add [dest],param - } - else - { - int reg = param.select_register(REG_EAX); - emit_mov_r32_p32(dst, reg, param); // mov reg,param - emit_add_m32_r32(dst, memref, reg); // add [dest],reg - } -} - - -//------------------------------------------------- -// emit_adc_r32_p32 - adc operation to a 32-bit -// register from a 32-bit parameter -//------------------------------------------------- - -void drcbe_x86::emit_adc_r32_p32(x86code *&dst, uint8_t reg, const be_parameter ¶m, const instruction &inst) -{ - if (param.is_immediate()) - emit_adc_r32_imm(dst, reg, param.immediate()); // adc reg,param - else if (param.is_memory()) - emit_adc_r32_m32(dst, reg, MABS(param.memory())); // adc reg,[param] - else if (param.is_int_register()) - emit_adc_r32_r32(dst, reg, param.ireg()); // adc reg,param -} - - -//------------------------------------------------- -// emit_adc_m32_p32 - adc operation to a 32-bit -// memory location from a 32-bit parameter -//------------------------------------------------- - -void drcbe_x86::emit_adc_m32_p32(x86code *&dst, x86_memref memref, const be_parameter ¶m, const instruction &inst) -{ - if (param.is_immediate()) - emit_adc_m32_imm(dst, memref, param.immediate()); // adc [dest],param - else 
- { - int reg = param.select_register(REG_EAX); - emit_mov_r32_p32_keepflags(dst, reg, param); // mov reg,param - emit_adc_m32_r32(dst, memref, reg); // adc [dest],reg - } -} - - -//------------------------------------------------- -// emit_sub_r32_p32 - sub operation to a 32-bit -// register from a 32-bit parameter -//------------------------------------------------- - -void drcbe_x86::emit_sub_r32_p32(x86code *&dst, uint8_t reg, const be_parameter ¶m, const instruction &inst) -{ - if (param.is_immediate()) - { - if (inst.flags() != 0 || param.immediate() != 0) - emit_sub_r32_imm(dst, reg, param.immediate()); // sub reg,param + a.mov(MABS(param.memory()), reg); // mov [param],reg + set_last_lower_reg(a, param, reg); } - else if (param.is_memory()) - emit_sub_r32_m32(dst, reg, MABS(param.memory())); // sub reg,[param] - else if (param.is_int_register()) - emit_sub_r32_r32(dst, reg, param.ireg()); // sub reg,param -} - - -//------------------------------------------------- -// emit_sub_m32_p32 - sub operation to a 32-bit -// memory location from a 32-bit parameter -//------------------------------------------------- - -void drcbe_x86::emit_sub_m32_p32(x86code *&dst, x86_memref memref, const be_parameter ¶m, const instruction &inst) -{ - if (param.is_immediate()) - { - if (inst.flags() != 0 || param.immediate() != 0) - emit_sub_m32_imm(dst, memref, param.immediate()); // sub [dest],param - } - else - { - int reg = param.select_register(REG_EAX); - emit_mov_r32_p32(dst, reg, param); // mov reg,param - emit_sub_m32_r32(dst, memref, reg); // sub [dest],reg - } -} - - -//------------------------------------------------- -// emit_sbb_r32_p32 - sbb operation to a 32-bit -// register from a 32-bit parameter -//------------------------------------------------- - -void drcbe_x86::emit_sbb_r32_p32(x86code *&dst, uint8_t reg, const be_parameter ¶m, const instruction &inst) -{ - if (param.is_immediate()) - emit_sbb_r32_imm(dst, reg, param.immediate()); // sbb reg,param - else if (param.is_memory()) - emit_sbb_r32_m32(dst, reg, MABS(param.memory())); // sbb reg,[param] - else if (param.is_int_register()) - emit_sbb_r32_r32(dst, reg, param.ireg()); // sbb reg,param -} - - -//------------------------------------------------- -// emit_sbb_m32_p32 - sbb operation to a 32-bit -// memory location from a 32-bit parameter -//------------------------------------------------- - -void drcbe_x86::emit_sbb_m32_p32(x86code *&dst, x86_memref memref, const be_parameter ¶m, const instruction &inst) -{ - if (param.is_immediate()) - emit_sbb_m32_imm(dst, memref, param.immediate()); // sbb [dest],param - else - { - int reg = param.select_register(REG_EAX); - emit_mov_r32_p32_keepflags(dst, reg, param); // mov reg,param - emit_sbb_m32_r32(dst, memref, reg); // sbb [dest],reg - } -} - - -//------------------------------------------------- -// emit_cmp_r32_p32 - cmp operation to a 32-bit -// register from a 32-bit parameter -//------------------------------------------------- - -void drcbe_x86::emit_cmp_r32_p32(x86code *&dst, uint8_t reg, const be_parameter ¶m, const instruction &inst) -{ - if (param.is_immediate()) - emit_cmp_r32_imm(dst, reg, param.immediate()); // cmp reg,param - else if (param.is_memory()) - emit_cmp_r32_m32(dst, reg, MABS(param.memory())); // cmp reg,[param] else if (param.is_int_register()) - emit_cmp_r32_r32(dst, reg, param.ireg()); // cmp reg,param -} - - -//------------------------------------------------- -// emit_cmp_m32_p32 - cmp operation to a 32-bit -// memory location from a 32-bit parameter 
-//------------------------------------------------- - -void drcbe_x86::emit_cmp_m32_p32(x86code *&dst, x86_memref memref, const be_parameter ¶m, const instruction &inst) -{ - if (param.is_immediate()) - emit_cmp_m32_imm(dst, memref, param.immediate()); // cmp [dest],param - else { - int reg = param.select_register(REG_EAX); - emit_mov_r32_p32(dst, reg, param); // mov reg,param - emit_cmp_m32_r32(dst, memref, reg); // cmp [dest],reg + if (reg.id() != param.ireg()) + a.mov(Gpd(param.ireg()), reg); // mov param,reg } } -//------------------------------------------------- -// emit_and_r32_p32 - and operation to a 32-bit -// register from a 32-bit parameter -//------------------------------------------------- - -void drcbe_x86::emit_and_r32_p32(x86code *&dst, uint8_t reg, const be_parameter ¶m, const instruction &inst) +void drcbe_x86::alu_op_param(Assembler &a, Inst::Id const opcode, Operand const &dst, be_parameter const ¶m, std::function<bool(Assembler &a, Operand const &dst, be_parameter const &src)> optimize) { if (param.is_immediate()) { - if (inst.flags() == 0 && (uint32_t)param.immediate() == 0xffffffff) - ;// skip - else if (inst.flags() == 0 && (uint32_t)param.immediate() == 0) - emit_xor_r32_r32(dst, reg, reg); // xor reg,reg - else - emit_and_r32_imm(dst, reg, param.immediate()); // and reg,param + if (!optimize(a, dst, param)) + a.emit(opcode, dst, param.immediate()); // op dst,param } else if (param.is_memory()) - emit_and_r32_m32(dst, reg, MABS(param.memory())); // and reg,[param] - else if (param.is_int_register()) - emit_and_r32_r32(dst, reg, param.ireg()); // and reg,param -} - - -//------------------------------------------------- -// emit_and_m32_p32 - and operation to a 32-bit -// memory location from a 32-bit parameter -//------------------------------------------------- - -void drcbe_x86::emit_and_m32_p32(x86code *&dst, x86_memref memref, const be_parameter ¶m, const instruction &inst) -{ - if (param.is_immediate()) { - if (inst.flags() == 0 && (uint32_t)param.immediate() == 0xffffffff) - ;// skip - else if (inst.flags() == 0 && (uint32_t)param.immediate() == 0) - emit_mov_m32_imm(dst, memref, 0); // mov [dest],0 - else - emit_and_m32_imm(dst, memref, param.immediate()); // and [dest],param - } - else - { - int reg = param.select_register(REG_EAX); - emit_mov_r32_p32(dst, reg, param); // mov reg,param - emit_and_m32_r32(dst, memref, reg); // and [dest],reg - } -} - - -//------------------------------------------------- -// emit_test_r32_p32 - test operation to a 32-bit -// register from a 32-bit parameter -//------------------------------------------------- - -void drcbe_x86::emit_test_r32_p32(x86code *&dst, uint8_t reg, const be_parameter ¶m, const instruction &inst) -{ - if (param.is_immediate()) - emit_test_r32_imm(dst, reg, param.immediate()); // test reg,param - else if (param.is_memory()) - emit_test_m32_r32(dst, MABS(param.memory()), reg); // test [param],reg - else if (param.is_int_register()) - emit_test_r32_r32(dst, reg, param.ireg()); // test reg,param -} - - -//------------------------------------------------- -// emit_test_m32_p32 - test operation to a 32-bit -// memory location from a 32-bit parameter -//------------------------------------------------- - -void drcbe_x86::emit_test_m32_p32(x86code *&dst, x86_memref memref, const be_parameter ¶m, const instruction &inst) -{ - if (param.is_immediate()) - emit_test_m32_imm(dst, memref, param.immediate()); // test [dest],param - else if (param.is_memory()) - { - emit_mov_r32_p32(dst, REG_EAX, param); // mov 
reg,param - emit_test_m32_r32(dst, memref, REG_EAX); // test [dest],reg - } - else if (param.is_int_register()) - emit_test_m32_r32(dst, memref, param.ireg()); // test [dest],param -} - - -//------------------------------------------------- -// emit_or_r32_p32 - or operation to a 32-bit -// register from a 32-bit parameter -//------------------------------------------------- - -void drcbe_x86::emit_or_r32_p32(x86code *&dst, uint8_t reg, const be_parameter ¶m, const instruction &inst) -{ - if (param.is_immediate()) - { - if (inst.flags() == 0 && (uint32_t)param.immediate() == 0) - ;// skip - else if (inst.flags() == 0 && (uint32_t)param.immediate() == 0xffffffff) - emit_mov_r32_imm(dst, reg, ~0); // mov reg,-1 - else - emit_or_r32_imm(dst, reg, param.immediate()); // or reg,param - } - else if (param.is_memory()) - emit_or_r32_m32(dst, reg, MABS(param.memory())); // or reg,[param] - else if (param.is_int_register()) - emit_or_r32_r32(dst, reg, param.ireg()); // or reg,param -} - - -//------------------------------------------------- -// emit_or_m32_p32 - or operation to a 32-bit -// memory location from a 32-bit parameter -//------------------------------------------------- - -void drcbe_x86::emit_or_m32_p32(x86code *&dst, x86_memref memref, const be_parameter ¶m, const instruction &inst) -{ - if (param.is_immediate()) - { - if (inst.flags() == 0 && (uint32_t)param.immediate() == 0) - ;// skip - else if (inst.flags() == 0 && (uint32_t)param.immediate() == 0xffffffff) - emit_mov_m32_imm(dst, memref, ~0); // mov [dest],-1 - else - emit_or_m32_imm(dst, memref, param.immediate()); // or [dest],param - } - else - { - int reg = param.select_register(REG_EAX); - emit_mov_r32_p32(dst, reg, param); // mov reg,param - emit_or_m32_r32(dst, memref, reg); // or [dest],reg - } -} - - -//------------------------------------------------- -// emit_xor_r32_p32 - xor operation to a 32-bit -// register from a 32-bit parameter -//------------------------------------------------- + if (dst.isMem()) + { + // use temporary register for memory,memory + Gp const reg = param.select_register(eax); -void drcbe_x86::emit_xor_r32_p32(x86code *&dst, uint8_t reg, const be_parameter ¶m, const instruction &inst) -{ - if (param.is_immediate()) - { - if (inst.flags() == 0 && (uint32_t)param.immediate() == 0) - ;// skip - else if (inst.flags() == 0 && (uint32_t)param.immediate() == 0xffffffff) - emit_not_r32(dst, reg); // not reg + a.mov(reg, MABS(param.memory())); // mov reg,param + a.emit(opcode, dst, reg); // op [dst],reg + } + else if (opcode != Inst::kIdTest) + // most instructions are register,memory + a.emit(opcode, dst, MABS(param.memory())); // op dst,[param] else - emit_xor_r32_imm(dst, reg, param.immediate()); // xor reg,param + // test instruction requires memory,register + a.emit(opcode, MABS(param.memory()), dst); // op [param],dst } - else if (param.is_memory()) - emit_xor_r32_m32(dst, reg, MABS(param.memory())); // xor reg,[param] else if (param.is_int_register()) - emit_xor_r32_r32(dst, reg, param.ireg()); // xor reg,param -} - - -//------------------------------------------------- -// emit_xor_m32_p32 - xor operation to a 32-bit -// memory location from a 32-bit parameter -//------------------------------------------------- - -void drcbe_x86::emit_xor_m32_p32(x86code *&dst, x86_memref memref, const be_parameter ¶m, const instruction &inst) -{ - if (param.is_immediate()) - { - if (inst.flags() == 0 && (uint32_t)param.immediate() == 0) - ;// skip - else if (inst.flags() == 0 && (uint32_t)param.immediate() == 
0xffffffff) - emit_not_m32(dst, memref); // not [dest] - else - emit_xor_m32_imm(dst, memref, param.immediate()); // xor [dest],param - } - else - { - int reg = param.select_register(REG_EAX); - emit_mov_r32_p32(dst, reg, param); // mov reg,param - emit_xor_m32_r32(dst, memref, reg); // xor [dest],reg - } + a.emit(opcode, dst, Gpd(param.ireg())); // op dst,param } -//------------------------------------------------- -// emit_shl_r32_p32 - shl operation to a 32-bit -// register from a 32-bit parameter -//------------------------------------------------- - -void drcbe_x86::emit_shl_r32_p32(x86code *&dst, uint8_t reg, const be_parameter ¶m, const instruction &inst) +void drcbe_x86::shift_op_param(Assembler &a, Inst::Id const opcode, Operand const &dst, be_parameter const ¶m, std::function<bool(Assembler &a, Operand const &dst, be_parameter const &src)> optimize) { + Operand shift = cl; if (param.is_immediate()) { - if (inst.flags() == 0 && (uint32_t)param.immediate() == 0) - ;// skip - else - emit_shl_r32_imm(dst, reg, param.immediate()); // shl reg,param - } - else - { - emit_mov_r32_p32(dst, REG_ECX, param); // mov ecx,param - emit_shl_r32_cl(dst, reg); // shl reg,cl - } -} + if (optimize(a, dst, param)) + return; - -//------------------------------------------------- -// emit_shl_m32_p32 - shl operation to a 32-bit -// memory location from a 32-bit parameter -//------------------------------------------------- - -void drcbe_x86::emit_shl_m32_p32(x86code *&dst, x86_memref memref, const be_parameter ¶m, const instruction &inst) -{ - if (param.is_immediate()) - { - if (inst.flags() == 0 && (uint32_t)param.immediate() == 0) - ;// skip - else - emit_shl_m32_imm(dst, memref, param.immediate()); // shl [dest],param + shift = imm(param.immediate()); } else - { - emit_mov_r32_p32(dst, REG_ECX, param); // mov ecx,param - emit_shl_m32_cl(dst, memref); // shl [dest],cl - } -} - + emit_mov_r32_p32(a, ecx, param); -//------------------------------------------------- -// emit_shr_r32_p32 - shr operation to a 32-bit -// register from a 32-bit parameter -//------------------------------------------------- - -void drcbe_x86::emit_shr_r32_p32(x86code *&dst, uint8_t reg, const be_parameter ¶m, const instruction &inst) -{ - if (param.is_immediate()) - { - if (inst.flags() == 0 && (uint32_t)param.immediate() == 0) - ;// skip - else - emit_shr_r32_imm(dst, reg, param.immediate()); // shr reg,param - } - else - { - emit_mov_r32_p32(dst, REG_ECX, param); // mov ecx,param - emit_shr_r32_cl(dst, reg); // shr reg,cl - } -} - - -//------------------------------------------------- -// emit_shr_m32_p32 - shr operation to a 32-bit -// memory location from a 32-bit parameter -//------------------------------------------------- - -void drcbe_x86::emit_shr_m32_p32(x86code *&dst, x86_memref memref, const be_parameter ¶m, const instruction &inst) -{ - if (param.is_immediate()) - { - if (inst.flags() == 0 && (uint32_t)param.immediate() == 0) - ;// skip - else - emit_shr_m32_imm(dst, memref, param.immediate()); // shr [dest],param - } - else - { - emit_mov_r32_p32(dst, REG_ECX, param); // mov ecx,param - emit_shr_m32_cl(dst, memref); // shr [dest],cl - } -} - - -//------------------------------------------------- -// emit_sar_r32_p32 - sar operation to a 32-bit -// register from a 32-bit parameter -//------------------------------------------------- - -void drcbe_x86::emit_sar_r32_p32(x86code *&dst, uint8_t reg, const be_parameter ¶m, const instruction &inst) -{ - if (param.is_immediate()) - { - if (inst.flags() == 0 && 
(uint32_t)param.immediate() == 0) - ;// skip - else - emit_sar_r32_imm(dst, reg, param.immediate()); // sar reg,param - } - else - { - emit_mov_r32_p32(dst, REG_ECX, param); // mov ecx,param - emit_sar_r32_cl(dst, reg); // sar reg,cl - } -} - - -//------------------------------------------------- -// emit_sar_m32_p32 - sar operation to a 32-bit -// memory location from a 32-bit parameter -//------------------------------------------------- - -void drcbe_x86::emit_sar_m32_p32(x86code *&dst, x86_memref memref, const be_parameter ¶m, const instruction &inst) -{ - if (param.is_immediate()) - { - if (inst.flags() == 0 && (uint32_t)param.immediate() == 0) - ;// skip - else - emit_sar_m32_imm(dst, memref, param.immediate()); // sar [dest],param - } - else - { - emit_mov_r32_p32(dst, REG_ECX, param); // mov ecx,param - emit_sar_m32_cl(dst, memref); // sar [dest],cl - } -} - - -//------------------------------------------------- -// emit_rol_r32_p32 - rol operation to a 32-bit -// register from a 32-bit parameter -//------------------------------------------------- - -void drcbe_x86::emit_rol_r32_p32(x86code *&dst, uint8_t reg, const be_parameter ¶m, const instruction &inst) -{ - if (param.is_immediate()) - { - if (inst.flags() == 0 && (uint32_t)param.immediate() == 0) - ;// skip - else - emit_rol_r32_imm(dst, reg, param.immediate()); // rol reg,param - } - else - { - emit_mov_r32_p32(dst, REG_ECX, param); // mov ecx,param - emit_rol_r32_cl(dst, reg); // rol reg,cl - } -} - - -//------------------------------------------------- -// emit_rol_m32_p32 - rol operation to a 32-bit -// memory location from a 32-bit parameter -//------------------------------------------------- - -void drcbe_x86::emit_rol_m32_p32(x86code *&dst, x86_memref memref, const be_parameter ¶m, const instruction &inst) -{ - if (param.is_immediate()) - { - if (inst.flags() == 0 && (uint32_t)param.immediate() == 0) - ;// skip - else - emit_rol_m32_imm(dst, memref, param.immediate()); // rol [dest],param - } - else - { - emit_mov_r32_p32(dst, REG_ECX, param); // mov ecx,param - emit_rol_m32_cl(dst, memref); // rol [dest],cl - } -} - - -//------------------------------------------------- -// emit_ror_r32_p32 - ror operation to a 32-bit -// register from a 32-bit parameter -//------------------------------------------------- - -void drcbe_x86::emit_ror_r32_p32(x86code *&dst, uint8_t reg, const be_parameter ¶m, const instruction &inst) -{ - if (param.is_immediate()) - { - if (inst.flags() == 0 && (uint32_t)param.immediate() == 0) - ;// skip - else - emit_ror_r32_imm(dst, reg, param.immediate()); // ror reg,param - } - else - { - emit_mov_r32_p32(dst, REG_ECX, param); // mov ecx,param - emit_ror_r32_cl(dst, reg); // ror reg,cl - } -} - - -//------------------------------------------------- -// emit_ror_m32_p32 - ror operation to a 32-bit -// memory location from a 32-bit parameter -//------------------------------------------------- - -void drcbe_x86::emit_ror_m32_p32(x86code *&dst, x86_memref memref, const be_parameter ¶m, const instruction &inst) -{ - if (param.is_immediate()) - { - if (inst.flags() == 0 && (uint32_t)param.immediate() == 0) - ;// skip - else - emit_ror_m32_imm(dst, memref, param.immediate()); // ror [dest],param - } - else - { - emit_mov_r32_p32(dst, REG_ECX, param); // mov ecx,param - emit_ror_m32_cl(dst, memref); // ror [dest],cl - } -} - - -//------------------------------------------------- -// emit_rcl_r32_p32 - rcl operation to a 32-bit -// register from a 32-bit parameter 
-//------------------------------------------------- - -void drcbe_x86::emit_rcl_r32_p32(x86code *&dst, uint8_t reg, const be_parameter ¶m, const instruction &inst) -{ - if (param.is_immediate()) - { - if (inst.flags() == 0 && (uint32_t)param.immediate() == 0) - ;// skip - else - emit_rcl_r32_imm(dst, reg, param.immediate()); // rcl reg,param - } - else - { - emit_mov_r32_p32_keepflags(dst, REG_ECX, param); // mov ecx,param - emit_rcl_r32_cl(dst, reg); // rcl reg,cl - } -} - - -//------------------------------------------------- -// emit_rcl_m32_p32 - rcl operation to a 32-bit -// memory location from a 32-bit parameter -//------------------------------------------------- - -void drcbe_x86::emit_rcl_m32_p32(x86code *&dst, x86_memref memref, const be_parameter ¶m, const instruction &inst) -{ - if (param.is_immediate()) - { - if (inst.flags() == 0 && (uint32_t)param.immediate() == 0) - ;// skip - else - emit_rcl_m32_imm(dst, memref, param.immediate()); // rcl [dest],param - } - else - { - emit_mov_r32_p32_keepflags(dst, REG_ECX, param); // mov ecx,param - emit_rcl_m32_cl(dst, memref); // rcl [dest],cl - } -} - - -//------------------------------------------------- -// emit_rcr_r32_p32 - rcr operation to a 32-bit -// register from a 32-bit parameter -//------------------------------------------------- - -void drcbe_x86::emit_rcr_r32_p32(x86code *&dst, uint8_t reg, const be_parameter ¶m, const instruction &inst) -{ - if (param.is_immediate()) - { - if (inst.flags() == 0 && (uint32_t)param.immediate() == 0) - ;// skip - else - emit_rcr_r32_imm(dst, reg, param.immediate()); // rcr reg,param - } - else - { - emit_mov_r32_p32_keepflags(dst, REG_ECX, param); // mov ecx,param - emit_rcr_r32_cl(dst, reg); // rcr reg,cl - } -} - - -//------------------------------------------------- -// emit_rcr_m32_p32 - rcr operation to a 32-bit -// memory location from a 32-bit parameter -//------------------------------------------------- - -void drcbe_x86::emit_rcr_m32_p32(x86code *&dst, x86_memref memref, const be_parameter ¶m, const instruction &inst) -{ - if (param.is_immediate()) - { - if (inst.flags() == 0 && (uint32_t)param.immediate() == 0) - ;// skip - else - emit_rcr_m32_imm(dst, memref, param.immediate()); // rcr [dest],param - } - else - { - emit_mov_r32_p32_keepflags(dst, REG_ECX, param); // mov ecx,param - emit_rcr_m32_cl(dst, memref); // rcr [dest],cl - } + a.emit(opcode, dst, shift); } @@ -1622,39 +1049,39 @@ void drcbe_x86::emit_rcr_m32_p32(x86code *&dst, x86_memref memref, const be_para // into a pair of registers //------------------------------------------------- -void drcbe_x86::emit_mov_r64_p64(x86code *&dst, uint8_t reglo, uint8_t reghi, const be_parameter ¶m) +void drcbe_x86::emit_mov_r64_p64(Assembler &a, Gp const ®lo, Gp const ®hi, be_parameter const ¶m) { if (param.is_immediate()) { - if (reglo == REG_NONE) + if (!reglo.isValid()) ; - else if ((uint32_t)param.immediate() == 0) - emit_xor_r32_r32(dst, reglo, reglo); // xor reglo,reglo + else if (u32(param.immediate()) == 0) + a.xor_(reglo, reglo); // xor reglo,reglo else - emit_mov_r32_imm(dst, reglo, param.immediate()); // mov reglo,param - if (reghi == REG_NONE) + a.mov(reglo, param.immediate()); // mov reglo,param + if (!reghi.isValid()) ; - else if ((uint32_t)(param.immediate() >> 32) == 0) - emit_xor_r32_r32(dst, reghi, reghi); // xor reghi,reghi + else if (u32(param.immediate() >> 32) == 0) + a.xor_(reghi, reghi); // xor reghi,reghi else - emit_mov_r32_imm(dst, reghi, param.immediate() >> 32); // mov reghi,param >> 32 + a.mov(reghi, 
param.immediate() >> 32); // mov reghi,param >> 32 } else if (param.is_memory()) { - int skip_lower = can_skip_lower_load(dst, (uint32_t *)((uintptr_t)param.memory()), reglo); - int skip_upper = can_skip_upper_load(dst, (uint32_t *)((uintptr_t)param.memory(4)), reghi); - if (reglo != REG_NONE && !skip_lower) - emit_mov_r32_m32(dst, reglo, MABS(param.memory())); // mov reglo,[param] - if (reghi != REG_NONE && !skip_upper) - emit_mov_r32_m32(dst, reghi, MABS(param.memory(4))); // mov reghi,[param+4] + int skip_lower = can_skip_lower_load(a, (uint32_t *)((uintptr_t)param.memory(0)), reglo); + int skip_upper = can_skip_upper_load(a, (uint32_t *)((uintptr_t)param.memory(4)), reghi); + if (reglo.isValid() && !skip_lower) + a.mov(reglo, MABS(param.memory(0))); // mov reglo,[param] + if (reghi.isValid() && !skip_upper) + a.mov(reghi, MABS(param.memory(4))); // mov reghi,[param+4] } else if (param.is_int_register()) { - int skip_upper = can_skip_upper_load(dst, m_reghi[param.ireg()], reghi); - if (reglo != REG_NONE && reglo != param.ireg()) - emit_mov_r32_r32(dst, reglo, param.ireg()); // mov reglo,param - if (reghi != REG_NONE && !skip_upper) - emit_mov_r32_m32(dst, reghi, MABS(m_reghi[param.ireg()])); // mov reghi,reghi[param] + int skip_upper = can_skip_upper_load(a, m_reghi[param.ireg()], reghi); + if (reglo.isValid() && reglo.id() != param.ireg()) + a.mov(reglo, Gpd(param.ireg())); // mov reglo,param + if (reghi.isValid() && !skip_upper) + a.mov(reghi, MABS(m_reghi[param.ireg()])); // mov reghi,reghi[param] } } @@ -1665,31 +1092,31 @@ void drcbe_x86::emit_mov_r64_p64(x86code *&dst, uint8_t reglo, uint8_t reghi, co // affecting any flags //------------------------------------------------- -void drcbe_x86::emit_mov_r64_p64_keepflags(x86code *&dst, uint8_t reglo, uint8_t reghi, const be_parameter ¶m) +void drcbe_x86::emit_mov_r64_p64_keepflags(Assembler &a, Gp const ®lo, Gp const ®hi, be_parameter const ¶m) { if (param.is_immediate()) { - if (reglo != REG_NONE) - emit_mov_r32_imm(dst, reglo, param.immediate()); // mov reglo,param - if (reghi != REG_NONE) - emit_mov_r32_imm(dst, reghi, param.immediate() >> 32); // mov reghi,param >> 32 + if (reglo.isValid()) + a.mov(reglo, param.immediate()); // mov reglo,param + if (reghi.isValid()) + a.mov(reghi, param.immediate() >> 32); // mov reghi,param >> 32 } else if (param.is_memory()) { - int skip_lower = can_skip_lower_load(dst, (uint32_t *)((uintptr_t)param.memory()), reglo); - int skip_upper = can_skip_upper_load(dst, (uint32_t *)((uintptr_t)param.memory(4)), reghi); - if (reglo != REG_NONE && !skip_lower) - emit_mov_r32_m32(dst, reglo, MABS(param.memory())); // mov reglo,[param] - if (reghi != REG_NONE && !skip_upper) - emit_mov_r32_m32(dst, reghi, MABS(param.memory(4))); // mov reghi,[param+4] + int skip_lower = can_skip_lower_load(a, (uint32_t *)((uintptr_t)param.memory(0)), reglo); + int skip_upper = can_skip_upper_load(a, (uint32_t *)((uintptr_t)param.memory(4)), reghi); + if (reglo.isValid() && !skip_lower) + a.mov(reglo, MABS(param.memory(0))); // mov reglo,[param] + if (reghi.isValid() && !skip_upper) + a.mov(reghi, MABS(param.memory(4))); // mov reghi,[param+4] } else if (param.is_int_register()) { - int skip_upper = can_skip_upper_load(dst, m_reghi[param.ireg()], reghi); - if (reglo != REG_NONE && reglo != param.ireg()) - emit_mov_r32_r32(dst, reglo, param.ireg()); // mov reglo,param - if (reghi != REG_NONE && !skip_upper) - emit_mov_r32_m32(dst, reghi, MABS(m_reghi[param.ireg()])); // mov reghi,reghi[param] + int skip_upper = 
can_skip_upper_load(a, m_reghi[param.ireg()], reghi); + if (reglo.isValid() && reglo.id() != param.ireg()) + a.mov(reglo, Gpd(param.ireg())); // mov reglo,param + if (reghi.isValid() && !skip_upper) + a.mov(reghi, MABS(m_reghi[param.ireg()])); // mov reghi,reghi[param] } } @@ -1699,27 +1126,30 @@ void drcbe_x86::emit_mov_r64_p64_keepflags(x86code *&dst, uint8_t reglo, uint8_t // into a memory location //------------------------------------------------- -void drcbe_x86::emit_mov_m64_p64(x86code *&dst, x86_memref memref, const be_parameter ¶m) +void drcbe_x86::emit_mov_m64_p64(Assembler &a, Mem const &memref, be_parameter const ¶m) { + Mem memref_lo = memref.cloneAdjusted(0); memref_lo.setSize(4); + Mem memref_hi = memref.cloneAdjusted(4); memref_hi.setSize(4); + if (param.is_immediate()) { - emit_mov_m32_imm(dst, memref + 0, param.immediate()); // mov [mem],param - emit_mov_m32_imm(dst, memref + 4, param.immediate() >> 32); // mov [mem],param >> 32 + a.mov(memref_lo, param.immediate()); // mov [mem],param + a.mov(memref_hi, param.immediate() >> 32); // mov [mem],param >> 32 } else if (param.is_memory()) { - int skip_lower = can_skip_lower_load(dst, (uint32_t *)((uintptr_t)param.memory()), REG_EAX); + int skip_lower = can_skip_lower_load(a, (uint32_t *)((uintptr_t)param.memory()), eax); if (!skip_lower) - emit_mov_r32_m32(dst, REG_EAX, MABS(param.memory())); // mov eax,[param] - emit_mov_m32_r32(dst, memref + 0, REG_EAX); // mov [mem],eax - emit_mov_r32_m32(dst, REG_EAX, MABS(param.memory(4))); // mov eax,[param+4] - emit_mov_m32_r32(dst, memref + 4, REG_EAX); // mov [mem+4],eax + a.mov(eax, MABS(param.memory(0))); // mov eax,[param] + a.mov(memref_lo, eax); // mov [mem],eax + a.mov(eax, MABS(param.memory(4))); // mov eax,[param+4] + a.mov(memref_hi, eax); // mov [mem+4],eax } else if (param.is_int_register()) { - emit_mov_m32_r32(dst, memref + 0, param.ireg()); // mov [mem],param - emit_mov_r32_m32(dst, REG_EAX, MABS(m_reghi[param.ireg()])); // mov eax,[param.hi] - emit_mov_m32_r32(dst, memref + 4, REG_EAX); // mov [mem+4],eax + a.mov(memref_lo, Gpd(param.ireg())); // mov [mem],param + a.mov(eax, MABS(m_reghi[param.ireg()])); // mov eax,[param.hi] + a.mov(memref_hi, eax); // mov [mem+4],eax } } @@ -1729,287 +1159,22 @@ void drcbe_x86::emit_mov_m64_p64(x86code *&dst, x86_memref memref, const be_para // into a 64-bit parameter //------------------------------------------------- -void drcbe_x86::emit_mov_p64_r64(x86code *&dst, const be_parameter ¶m, uint8_t reglo, uint8_t reghi) +void drcbe_x86::emit_mov_p64_r64(Assembler &a, be_parameter const ¶m, Gp const ®lo, Gp const ®hi) { assert(!param.is_immediate()); if (param.is_memory()) { - emit_mov_m32_r32(dst, MABS(param.memory()), reglo); // mov [param],reglo - emit_mov_m32_r32(dst, MABS(param.memory(4)), reghi); // mov [param+4],reghi - } - else if (param.is_int_register()) - { - if (reglo != param.ireg()) - emit_mov_r32_r32(dst, param.ireg(), reglo); // mov param,reglo - emit_mov_m32_r32(dst, MABS(m_reghi[param.ireg()]), reghi); // mov reghi[param],reghi - } - set_last_lower_reg(dst, param, reglo); - set_last_upper_reg(dst, param, reghi); -} - - -//------------------------------------------------- -// emit_add_r64_p64 - add operation to a 64-bit -// pair of registers from a 64-bit parameter -//------------------------------------------------- - -void drcbe_x86::emit_add_r64_p64(x86code *&dst, uint8_t reglo, uint8_t reghi, const be_parameter ¶m, const instruction &inst) -{ - int saveflags = ((inst.flags() & FLAG_Z) != 0); - if 
(param.is_memory()) - { - emit_add_r32_m32(dst, reglo, MABS(param.memory())); // add reglo,[param] - if (saveflags) emit_pushf(dst); // pushf - emit_adc_r32_m32(dst, reghi, MABS(param.memory(4))); // adc reghi,[param] - } - else if (param.is_immediate()) - { - emit_add_r32_imm(dst, reglo, param.immediate()); // add reglo,param - if (saveflags) emit_pushf(dst); // pushf - emit_adc_r32_imm(dst, reghi, param.immediate() >> 32); // adc reghi,param >> 32 + a.mov(MABS(param.memory(0)), reglo); // mov [param],reglo + a.mov(MABS(param.memory(4)), reghi); // mov [param+4],reghi } else if (param.is_int_register()) { - emit_add_r32_r32(dst, reglo, param.ireg()); // add reglo,param - if (saveflags) emit_pushf(dst); // pushf - emit_adc_r32_m32(dst, reghi, MABS(m_reghi[param.ireg()])); // adc reghi,reghi[param] + if (reglo.id() != param.ireg()) + a.mov(Gpd(param.ireg()), reglo); // mov param,reglo + a.mov(MABS(m_reghi[param.ireg()]), reghi); // mov reghi[param],reghi } - if (saveflags) - emit_combine_z_flags(dst); -} - - -//------------------------------------------------- -// emit_add_m64_p64 - add operation to a 64-bit -// memory location from a 64-bit parameter -//------------------------------------------------- - -void drcbe_x86::emit_add_m64_p64(x86code *&dst, x86_memref memref, const be_parameter ¶m, const instruction &inst) -{ - int saveflags = ((inst.flags() & FLAG_Z) != 0); - if (param.is_immediate()) - { - emit_add_m32_imm(dst, memref, param.immediate()); // add [dest],param - if (saveflags) emit_pushf(dst); // pushf - emit_adc_m32_imm(dst, memref + 4, param.immediate() >> 32); // adc [dest+4],param >> 32 - } - else - { - int reglo = (param.is_int_register()) ? param.ireg() : REG_EAX; - emit_mov_r64_p64(dst, reglo, REG_EDX, param); // mov edx:reglo,param - emit_add_m32_r32(dst, memref, reglo); // add [dest],reglo - if (saveflags) emit_pushf(dst); // pushf - emit_adc_m32_r32(dst, memref + 4, REG_EDX); // adc [dest+4],edx - } - if (saveflags) - emit_combine_z_flags(dst); -} - - -//------------------------------------------------- -// emit_adc_r64_p64 - adc operation to a 64-bit -// pair of registers from a 64-bit parameter -//------------------------------------------------- - -void drcbe_x86::emit_adc_r64_p64(x86code *&dst, uint8_t reglo, uint8_t reghi, const be_parameter ¶m, const instruction &inst) -{ - int saveflags = ((inst.flags() & FLAG_Z) != 0); - if (param.is_memory()) - { - emit_adc_r32_m32(dst, reglo, MABS(param.memory())); // adc reglo,[param] - if (saveflags) emit_pushf(dst); // pushf - emit_adc_r32_m32(dst, reghi, MABS(param.memory(4))); // adc reghi,[param] - } - else if (param.is_immediate()) - { - emit_adc_r32_imm(dst, reglo, param.immediate()); // adc reglo,param - if (saveflags) emit_pushf(dst); // pushf - emit_adc_r32_imm(dst, reghi, param.immediate() >> 32); // adc reghi,param >> 32 - } - else if (param.is_int_register()) - { - emit_adc_r32_r32(dst, reglo, param.ireg()); // adc reglo,param - if (saveflags) emit_pushf(dst); // pushf - emit_adc_r32_m32(dst, reghi, MABS(m_reghi[param.ireg()])); // adc reghi,reghi[param] - } - if (saveflags) - emit_combine_z_flags(dst); -} - - -//------------------------------------------------- -// emit_adc_m64_p64 - adc operation to a 64-bit -// memory location from a 64-bit parameter -//------------------------------------------------- - -void drcbe_x86::emit_adc_m64_p64(x86code *&dst, x86_memref memref, const be_parameter ¶m, const instruction &inst) -{ - int saveflags = ((inst.flags() & FLAG_Z) != 0); - if (param.is_immediate()) - { - 
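// Illustrative sketch (not part of the patch): why these add/adc helpers push the
// flags after the low-half ADD and merge them later.  A 64-bit add on 32-bit x86 is
// "add lo / adc hi"; the ADC only reports Z for the upper half, so the combined Z
// must be the AND of both halves being zero.  Plain, self-contained C++ model:

#include <cassert>
#include <cstdint>

static bool add64_sets_z(uint32_t dst_lo, uint32_t dst_hi, uint64_t src)
{
    uint64_t lo = uint64_t(dst_lo) + uint32_t(src);                   // add lo
    bool z_lo = (uint32_t(lo) == 0);                                  // Z captured by pushf
    uint32_t hi = dst_hi + uint32_t(src >> 32) + uint32_t(lo >> 32);  // adc hi
    bool z_hi = (hi == 0);                                            // Z after the adc
    return z_lo && z_hi;                                              // what combine-Z reconstructs
}

int main()
{
    assert(add64_sets_z(0xffffffffu, 0xffffffffu, 1));   // ~0 + 1 wraps to 0: Z set
    assert(!add64_sets_z(0xffffffffu, 0x00000000u, 1));  // low wraps but high is 1: Z clear
    return 0;
}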
emit_adc_m32_imm(dst, memref, param.immediate()); // adc [dest],param - if (saveflags) emit_pushf(dst); // pushf - emit_adc_m32_imm(dst, memref + 4, param.immediate() >> 32); // adc [dest+4],param >> 32 - } - else - { - int reglo = (param.is_int_register()) ? param.ireg() : REG_EAX; - emit_mov_r64_p64_keepflags(dst, reglo, REG_EDX, param); // mov edx:reglo,param - emit_adc_m32_r32(dst, memref, reglo); // adc [dest],reglo - if (saveflags) emit_pushf(dst); // pushf - emit_adc_m32_r32(dst, memref + 4, REG_EDX); // adc [dest+4],edx - } - if (saveflags) - emit_combine_z_flags(dst); -} - - -//------------------------------------------------- -// emit_sub_r64_p64 - sub operation to a 64-bit -// pair of registers from a 64-bit parameter -//------------------------------------------------- - -void drcbe_x86::emit_sub_r64_p64(x86code *&dst, uint8_t reglo, uint8_t reghi, const be_parameter ¶m, const instruction &inst) -{ - int saveflags = ((inst.flags() & FLAG_Z) != 0); - if (param.is_memory()) - { - emit_sub_r32_m32(dst, reglo, MABS(param.memory())); // sub reglo,[param] - if (saveflags) emit_pushf(dst); // pushf - emit_sbb_r32_m32(dst, reghi, MABS(param.memory(4))); // sbb reghi,[param] - } - else if (param.is_immediate()) - { - emit_sub_r32_imm(dst, reglo, param.immediate()); // sub reglo,param - if (saveflags) emit_pushf(dst); // pushf - emit_sbb_r32_imm(dst, reghi, param.immediate() >> 32); // sbb reghi,param >> 32 - } - else if (param.is_int_register()) - { - emit_sub_r32_r32(dst, reglo, param.ireg()); // sub reglo,param - if (saveflags) emit_pushf(dst); // pushf - emit_sbb_r32_m32(dst, reghi, MABS(m_reghi[param.ireg()])); // sbb reghi,reghi[param] - } - if (saveflags) - emit_combine_z_flags(dst); -} - - -//------------------------------------------------- -// emit_sub_m64_p64 - sub operation to a 64-bit -// memory location from a 64-bit parameter -//------------------------------------------------- - -void drcbe_x86::emit_sub_m64_p64(x86code *&dst, x86_memref memref, const be_parameter ¶m, const instruction &inst) -{ - int saveflags = ((inst.flags() & FLAG_Z) != 0); - if (param.is_immediate()) - { - emit_sub_m32_imm(dst, memref, param.immediate()); // sub [dest],param - if (saveflags) emit_pushf(dst); // pushf - emit_sbb_m32_imm(dst, memref + 4, param.immediate() >> 32); // sbb [dest+4],param >> 32 - } - else - { - int reglo = (param.is_int_register()) ? 
param.ireg() : REG_EAX; - emit_mov_r64_p64(dst, reglo, REG_EDX, param); // mov edx:reglo,param - emit_sub_m32_r32(dst, memref, reglo); // sub [dest],reglo - if (saveflags) emit_pushf(dst); // pushf - emit_sbb_m32_r32(dst, memref + 4, REG_EDX); // sbb [dest+4],edx - } - if (saveflags) - emit_combine_z_flags(dst); -} - - -//------------------------------------------------- -// emit_sbb_r64_p64 - sbb operation to a 64-bit -// pair of registers from a 64-bit parameter -//------------------------------------------------- - -void drcbe_x86::emit_sbb_r64_p64(x86code *&dst, uint8_t reglo, uint8_t reghi, const be_parameter ¶m, const instruction &inst) -{ - int saveflags = ((inst.flags() & FLAG_Z) != 0); - if (param.is_memory()) - { - emit_sbb_r32_m32(dst, reglo, MABS(param.memory())); // sbb reglo,[param] - if (saveflags) emit_pushf(dst); // pushf - emit_sbb_r32_m32(dst, reghi, MABS(param.memory(4))); // sbb reghi,[param] - } - else if (param.is_immediate()) - { - emit_sbb_r32_imm(dst, reglo, param.immediate()); // sbb reglo,param - if (saveflags) emit_pushf(dst); // pushf - emit_sbb_r32_imm(dst, reghi, param.immediate() >> 32); // sbb reghi,param >> 32 - } - else if (param.is_int_register()) - { - emit_sbb_r32_r32(dst, reglo, param.ireg()); // sbb reglo,param - if (saveflags) emit_pushf(dst); // pushf - emit_sbb_r32_m32(dst, reghi, MABS(m_reghi[param.ireg()])); // sbb reghi,reghi[param] - } - if (saveflags) - emit_combine_z_flags(dst); -} - - -//------------------------------------------------- -// emit_sbb_m64_p64 - sbb operation to a 64-bit -// memory location from a 64-bit parameter -//------------------------------------------------- - -void drcbe_x86::emit_sbb_m64_p64(x86code *&dst, x86_memref memref, const be_parameter ¶m, const instruction &inst) -{ - int saveflags = ((inst.flags() & FLAG_Z) != 0); - if (param.is_immediate()) - { - emit_sbb_m32_imm(dst, memref, param.immediate()); // sbb [dest],param - if (saveflags) emit_pushf(dst); // pushf - emit_sbb_m32_imm(dst, memref + 4, param.immediate() >> 32); // sbb [dest+4],param >> 32 - } - else - { - int reglo = (param.is_int_register()) ? 
param.ireg() : REG_EAX; - emit_mov_r64_p64_keepflags(dst, reglo, REG_EDX, param); // mov edx:reglo,param - emit_sbb_m32_r32(dst, memref, reglo); // sbb [dest],reglo - if (saveflags) emit_pushf(dst); // pushf - emit_sbb_m32_r32(dst, memref + 4, REG_EDX); // sbb [dest+4],edx - } - if (saveflags) - emit_combine_z_flags(dst); -} - - -//------------------------------------------------- -// emit_cmp_r64_p64 - sub operation to a 64-bit -// pair of registers from a 64-bit parameter -//------------------------------------------------- - -void drcbe_x86::emit_cmp_r64_p64(x86code *&dst, uint8_t reglo, uint8_t reghi, const be_parameter ¶m, const instruction &inst) -{ - int saveflags = (inst.flags() != FLAG_Z && (inst.flags() & FLAG_Z) != 0); - if (param.is_memory()) - { - emit_sub_r32_m32(dst, reglo, MABS(param.memory())); // sub reglo,[param] - if (saveflags) emit_pushf(dst); // pushf - emit_sbb_r32_m32(dst, reghi, MABS(param.memory(4))); // sbb reghi,[param] - } - else if (param.is_immediate()) - { - emit_sub_r32_imm(dst, reglo, param.immediate()); // sub reglo,param - if (saveflags) emit_pushf(dst); // pushf - emit_sbb_r32_imm(dst, reghi, param.immediate() >> 32); // sbb reghi,param >> 32 - } - else if (param.is_int_register()) - { - emit_sub_r32_r32(dst, reglo, param.ireg()); // sub reglo,param - if (saveflags) emit_pushf(dst); // pushf - emit_sbb_r32_m32(dst, reghi, MABS(m_reghi[param.ireg()])); // sbb reghi,reghi[param] - } - if (inst.flags() == FLAG_Z) - emit_or_r32_r32(dst, reghi, reglo); // or reghi,reglo - else if (saveflags) - emit_combine_z_flags(dst); + set_last_lower_reg(a, param, reglo); + set_last_upper_reg(a, param, reghi); } @@ -2018,39 +1183,39 @@ void drcbe_x86::emit_cmp_r64_p64(x86code *&dst, uint8_t reglo, uint8_t reghi, co // pair of registers from a 64-bit parameter //------------------------------------------------- -void drcbe_x86::emit_and_r64_p64(x86code *&dst, uint8_t reglo, uint8_t reghi, const be_parameter ¶m, const instruction &inst) +void drcbe_x86::emit_and_r64_p64(Assembler &a, Gp const ®lo, Gp const ®hi, be_parameter const ¶m, const instruction &inst) { int saveflags = ((inst.flags() & FLAG_Z) != 0); if (param.is_memory()) { - emit_and_r32_m32(dst, reglo, MABS(param.memory())); // and reglo,[param] - if (saveflags) emit_pushf(dst); // pushf - emit_and_r32_m32(dst, reghi, MABS(param.memory(4))); // and reghi,[param] + a.and_(reglo, MABS(param.memory(0))); // and reglo,[param] + if (saveflags) a.pushfd(); // pushf + a.and_(reghi, MABS(param.memory(4))); // and reghi,[param] } else if (param.is_immediate()) { - if (inst.flags() == 0 && (uint32_t)param.immediate() == 0xffffffff) + if (!inst.flags() && u32(param.immediate()) == 0xffffffffU) ;// skip - else if (inst.flags() == 0 && (uint32_t)param.immediate() == 0) - emit_xor_r32_r32(dst, reglo, reglo); // xor reglo,reglo + else if (!inst.flags() && u32(param.immediate()) == 0) + a.xor_(reglo, reglo); // xor reglo,reglo else - emit_and_r32_imm(dst, reglo, param.immediate()); // and reglo,param - if (saveflags) emit_pushf(dst); // pushf - if (inst.flags() == 0 && (uint32_t)(param.immediate() >> 32) == 0xffffffff) + a.and_(reglo, param.immediate()); // and reglo,param + if (saveflags) a.pushfd(); // pushf + if (!inst.flags() && u32(param.immediate() >> 32) == 0xffffffffU) ;// skip - else if (inst.flags() == 0 && (uint32_t)(param.immediate() >> 32) == 0) - emit_xor_r32_r32(dst, reghi, reghi); // xor reghi,reghi + else if (!inst.flags() && u32(param.immediate() >> 32) == 0) + a.xor_(reghi, reghi); // xor reghi,reghi else - 
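// Illustrative sketch (not part of the patch): the immediate special cases the 64-bit
// AND helpers apply independently to each 32-bit half when no flags are requested -
// and-with-all-ones is dropped entirely, and-with-zero becomes a plain clear, and
// anything else falls back to a real AND.  Self-contained C++ model of that choice:

#include <cstdint>
#include <cstdio>

enum class and_half { skip, clear, and_imm };

static and_half classify(uint32_t imm, bool flags_requested)
{
    if (!flags_requested && imm == 0xffffffffu) return and_half::skip;   // x & ~0 == x
    if (!flags_requested && imm == 0)           return and_half::clear;  // x & 0  == 0
    return and_half::and_imm;                                            // general case
}

int main()
{
    uint64_t imm = 0x00000000ffffffffull;                // mask keeping only the low word
    printf("lo=%d hi=%d\n",
            int(classify(uint32_t(imm), false)),         // 0: skip (all ones)
            int(classify(uint32_t(imm >> 32), false)));  // 1: clear (zero)
    return 0;
}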
emit_and_r32_imm(dst, reghi, param.immediate() >> 32); // and reghi,param >> 32 + a.and_(reghi, param.immediate() >> 32); // and reghi,param >> 32 } else if (param.is_int_register()) { - emit_and_r32_r32(dst, reglo, param.ireg()); // and reglo,param - if (saveflags) emit_pushf(dst); // pushf - emit_and_r32_m32(dst, reghi, MABS(m_reghi[param.ireg()])); // and reghi,reghi[param] + a.and_(reglo, Gpd(param.ireg())); // and reglo,param + if (saveflags) a.pushfd(); // pushf + a.and_(reghi, MABS(m_reghi[param.ireg()])); // and reghi,reghi[param] } if (saveflags) - emit_combine_z_flags(dst); + emit_combine_z_flags(a); } @@ -2059,93 +1224,35 @@ void drcbe_x86::emit_and_r64_p64(x86code *&dst, uint8_t reglo, uint8_t reghi, co // memory location from a 64-bit parameter //------------------------------------------------- -void drcbe_x86::emit_and_m64_p64(x86code *&dst, x86_memref memref, const be_parameter ¶m, const instruction &inst) +void drcbe_x86::emit_and_m64_p64(Assembler &a, Mem const &memref_lo, Mem const &memref_hi, be_parameter const ¶m, const instruction &inst) { int saveflags = ((inst.flags() & FLAG_Z) != 0); if (param.is_immediate()) { - if (inst.flags() == 0 && (uint32_t)param.immediate() == 0xffffffff) + if (!inst.flags() && u32(param.immediate()) == 0xffffffffU) ;// skip - else if (inst.flags() == 0 && (uint32_t)param.immediate() == 0) - emit_mov_m32_imm(dst, memref, 0); // mov [dest],0 + else if (!inst.flags() && u32(param.immediate()) == 0) + a.mov(memref_lo, 0); // mov [dest],0 else - emit_and_m32_imm(dst, memref, param.immediate()); // and [dest],param - if (saveflags) emit_pushf(dst); // pushf - if (inst.flags() == 0 && (uint32_t)(param.immediate() >> 32) == 0xffffffff) + a.and_(memref_lo, param.immediate()); // and [dest],param + if (saveflags) a.pushfd(); // pushf + if (!inst.flags() && u32(param.immediate() >> 32) == 0xffffffffU) ;// skip - else if (inst.flags() == 0 && (uint32_t)(param.immediate() >> 32) == 0) - emit_mov_m32_imm(dst, memref + 4, 0); // mov [dest+4],0 + else if (!inst.flags() && u32(param.immediate() >> 32) == 0) + a.mov(memref_hi, 0); // mov [dest+4],0 else - emit_and_m32_imm(dst, memref + 4, param.immediate() >> 32); // and [dest+4],param >> 32 - } - else - { - int reglo = (param.is_int_register()) ? 
param.ireg() : REG_EAX; - emit_mov_r64_p64(dst, reglo, REG_EDX, param); // mov edx:reglo,param - emit_and_m32_r32(dst, memref, reglo); // and [dest],reglo - if (saveflags) emit_pushf(dst); // pushf - emit_and_m32_r32(dst, memref + 4, REG_EDX); // and [dest+4],edx - } - if (saveflags) - emit_combine_z_flags(dst); -} - - -//------------------------------------------------- -// emit_test_r64_p64 - test operation to a 64-bit -// pair of registers from a 64-bit parameter -//------------------------------------------------- - -void drcbe_x86::emit_test_r64_p64(x86code *&dst, uint8_t reglo, uint8_t reghi, const be_parameter ¶m, const instruction &inst) -{ - int saveflags = ((inst.flags() & FLAG_Z) != 0); - if (param.is_memory()) - { - emit_test_m32_r32(dst, MABS(param.memory()), reglo); // test [param],reglo - if (saveflags) emit_pushf(dst); // pushf - emit_test_m32_r32(dst, MABS(param.memory(4)), reghi); // test [param],reghi - } - else if (param.is_immediate()) - { - emit_test_r32_imm(dst, reglo, param.immediate()); // test reglo,param - if (saveflags) emit_pushf(dst); // pushf - emit_test_r32_imm(dst, reghi, param.immediate() >> 32); // test reghi,param >> 32 - } - else if (param.is_int_register()) - { - emit_test_r32_r32(dst, reglo, param.ireg()); // test reglo,param - if (saveflags) emit_pushf(dst); // pushf - emit_test_m32_r32(dst, MABS(m_reghi[param.ireg()]), reghi); // test reghi[param],reghi - } - if (saveflags) - emit_combine_z_flags(dst); -} - - -//------------------------------------------------- -// emit_test_m64_p64 - test operation to a 64-bit -// memory location from a 64-bit parameter -//------------------------------------------------- - -void drcbe_x86::emit_test_m64_p64(x86code *&dst, x86_memref memref, const be_parameter ¶m, const instruction &inst) -{ - int saveflags = ((inst.flags() & FLAG_Z) != 0); - if (param.is_immediate()) - { - emit_test_m32_imm(dst, memref, param.immediate()); // test [dest],param - if (saveflags) emit_pushf(dst); // pushf - emit_test_m32_imm(dst, memref + 4, param.immediate() >> 32); // test [dest+4],param >> 32 + a.and_(memref_hi, param.immediate() >> 32); // and [dest+4],param >> 32 } else { - int reglo = (param.is_int_register()) ? param.ireg() : REG_EAX; - emit_mov_r64_p64(dst, reglo, REG_EDX, param); // mov edx:reglo,param - emit_test_m32_r32(dst, memref, reglo); // test [dest],reglo - if (saveflags) emit_pushf(dst); // pushf - emit_test_m32_r32(dst, memref + 4, REG_EDX); // test [dest+4],edx + Gp const reglo = (param.is_int_register()) ? 
Gpd(param.ireg()) : eax; + emit_mov_r64_p64(a, reglo, edx, param); // mov edx:reglo,param + a.and_(memref_lo, reglo); // and [dest],reglo + if (saveflags) a.pushfd(); // pushf + a.and_(memref_hi, edx); // and [dest+4],edx } if (saveflags) - emit_combine_z_flags(dst); + emit_combine_z_flags(a); } @@ -2154,39 +1261,39 @@ void drcbe_x86::emit_test_m64_p64(x86code *&dst, x86_memref memref, const be_par // pair of registers from a 64-bit parameter //------------------------------------------------- -void drcbe_x86::emit_or_r64_p64(x86code *&dst, uint8_t reglo, uint8_t reghi, const be_parameter ¶m, const instruction &inst) +void drcbe_x86::emit_or_r64_p64(Assembler &a, Gp const ®lo, Gp const ®hi, be_parameter const ¶m, const instruction &inst) { int saveflags = ((inst.flags() & FLAG_Z) != 0); if (param.is_memory()) { - emit_or_r32_m32(dst, reglo, MABS(param.memory())); // or reglo,[param] - if (saveflags) emit_pushf(dst); // pushf - emit_or_r32_m32(dst, reghi, MABS(param.memory(4))); // or reghi,[param] + a.or_(reglo, MABS(param.memory(0))); // or reglo,[param] + if (saveflags) a.pushfd(); // pushf + a.or_(reghi, MABS(param.memory(4))); // or reghi,[param] } else if (param.is_immediate()) { - if (inst.flags() == 0 && (uint32_t)param.immediate() == 0) + if (!inst.flags() && u32(param.immediate()) == 0) ;// skip - else if (inst.flags() == 0 && (uint32_t)param.immediate() == 0xffffffff) - emit_mov_r32_imm(dst, reglo, ~0); // mov reglo,-1 + else if (!inst.flags() && u32(param.immediate()) == 0xffffffffU) + a.mov(reglo, ~0); // mov reglo,-1 else - emit_or_r32_imm(dst, reglo, param.immediate()); // or reglo,param - if (saveflags) emit_pushf(dst); // pushf - if (inst.flags() == 0 && (uint32_t)(param.immediate() >> 32) == 0) + a.or_(reglo, param.immediate()); // or reglo,param + if (saveflags) a.pushfd(); // pushf + if (!inst.flags() && u32(param.immediate() >> 32) == 0) ;// skip - else if (inst.flags() == 0 && (uint32_t)(param.immediate() >> 32) == 0xffffffff) - emit_mov_r32_imm(dst, reghi, ~0); // mov reghi,-1 + else if (!inst.flags() && u32(param.immediate() >> 32) == 0xffffffffU) + a.mov(reghi, ~0); // mov reghi,-1 else - emit_or_r32_imm(dst, reghi, param.immediate() >> 32); // or reghi,param >> 32 + a.or_(reghi, param.immediate() >> 32); // or reghi,param >> 32 } else if (param.is_int_register()) { - emit_or_r32_r32(dst, reglo, param.ireg()); // or reglo,param - if (saveflags) emit_pushf(dst); // pushf - emit_or_r32_m32(dst, reghi, MABS(m_reghi[param.ireg()])); // or reghi,reghi[param] + a.or_(reglo, Gpd(param.ireg())); // or reglo,param + if (saveflags) a.pushfd(); // pushf + a.or_(reghi, MABS(m_reghi[param.ireg()])); // or reghi,reghi[param] } if (saveflags) - emit_combine_z_flags(dst); + emit_combine_z_flags(a); } @@ -2195,35 +1302,35 @@ void drcbe_x86::emit_or_r64_p64(x86code *&dst, uint8_t reglo, uint8_t reghi, con // memory location from a 64-bit parameter //------------------------------------------------- -void drcbe_x86::emit_or_m64_p64(x86code *&dst, x86_memref memref, const be_parameter ¶m, const instruction &inst) +void drcbe_x86::emit_or_m64_p64(Assembler &a, Mem const &memref_lo, Mem const &memref_hi, be_parameter const ¶m, const instruction &inst) { int saveflags = ((inst.flags() & FLAG_Z) != 0); if (param.is_immediate()) { - if (inst.flags() == 0 && (uint32_t)param.immediate() == 0) + if (!inst.flags() && u32(param.immediate()) == 0) ;// skip - else if (inst.flags() == 0 && (uint32_t)param.immediate() == 0xffffffff) - emit_mov_m32_imm(dst, memref, ~0); // mov [dest],-1 + else if 
(!inst.flags() && u32(param.immediate()) == 0xffffffffU) + a.mov(memref_lo, ~0); // mov [dest],-1 else - emit_or_m32_imm(dst, memref, param.immediate()); // or [dest],param - if (saveflags) emit_pushf(dst); // pushf - if (inst.flags() == 0 && (uint32_t)(param.immediate() >> 32) == 0) + a.or_(memref_lo, param.immediate()); // or [dest],param + if (saveflags) a.pushfd(); // pushf + if (!inst.flags() && u32(param.immediate() >> 32) == 0) ;// skip - else if (inst.flags() == 0 && (uint32_t)(param.immediate() >> 32) == 0xffffffff) - emit_mov_m32_imm(dst, memref + 4, ~0); // mov [dest+4],-1 + else if (!inst.flags() && u32(param.immediate() >> 32) == 0xffffffffU) + a.mov(memref_hi, ~0); // mov [dest+4],-1 else - emit_or_m32_imm(dst, memref + 4, param.immediate() >> 32); // or [dest+4],param >> 32 + a.or_(memref_hi, param.immediate() >> 32); // or [dest+4],param >> 32 } else { - int reglo = (param.is_int_register()) ? param.ireg() : REG_EAX; - emit_mov_r64_p64(dst, reglo, REG_EDX, param); // mov edx:reglo,param - emit_or_m32_r32(dst, memref, reglo); // or [dest],reglo - if (saveflags) emit_pushf(dst); // pushf - emit_or_m32_r32(dst, memref + 4, REG_EDX); // or [dest+4],edx + Gp const reglo = (param.is_int_register()) ? Gpd(param.ireg()) : eax; + emit_mov_r64_p64(a, reglo, edx, param); // mov edx:reglo,param + a.or_(memref_lo, reglo); // or [dest],reglo + if (saveflags) a.pushfd(); // pushf + a.or_(memref_hi, edx); // or [dest+4],edx } if (saveflags) - emit_combine_z_flags(dst); + emit_combine_z_flags(a); } @@ -2232,39 +1339,39 @@ void drcbe_x86::emit_or_m64_p64(x86code *&dst, x86_memref memref, const be_param // pair of registers from a 64-bit parameter //------------------------------------------------- -void drcbe_x86::emit_xor_r64_p64(x86code *&dst, uint8_t reglo, uint8_t reghi, const be_parameter ¶m, const instruction &inst) +void drcbe_x86::emit_xor_r64_p64(Assembler &a, Gp const ®lo, Gp const ®hi, be_parameter const ¶m, const instruction &inst) { int saveflags = ((inst.flags() & FLAG_Z) != 0); if (param.is_memory()) { - emit_xor_r32_m32(dst, reglo, MABS(param.memory())); // xor reglo,[param] - if (saveflags) emit_pushf(dst); // pushf - emit_xor_r32_m32(dst, reghi, MABS(param.memory(4))); // xor reghi,[param] + a.xor_(reglo, MABS(param.memory(0))); // xor reglo,[param] + if (saveflags) a.pushfd(); // pushf + a.xor_(reghi, MABS(param.memory(4))); // xor reghi,[param] } else if (param.is_immediate()) { - if (inst.flags() == 0 && (uint32_t)param.immediate() == 0) + if (!inst.flags() && u32(param.immediate()) == 0) ;// skip - else if (inst.flags() == 0 && (uint32_t)param.immediate() == 0xffffffff) - emit_not_r32(dst, reglo); // not reglo + else if (!inst.flags() && u32(param.immediate()) == 0xffffffffU) + a.not_(reglo); // not reglo else - emit_xor_r32_imm(dst, reglo, param.immediate()); // xor reglo,param - if (saveflags) emit_pushf(dst); // pushf - if (inst.flags() == 0 && (uint32_t)(param.immediate() >> 32) == 0) + a.xor_(reglo, param.immediate()); // xor reglo,param + if (saveflags) a.pushfd(); // pushf + if (!inst.flags() && u32(param.immediate() >> 32) == 0) ;// skip - else if (inst.flags() == 0 && (uint32_t)(param.immediate() >> 32) == 0xffffffff) - emit_not_r32(dst, reghi); // not reghi + else if (!inst.flags() && u32(param.immediate() >> 32) == 0xffffffffU) + a.not_(reghi); // not reghi else - emit_xor_r32_imm(dst, reghi, param.immediate() >> 32); // xor reghi,param >> 32 + a.xor_(reghi, param.immediate() >> 32); // xor reghi,param >> 32 } else if (param.is_int_register()) { - 
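// Illustrative sketch (not part of the patch): the XOR helpers rewrite
// xor-with-all-ones as a NOT and skip xor-with-zero entirely when no flags are
// needed; the value is the same either way, only the encoding is cheaper.

#include <cassert>
#include <cstdint>

int main()
{
    uint32_t x = 0x12345678u;
    assert((x ^ 0xffffffffu) == ~x);  // not reg is equivalent to xor reg,~0
    assert((x ^ 0u) == x);            // xor reg,0 changes nothing (flags aside)
    return 0;
}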
emit_xor_r32_r32(dst, reglo, param.ireg()); // xor reglo,param - if (saveflags) emit_pushf(dst); // pushf - emit_xor_r32_m32(dst, reghi, MABS(m_reghi[param.ireg()])); // xor reghi,reghi[param] + a.xor_(reglo, Gpd(param.ireg())); // xor reglo,param + if (saveflags) a.pushfd(); // pushf + a.xor_(reghi, MABS(m_reghi[param.ireg()])); // xor reghi,reghi[param] } if (saveflags) - emit_combine_z_flags(dst); + emit_combine_z_flags(a); } @@ -2273,35 +1380,35 @@ void drcbe_x86::emit_xor_r64_p64(x86code *&dst, uint8_t reglo, uint8_t reghi, co // memory location from a 64-bit parameter //------------------------------------------------- -void drcbe_x86::emit_xor_m64_p64(x86code *&dst, x86_memref memref, const be_parameter ¶m, const instruction &inst) +void drcbe_x86::emit_xor_m64_p64(Assembler &a, Mem const &memref_lo, Mem const &memref_hi, be_parameter const ¶m, const instruction &inst) { int saveflags = ((inst.flags() & FLAG_Z) != 0); if (param.is_immediate()) { - if (inst.flags() == 0 && (uint32_t)param.immediate() == 0) + if (!inst.flags() && u32(param.immediate()) == 0) ;// skip - else if (inst.flags() == 0 && (uint32_t)param.immediate() == 0xffffffff) - emit_not_m32(dst, memref); // not [dest] + else if (!inst.flags() && u32(param.immediate()) == 0xffffffffU) + a.not_(memref_lo); // not [dest] else - emit_xor_m32_imm(dst, memref, param.immediate()); // xor [dest],param - if (saveflags) emit_pushf(dst); // pushf - if (inst.flags() == 0 && (uint32_t)(param.immediate() >> 32) == 0) + a.xor_(memref_lo, param.immediate()); // xor [dest],param + if (saveflags) a.pushfd(); // pushf + if (!inst.flags() && u32(param.immediate() >> 32) == 0) ;// skip - else if (inst.flags() == 0 && (uint32_t)(param.immediate() >> 32) == 0xffffffff) - emit_not_m32(dst, memref + 4); // not [dest+4] + else if (!inst.flags() && u32(param.immediate() >> 32) == 0xffffffffU) + a.not_(memref_hi); // not [dest+4] else - emit_xor_m32_imm(dst, memref + 4, param.immediate() >> 32); // xor [dest+4],param >> 32 + a.xor_(memref_hi, param.immediate() >> 32); // xor [dest+4],param >> 32 } else { - int reglo = (param.is_int_register()) ? param.ireg() : REG_EAX; - emit_mov_r64_p64(dst, reglo, REG_EDX, param); // mov edx:reglo,param - emit_xor_m32_r32(dst, memref, reglo); // xor [dest],reglo - if (saveflags) emit_pushf(dst); // pushf - emit_xor_m32_r32(dst, memref + 4, REG_EDX); // xor [dest+4],edx + Gp const reglo = (param.is_int_register()) ? 
Gpd(param.ireg()) : eax; + emit_mov_r64_p64(a, reglo, edx, param); // mov edx:reglo,param + a.xor_(memref_lo, reglo); // xor [dest],reglo + if (saveflags) a.pushfd(); // pushf + a.xor_(memref_hi, edx); // xor [dest+4],edx } if (saveflags) - emit_combine_z_flags(dst); + emit_combine_z_flags(a); } @@ -2310,13 +1417,13 @@ void drcbe_x86::emit_xor_m64_p64(x86code *&dst, x86_memref memref, const be_para // pair of registers from a 64-bit parameter //------------------------------------------------- -void drcbe_x86::emit_shl_r64_p64(x86code *&dst, uint8_t reglo, uint8_t reghi, const be_parameter ¶m, const instruction &inst) +void drcbe_x86::emit_shl_r64_p64(Assembler &a, Gp const ®lo, Gp const ®hi, be_parameter const ¶m, const instruction &inst) { int saveflags = (inst.flags() != 0); if (param.is_immediate()) { int count = param.immediate() & 63; - if (inst.flags() == 0 && count == 0) + if (!inst.flags() && count == 0) ;// skip else { @@ -2324,55 +1431,58 @@ void drcbe_x86::emit_shl_r64_p64(x86code *&dst, uint8_t reglo, uint8_t reghi, co { if (inst.flags() != 0) { - emit_shld_r32_r32_imm(dst, reghi, reglo, 31); // shld reghi,reglo,31 - emit_shl_r32_imm(dst, reglo, 31); // shl reglo,31 + a.shld(reghi, reglo, 31); // shld reghi,reglo,31 + a.shl(reglo, 31); // shl reglo,31 count -= 31; } else { - emit_mov_r32_r32(dst, reghi, reglo); // mov reghi,reglo - emit_xor_r32_r32(dst, reglo, reglo); // xor reglo,reglo + a.mov(reghi, reglo); // mov reghi,reglo + a.xor_(reglo, reglo); // xor reglo,reglo count -= 32; } } if (inst.flags() != 0 || count > 0) { - emit_shld_r32_r32_imm(dst, reghi, reglo, count); // shld reghi,reglo,count - if (saveflags) emit_pushf(dst); // pushf - emit_shl_r32_imm(dst, reglo, count); // shl reglo,count + a.shld(reghi, reglo, count); // shld reghi,reglo,count + if (saveflags) a.pushfd(); // pushf + a.shl(reglo, count); // shl reglo,count } } } else { - emit_link skip1, skip2; - emit_mov_r32_p32(dst, REG_ECX, param); // mov ecx,param - emit_test_r32_imm(dst, REG_ECX, 0x20); // test ecx,0x20 - emit_jcc_short_link(dst, x86emit::COND_Z, skip1); // jz skip1 + Label skip1 = a.newLabel(); + Label skip2 = a.newLabel(); + emit_mov_r32_p32(a, ecx, param); // mov ecx,param + a.test(ecx, 0x20); // test ecx,0x20 + a.short_().jz(skip1); // jz skip1 if (inst.flags() != 0) { - emit_sub_r32_imm(dst, REG_ECX, 31); // sub ecx,31 - emit_shld_r32_r32_imm(dst, reghi, reglo, 31); // shld reghi,reglo,31 - emit_shl_r32_imm(dst, reglo, 31); // shl reglo,31 - emit_test_r32_imm(dst, REG_ECX, 0x20); // test ecx,0x20 - emit_jcc_short_link(dst, x86emit::COND_Z, skip2); // jz skip2 - emit_sub_r32_imm(dst, REG_ECX, 31); // sub ecx,31 - emit_shld_r32_r32_imm(dst, reghi, reglo, 31); // shld reghi,reglo,31 - emit_shl_r32_imm(dst, reglo, 31); // shl reglo,31 - track_resolve_link(dst, skip2); // skip2: + a.sub(ecx, 31); // sub ecx,31 + a.shld(reghi, reglo, 31); // shld reghi,reglo,31 + a.shl(reglo, 31); // shl reglo,31 + a.test(ecx, 0x20); // test ecx,0x20 + a.short_().jz(skip2); // jz skip2 + a.sub(ecx, 31); // sub ecx,31 + a.shld(reghi, reglo, 31); // shld reghi,reglo,31 + a.shl(reglo, 31); // shl reglo,31 + a.bind(skip2); // skip2: + reset_last_upper_lower_reg(); } else { - emit_mov_r32_r32(dst, reghi, reglo); // mov reghi,reglo - emit_xor_r32_r32(dst, reglo, reglo); // xor reglo,reglo + a.mov(reghi, reglo); // mov reghi,reglo + a.xor_(reglo, reglo); // xor reglo,reglo } - track_resolve_link(dst, skip1); // skip1: - emit_shld_r32_r32_cl(dst, reghi, reglo); // shld reghi,reglo,cl - if (saveflags) emit_pushf(dst); // 
pushf - emit_shl_r32_cl(dst, reglo); // shl reglo,cl + a.bind(skip1); // skip1: + reset_last_upper_lower_reg(); + a.shld(reghi, reglo, cl); // shld reghi,reglo,cl + if (saveflags) a.pushfd(); // pushf + a.shl(reglo, cl); // shl reglo,cl } if (saveflags) - emit_combine_z_shl_flags(dst); + emit_combine_z_shl_flags(a); } @@ -2381,13 +1491,13 @@ void drcbe_x86::emit_shl_r64_p64(x86code *&dst, uint8_t reglo, uint8_t reghi, co // pair of registers from a 64-bit parameter //------------------------------------------------- -void drcbe_x86::emit_shr_r64_p64(x86code *&dst, uint8_t reglo, uint8_t reghi, const be_parameter ¶m, const instruction &inst) +void drcbe_x86::emit_shr_r64_p64(Assembler &a, Gp const ®lo, Gp const ®hi, be_parameter const ¶m, const instruction &inst) { int saveflags = ((inst.flags() & FLAG_Z) != 0); if (param.is_immediate()) { int count = param.immediate() & 63; - if (inst.flags() == 0 && count == 0) + if (!inst.flags() && count == 0) ;// skip else { @@ -2395,55 +1505,58 @@ void drcbe_x86::emit_shr_r64_p64(x86code *&dst, uint8_t reglo, uint8_t reghi, co { if (inst.flags() != 0) { - emit_shrd_r32_r32_imm(dst, reglo, reghi, 31); // shrd reglo,reghi,31 - emit_shr_r32_imm(dst, reghi, 31); // shr reghi,31 + a.shrd(reglo, reghi, 31); // shrd reglo,reghi,31 + a.shr(reghi, 31); // shr reghi,31 count -= 31; } else { - emit_mov_r32_r32(dst, reglo, reghi); // mov reglo,reghi - emit_xor_r32_r32(dst, reghi, reghi); // xor reghi,reghi + a.mov(reglo, reghi); // mov reglo,reghi + a.xor_(reghi, reghi); // xor reghi,reghi count -= 32; } } if (inst.flags() != 0 || count > 0) { - emit_shrd_r32_r32_imm(dst, reglo, reghi, count); // shrd reglo,reghi,count - if (saveflags) emit_pushf(dst); // pushf - emit_shr_r32_imm(dst, reghi, count); // shr reghi,count + a.shrd(reglo, reghi, count); // shrd reglo,reghi,count + if (saveflags) a.pushfd(); // pushf + a.shr(reghi, count); // shr reghi,count } } } else { - emit_link skip1, skip2; - emit_mov_r32_p32(dst, REG_ECX, param); // mov ecx,param - emit_test_r32_imm(dst, REG_ECX, 0x20); // test ecx,0x20 - emit_jcc_short_link(dst, x86emit::COND_Z, skip1); // jz skip1 + Label skip1 = a.newLabel(); + Label skip2 = a.newLabel(); + emit_mov_r32_p32(a, ecx, param); // mov ecx,param + a.test(ecx, 0x20); // test ecx,0x20 + a.short_().jz(skip1); // jz skip1 if (inst.flags() != 0) { - emit_sub_r32_imm(dst, REG_ECX, 31); // sub ecx,31 - emit_shrd_r32_r32_imm(dst, reglo, reghi, 31); // shrd reglo,reghi,31 - emit_shr_r32_imm(dst, reghi, 31); // shr reghi,31 - emit_test_r32_imm(dst, REG_ECX, 0x20); // test ecx,0x20 - emit_jcc_short_link(dst, x86emit::COND_Z, skip2); // jz skip2 - emit_sub_r32_imm(dst, REG_ECX, 31); // sub ecx,31 - emit_shrd_r32_r32_imm(dst, reglo, reghi, 31); // shrd reglo,reghi,31 - emit_shr_r32_imm(dst, reghi, 31); // shr reghi,31 - track_resolve_link(dst, skip2); // skip2: + a.sub(ecx, 31); // sub ecx,31 + a.shrd(reglo, reghi, 31); // shrd reglo,reghi,31 + a.shr(reghi, 31); // shr reghi,31 + a.test(ecx, 0x20); // test ecx,0x20 + a.short_().jz(skip2); // jz skip2 + a.sub(ecx, 31); // sub ecx,31 + a.shrd(reglo, reghi, 31); // shrd reglo,reghi,31 + a.shr(reghi, 31); // shr reghi,31 + a.bind(skip2); // skip2: + reset_last_upper_lower_reg(); } else { - emit_mov_r32_r32(dst, reglo, reghi); // mov reglo,reghi - emit_xor_r32_r32(dst, reghi, reghi); // xor reghi,reghi + a.mov(reglo, reghi); // mov reglo,reghi + a.xor_(reghi, reghi); // xor reghi,reghi } - track_resolve_link(dst, skip1); // skip1: - emit_shrd_r32_r32_cl(dst, reglo, reghi); // shrd reglo,reghi,cl - 
if (saveflags) emit_pushf(dst); // pushf - emit_shr_r32_cl(dst, reghi); // shr reghi,cl + a.bind(skip1); // skip1: + reset_last_upper_lower_reg(); + a.shrd(reglo, reghi, cl); // shrd reglo,reghi,cl + if (saveflags) a.pushfd(); // pushf + a.shr(reghi, cl); // shr reghi,cl } if (saveflags) - emit_combine_z_flags(dst); + emit_combine_z_flags(a); } @@ -2452,13 +1565,13 @@ void drcbe_x86::emit_shr_r64_p64(x86code *&dst, uint8_t reglo, uint8_t reghi, co // pair of registers from a 64-bit parameter //------------------------------------------------- -void drcbe_x86::emit_sar_r64_p64(x86code *&dst, uint8_t reglo, uint8_t reghi, const be_parameter ¶m, const instruction &inst) +void drcbe_x86::emit_sar_r64_p64(Assembler &a, Gp const ®lo, Gp const ®hi, be_parameter const ¶m, const instruction &inst) { int saveflags = ((inst.flags() & FLAG_Z) != 0); if (param.is_immediate()) { int count = param.immediate() & 63; - if (inst.flags() == 0 && count == 0) + if (!inst.flags() && count == 0) ;// skip else { @@ -2466,55 +1579,58 @@ void drcbe_x86::emit_sar_r64_p64(x86code *&dst, uint8_t reglo, uint8_t reghi, co { if (inst.flags() != 0) { - emit_shrd_r32_r32_imm(dst, reglo, reghi, 31); // shrd reglo,reghi,31 - emit_sar_r32_imm(dst, reghi, 31); // sar reghi,31 + a.shrd(reglo, reghi, 31); // shrd reglo,reghi,31 + a.sar(reghi, 31); // sar reghi,31 count -= 31; } else { - emit_mov_r32_r32(dst, reglo, reghi); // mov reglo,reghi - emit_sar_r32_imm(dst, reghi, 31); // sar reghi,31 + a.mov(reglo, reghi); // mov reglo,reghi + a.sar(reghi, 31); // sar reghi,31 count -= 32; } } if (inst.flags() != 0 || count > 0) { - emit_shrd_r32_r32_imm(dst, reglo, reghi, count); // shrd reglo,reghi,count - if (saveflags) emit_pushf(dst); // pushf - emit_sar_r32_imm(dst, reghi, count); // sar reghi,count + a.shrd(reglo, reghi, count); // shrd reglo,reghi,count + if (saveflags) a.pushfd(); // pushf + a.sar(reghi, count); // sar reghi,count } } } else { - emit_link skip1, skip2; - emit_mov_r32_p32(dst, REG_ECX, param); // mov ecx,param - emit_test_r32_imm(dst, REG_ECX, 0x20); // test ecx,0x20 - emit_jcc_short_link(dst, x86emit::COND_Z, skip1); // jz skip1 + Label skip1 = a.newLabel(); + Label skip2 = a.newLabel(); + emit_mov_r32_p32(a, ecx, param); // mov ecx,param + a.test(ecx, 0x20); // test ecx,0x20 + a.short_().jz(skip1); // jz skip1 if (inst.flags() != 0) { - emit_sub_r32_imm(dst, REG_ECX, 31); // sub ecx,31 - emit_shrd_r32_r32_imm(dst, reglo, reghi, 31); // shrd reglo,reghi,31 - emit_sar_r32_imm(dst, reghi, 31); // sar reghi,31 - emit_test_r32_imm(dst, REG_ECX, 0x20); // test ecx,0x20 - emit_jcc_short_link(dst, x86emit::COND_Z, skip2); // jz skip - emit_sub_r32_imm(dst, REG_ECX, 31); // sub ecx,31 - emit_shrd_r32_r32_imm(dst, reglo, reghi, 31); // shrd reglo,reghi,31 - emit_sar_r32_imm(dst, reghi, 31); // sar reghi,31 - track_resolve_link(dst, skip2); // skip2: + a.sub(ecx, 31); // sub ecx,31 + a.shrd(reglo, reghi, 31); // shrd reglo,reghi,31 + a.sar(reghi, 31); // sar reghi,31 + a.test(ecx, 0x20); // test ecx,0x20 + a.short_().jz(skip2); // jz skip + a.sub(ecx, 31); // sub ecx,31 + a.shrd(reglo, reghi, 31); // shrd reglo,reghi,31 + a.sar(reghi, 31); // sar reghi,31 + a.bind(skip2); // skip2: + reset_last_upper_lower_reg(); } else { - emit_mov_r32_r32(dst, reglo, reghi); // mov reglo,reghi - emit_sar_r32_imm(dst, reghi, 31); // sar reghi,31 + a.mov(reglo, reghi); // mov reglo,reghi + a.sar(reghi, 31); // sar reghi,31 } - track_resolve_link(dst, skip1); // skip1: - emit_shrd_r32_r32_cl(dst, reglo, reghi); // shrd reglo,reghi,cl - if 
(saveflags) emit_pushf(dst); // pushf - emit_sar_r32_cl(dst, reghi); // sar reghi,cl + a.bind(skip1); // skip1: + reset_last_upper_lower_reg(); + a.shrd(reglo, reghi, cl); // shrd reglo,reghi,cl + if (saveflags) a.pushfd(); // pushf + a.sar(reghi, cl); // sar reghi,cl } if (saveflags) - emit_combine_z_flags(dst); + emit_combine_z_flags(a); } @@ -2523,13 +1639,13 @@ void drcbe_x86::emit_sar_r64_p64(x86code *&dst, uint8_t reglo, uint8_t reghi, co // pair of registers from a 64-bit parameter //------------------------------------------------- -void drcbe_x86::emit_rol_r64_p64(x86code *&dst, uint8_t reglo, uint8_t reghi, const be_parameter ¶m, const instruction &inst) +void drcbe_x86::emit_rol_r64_p64(Assembler &a, Gp const ®lo, Gp const ®hi, be_parameter const ¶m, const instruction &inst) { int saveflags = ((inst.flags() & FLAG_Z) != 0); if (param.is_immediate()) { int count = param.immediate() & 63; - if (inst.flags() == 0 && count == 0) + if (!inst.flags() && count == 0) ;// skip else { @@ -2537,59 +1653,61 @@ void drcbe_x86::emit_rol_r64_p64(x86code *&dst, uint8_t reglo, uint8_t reghi, co { if (inst.flags() != 0) { - emit_mov_r32_r32(dst, REG_ECX, reglo); // mov ecx,reglo - emit_shld_r32_r32_imm(dst, reglo, reghi, 31); // shld reglo,reghi,31 - emit_shld_r32_r32_imm(dst, reghi, REG_ECX, 31); // shld reghi,ecx,31 + a.mov(ecx, reglo); // mov ecx,reglo + a.shld(reglo, reghi, 31); // shld reglo,reghi,31 + a.shld(reghi, ecx, 31); // shld reghi,ecx,31 count -= 31; } else { - emit_xchg_r32_r32(dst, reghi, reglo); // xchg reghi,reglo + a.xchg(reghi, reglo); // xchg reghi,reglo count -= 32; } } if (inst.flags() != 0 || count > 0) { - emit_mov_r32_r32(dst, REG_ECX, reglo); // mov ecx,reglo - emit_shld_r32_r32_imm(dst, reglo, reghi, count); // shld reglo,reghi,count - if (saveflags) emit_pushf(dst); // pushf - emit_shld_r32_r32_imm(dst, reghi, REG_ECX, count); // shld reghi,ecx,count + a.mov(ecx, reglo); // mov ecx,reglo + a.shld(reglo, reghi, count); // shld reglo,reghi,count + if (saveflags) a.pushfd(); // pushf + a.shld(reghi, ecx, count); // shld reghi,ecx,count } } } else { - emit_link skip1, skip2; - int tempreg = REG_EBX; - emit_mov_m32_r32(dst, MBD(REG_ESP, -8), tempreg); // mov [esp-8],ebx - emit_mov_r32_p32(dst, REG_ECX, param); // mov ecx,param - emit_test_r32_imm(dst, REG_ECX, 0x20); // test ecx,0x20 - emit_jcc_short_link(dst, x86emit::COND_Z, skip1); // jz skip1 + Label skip1 = a.newLabel(); + Label skip2 = a.newLabel(); + a.mov(ptr(esp, -8), ebx); // mov [esp-8],ebx + emit_mov_r32_p32(a, ecx, param); // mov ecx,param + a.test(ecx, 0x20); // test ecx,0x20 + a.short_().jz(skip1); // jz skip1 if (inst.flags() != 0) { - emit_sub_r32_imm(dst, REG_ECX, 31); // sub ecx,31 - emit_mov_r32_r32(dst, tempreg, reglo); // mov ebx,reglo - emit_shld_r32_r32_imm(dst, reglo, reghi, 31); // shld reglo,reghi,31 - emit_shld_r32_r32_imm(dst, reghi, tempreg, 31); // shld reghi,ebx,31 - emit_test_r32_imm(dst, REG_ECX, 0x20); // test ecx,0x20 - emit_jcc_short_link(dst, x86emit::COND_Z, skip2); // jz skip2 - emit_sub_r32_imm(dst, REG_ECX, 31); // sub ecx,31 - emit_mov_r32_r32(dst, tempreg, reglo); // mov ebx,reglo - emit_shld_r32_r32_imm(dst, reglo, reghi, 31); // shld reglo,reghi,31 - emit_shld_r32_r32_imm(dst, reghi, tempreg, 31); // shld reghi,ebx,31 - track_resolve_link(dst, skip2); // skip2: + a.sub(ecx, 31); // sub ecx,31 + a.mov(ebx, reglo); // mov ebx,reglo + a.shld(reglo, reghi, 31); // shld reglo,reghi,31 + a.shld(reghi, ebx, 31); // shld reghi,ebx,31 + a.test(ecx, 0x20); // test ecx,0x20 + 
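// Illustrative sketch (assumes asmjit's x86::Assembler exactly as used in the
// surrounding code; the helper name is hypothetical): the old emit_link /
// emit_jcc_short_link / track_resolve_link trio becomes an asmjit Label that
// can be referenced before it is bound, as these shift helpers now do.

void example_emit_skip_if_zero(asmjit::x86::Assembler &a)
{
    using namespace asmjit::x86;

    asmjit::Label skip = a.newLabel();   // forward-declared branch target
    a.test(ecx, ecx);                    // test ecx,ecx
    a.short_().jz(skip);                 // jz skip (short forward branch)
    a.shld(edx, eax, cl);                // shld edx,eax,cl
    a.shl(eax, cl);                      // shl eax,cl
    a.bind(skip);                        // skip: - resolves the earlier jz
}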
a.short_().jz(skip2); // jz skip2 + a.sub(ecx, 31); // sub ecx,31 + a.mov(ebx, reglo); // mov ebx,reglo + a.shld(reglo, reghi, 31); // shld reglo,reghi,31 + a.shld(reghi, ebx, 31); // shld reghi,ebx,31 + a.bind(skip2); // skip2: + reset_last_upper_lower_reg(); } else - emit_xchg_r32_r32(dst, reghi, reglo); // xchg reghi,reglo - track_resolve_link(dst, skip1); // skip1: - emit_mov_r32_r32(dst, tempreg, reglo); // mov ebx,reglo - emit_shld_r32_r32_cl(dst, reglo, reghi); // shld reglo,reghi,cl - if (saveflags) emit_pushf(dst); // pushf - emit_shld_r32_r32_cl(dst, reghi, tempreg); // shld reghi,ebx,cl - emit_mov_r32_m32(dst, tempreg, MBD(REG_ESP, saveflags ? -4 : -8)); // mov ebx,[esp-8] + a.xchg(reghi, reglo); // xchg reghi,reglo + a.bind(skip1); // skip1: + reset_last_upper_lower_reg(); + a.mov(ebx, reglo); // mov ebx,reglo + a.shld(reglo, reghi, cl); // shld reglo,reghi,cl + if (saveflags) a.pushfd(); // pushf + a.shld(reghi, ebx, cl); // shld reghi,ebx,cl + a.mov(ebx, ptr(esp, saveflags ? -4 : -8)); // mov ebx,[esp-8] } if (saveflags) - emit_combine_z_flags(dst); + emit_combine_z_flags(a); } @@ -2598,13 +1716,13 @@ void drcbe_x86::emit_rol_r64_p64(x86code *&dst, uint8_t reglo, uint8_t reghi, co // pair of registers from a 64-bit parameter //------------------------------------------------- -void drcbe_x86::emit_ror_r64_p64(x86code *&dst, uint8_t reglo, uint8_t reghi, const be_parameter ¶m, const instruction &inst) +void drcbe_x86::emit_ror_r64_p64(Assembler &a, Gp const ®lo, Gp const ®hi, be_parameter const ¶m, const instruction &inst) { int saveflags = ((inst.flags() & FLAG_Z) != 0); if (param.is_immediate()) { int count = param.immediate() & 63; - if (inst.flags() == 0 && count == 0) + if (!inst.flags() && count == 0) ;// skip else { @@ -2612,59 +1730,61 @@ void drcbe_x86::emit_ror_r64_p64(x86code *&dst, uint8_t reglo, uint8_t reghi, co { if (inst.flags() != 0) { - emit_mov_r32_r32(dst, REG_ECX, reglo); // mov ecx,reglo - emit_shrd_r32_r32_imm(dst, reglo, reghi, 31); // shrd reglo,reghi,31 - emit_shrd_r32_r32_imm(dst, reghi, REG_ECX, 31); // shrd reghi,ecx,31 + a.mov(ecx, reglo); // mov ecx,reglo + a.shrd(reglo, reghi, 31); // shrd reglo,reghi,31 + a.shrd(reghi, ecx, 31); // shrd reghi,ecx,31 count -= 31; } else { - emit_xchg_r32_r32(dst, reghi, reglo); // xchg reghi,reglo + a.xchg(reghi, reglo); // xchg reghi,reglo count -= 32; } } if (inst.flags() != 0 || count > 0) { - emit_mov_r32_r32(dst, REG_ECX, reglo); // mov ecx,reglo - emit_shrd_r32_r32_imm(dst, reglo, reghi, count); // shrd reglo,reghi,count - if (saveflags) emit_pushf(dst); // pushf - emit_shrd_r32_r32_imm(dst, reghi, REG_ECX, count); // shrd reghi,ecx,count + a.mov(ecx, reglo); // mov ecx,reglo + a.shrd(reglo, reghi, count); // shrd reglo,reghi,count + if (saveflags) a.pushfd(); // pushf + a.shrd(reghi, ecx, count); // shrd reghi,ecx,count } } } else { - emit_link skip1, skip2; - int tempreg = REG_EBX; - emit_mov_m32_r32(dst, MBD(REG_ESP, -8), tempreg); // mov [esp-8],ebx - emit_mov_r32_p32(dst, REG_ECX, param); // mov ecx,param - emit_test_r32_imm(dst, REG_ECX, 0x20); // test ecx,0x20 - emit_jcc_short_link(dst, x86emit::COND_Z, skip1); // jz skip1 + Label skip1 = a.newLabel(); + Label skip2 = a.newLabel(); + a.mov(ptr(esp, -8), ebx); // mov [esp-8],ebx + emit_mov_r32_p32(a, ecx, param); // mov ecx,param + a.test(ecx, 0x20); // test ecx,0x20 + a.short_().jz(skip1); // jz skip1 if (inst.flags() != 0) { - emit_sub_r32_imm(dst, REG_ECX, 31); // sub ecx,31 - emit_mov_r32_r32(dst, tempreg, reglo); // mov ebx,reglo - 
emit_shrd_r32_r32_imm(dst, reglo, reghi, 31); // shrd reglo,reghi,31 - emit_shrd_r32_r32_imm(dst, reghi, tempreg, 31); // shrd reghi,ebx,31 - emit_test_r32_imm(dst, REG_ECX, 0x20); // test ecx,0x20 - emit_jcc_short_link(dst, x86emit::COND_Z, skip2); // jz skip2 - emit_sub_r32_imm(dst, REG_ECX, 31); // sub ecx,31 - emit_mov_r32_r32(dst, tempreg, reglo); // mov ebx,reglo - emit_shrd_r32_r32_imm(dst, reglo, reghi, 31); // shrd reglo,reghi,31 - emit_shrd_r32_r32_imm(dst, reghi, tempreg, 31); // shrd reghi,ebx,31 - track_resolve_link(dst, skip2); // skip2: + a.sub(ecx, 31); // sub ecx,31 + a.mov(ebx, reglo); // mov ebx,reglo + a.shrd(reglo, reghi, 31); // shrd reglo,reghi,31 + a.shrd(reghi, ebx, 31); // shrd reghi,ebx,31 + a.test(ecx, 0x20); // test ecx,0x20 + a.short_().jz(skip2); // jz skip2 + a.sub(ecx, 31); // sub ecx,31 + a.mov(ebx, reglo); // mov ebx,reglo + a.shrd(reglo, reghi, 31); // shrd reglo,reghi,31 + a.shrd(reghi, ebx, 31); // shrd reghi,ebx,31 + a.bind(skip2); // skip2: + reset_last_upper_lower_reg(); } else - emit_xchg_r32_r32(dst, reghi, reglo); // xchg reghi,reglo - track_resolve_link(dst, skip1); // skip1: - emit_mov_r32_r32(dst, tempreg, reglo); // mov ebx,reglo - emit_shrd_r32_r32_cl(dst, reglo, reghi); // shrd reglo,reghi,cl - if (saveflags) emit_pushf(dst); // pushf - emit_shrd_r32_r32_cl(dst, reghi, tempreg); // shrd reghi,ebx,cl - emit_mov_r32_m32(dst, tempreg, MBD(REG_ESP, saveflags ? -4 : -8)); // mov ebx,[esp-8] + a.xchg(reghi, reglo); // xchg reghi,reglo + a.bind(skip1); // skip1: + reset_last_upper_lower_reg(); + a.mov(ebx, reglo); // mov ebx,reglo + a.shrd(reglo, reghi, cl); // shrd reglo,reghi,cl + if (saveflags) a.pushfd(); // pushf + a.shrd(reghi, ebx, cl); // shrd reghi,ebx,cl + a.mov(ebx, ptr(esp, saveflags ? -4 : -8)); // mov ebx,[esp-8] } if (saveflags) - emit_combine_z_flags(dst); + emit_combine_z_flags(a); } @@ -2673,39 +1793,43 @@ void drcbe_x86::emit_ror_r64_p64(x86code *&dst, uint8_t reglo, uint8_t reghi, co // pair of registers from a 64-bit parameter //------------------------------------------------- -void drcbe_x86::emit_rcl_r64_p64(x86code *&dst, uint8_t reglo, uint8_t reghi, const be_parameter ¶m, const instruction &inst) +void drcbe_x86::emit_rcl_r64_p64(Assembler &a, Gp const ®lo, Gp const ®hi, be_parameter const ¶m, const instruction &inst) { int saveflags = ((inst.flags() & FLAG_Z) != 0); - emit_link skipall, skiploop; - x86code *loop; + Label loop = a.newLabel(); + Label skipall = a.newLabel(); + Label skiploop = a.newLabel(); - emit_mov_r32_p32_keepflags(dst, REG_ECX, param); // mov ecx,param + emit_mov_r32_p32_keepflags(a, ecx, param); // mov ecx,param if (!saveflags) { - loop = dst; // loop: - emit_jecxz_link(dst, skipall); // jecxz skipall - emit_lea_r32_m32(dst, REG_ECX, MBD(REG_ECX, -1)); // lea ecx,[ecx-1] - emit_rcl_r32_imm(dst, reglo, 1); // rcl reglo,1 - emit_rcl_r32_imm(dst, reghi, 1); // rcl reghi,1 - emit_jmp(dst, loop); // jmp loop - track_resolve_link(dst, skipall); // skipall: + a.bind(loop); // loop: + a.jecxz(skipall); // jecxz skipall + a.lea(ecx, ptr(ecx, -1)); // lea ecx,[ecx-1] + a.rcl(reglo, 1); // rcl reglo,1 + a.rcl(reghi, 1); // rcl reghi,1 + a.jmp(loop); // jmp loop + a.bind(skipall); // skipall: + reset_last_upper_lower_reg(); } else { - emit_jecxz_link(dst, skipall); // jecxz skipall - emit_lea_r32_m32(dst, REG_ECX, MBD(REG_ECX, -1)); // lea ecx,[ecx-1] - loop = dst; // loop: - emit_jecxz_link(dst, skiploop); // jecxz skiploop - emit_lea_r32_m32(dst, REG_ECX, MBD(REG_ECX, -1)); // lea ecx,[ecx-1] - 
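// Illustrative sketch (assumes the same Assembler context; hypothetical helper
// name): the RCL/RCR paths rotate one bit per iteration so the carry flag chains
// the two halves, and step ecx with LEA because LEA leaves every flag - including
// the carry being rotated through - untouched.

void example_emit_rcl64_loop(asmjit::x86::Assembler &a,
        asmjit::x86::Gp const &lo, asmjit::x86::Gp const &hi)
{
    using namespace asmjit::x86;

    asmjit::Label loop = a.newLabel();
    asmjit::Label done = a.newLabel();

    a.bind(loop);               // loop:
    a.jecxz(done);              // jecxz done       - stop once ecx reaches 0
    a.lea(ecx, ptr(ecx, -1));   // lea ecx,[ecx-1]  - decrement without touching CF
    a.rcl(lo, 1);               // rcl lo,1         - low half rotates through carry
    a.rcl(hi, 1);               // rcl hi,1         - carry out feeds the high half
    a.jmp(loop);                // jmp loop
    a.bind(done);               // done:
}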
emit_rcl_r32_imm(dst, reglo, 1); // rcl reglo,1 - emit_rcl_r32_imm(dst, reghi, 1); // rcl reghi,1 - emit_jmp(dst, loop); // jmp loop - track_resolve_link(dst, skiploop); // skiploop: - emit_rcl_r32_imm(dst, reglo, 1); // rcl reglo,1 - emit_pushf(dst); // pushf - emit_rcl_r32_imm(dst, reghi, 1); // rcl reghi,1 - track_resolve_link(dst, skipall); // skipall: - emit_combine_z_flags(dst); + a.jecxz(skipall); // jecxz skipall + a.lea(ecx, ptr(ecx, -1)); // lea ecx,[ecx-1] + a.bind(loop); // loop: + a.jecxz(skiploop); // jecxz skiploop + a.lea(ecx, ptr(ecx, -1)); // lea ecx,[ecx-1] + a.rcl(reglo, 1); // rcl reglo,1 + a.rcl(reghi, 1); // rcl reghi,1 + a.jmp(loop); // jmp loop + a.bind(skiploop); // skiploop: + reset_last_upper_lower_reg(); + a.rcl(reglo, 1); // rcl reglo,1 + a.pushfd(); // pushf + a.rcl(reghi, 1); // rcl reghi,1 + a.bind(skipall); // skipall: + reset_last_upper_lower_reg(); + emit_combine_z_flags(a); } } @@ -2715,43 +1839,95 @@ void drcbe_x86::emit_rcl_r64_p64(x86code *&dst, uint8_t reglo, uint8_t reghi, co // pair of registers from a 64-bit parameter //------------------------------------------------- -void drcbe_x86::emit_rcr_r64_p64(x86code *&dst, uint8_t reglo, uint8_t reghi, const be_parameter ¶m, const instruction &inst) +void drcbe_x86::emit_rcr_r64_p64(Assembler &a, Gp const ®lo, Gp const ®hi, be_parameter const ¶m, const instruction &inst) { int saveflags = (inst.flags() != 0); - emit_link skipall, skiploop; - x86code *loop; + Label loop = a.newLabel(); + Label skipall = a.newLabel(); + Label skiploop = a.newLabel(); - emit_mov_r32_p32_keepflags(dst, REG_ECX, param); // mov ecx,param + emit_mov_r32_p32_keepflags(a, ecx, param); // mov ecx,param if (!saveflags) { - loop = dst; // loop: - emit_jecxz_link(dst, skipall); // jecxz skipall - emit_lea_r32_m32(dst, REG_ECX, MBD(REG_ECX, -1)); // lea ecx,[ecx-1] - emit_rcr_r32_imm(dst, reghi, 1); // rcr reghi,1 - emit_rcr_r32_imm(dst, reglo, 1); // rcr reglo,1 - emit_jmp(dst, loop); // jmp loop - track_resolve_link(dst, skipall); // skipall: + a.bind(loop); // loop: + a.jecxz(skipall); // jecxz skipall + a.lea(ecx, ptr(ecx, -1)); // lea ecx,[ecx-1] + a.rcr(reghi, 1); // rcr reghi,1 + a.rcr(reglo, 1); // rcr reglo,1 + a.jmp(loop); // jmp loop + a.bind(skipall); // skipall: + reset_last_upper_lower_reg(); } else { - emit_jecxz_link(dst, skipall); // jecxz skipall - emit_lea_r32_m32(dst, REG_ECX, MBD(REG_ECX, -1)); // lea ecx,[ecx-1] - loop = dst; // loop: - emit_jecxz_link(dst, skiploop); // jecxz skiploop - emit_lea_r32_m32(dst, REG_ECX, MBD(REG_ECX, -1)); // lea ecx,[ecx-1] - emit_rcr_r32_imm(dst, reghi, 1); // rcr reghi,1 - emit_rcr_r32_imm(dst, reglo, 1); // rcr reglo,1 - emit_jmp(dst, loop); // jmp loop - track_resolve_link(dst, skiploop); // skiploop: - emit_rcr_r32_imm(dst, reghi, 1); // rcr reghi,1 - emit_pushf(dst); // pushf - emit_rcr_r32_imm(dst, reglo, 1); // rcr reglo,1 - track_resolve_link(dst, skipall); // skipall: - emit_combine_z_shl_flags(dst); + a.jecxz(skipall); // jecxz skipall + a.lea(ecx, ptr(ecx, -1)); // lea ecx,[ecx-1] + a.bind(loop); // loop: + a.jecxz(skiploop); // jecxz skiploop + a.lea(ecx, ptr(ecx, -1)); // lea ecx,[ecx-1] + a.rcr(reghi, 1); // rcr reghi,1 + a.rcr(reglo, 1); // rcr reglo,1 + a.jmp(loop); // jmp loop + a.bind(skiploop); // skiploop: + reset_last_upper_lower_reg(); + a.rcr(reghi, 1); // rcr reghi,1 + a.pushfd(); // pushf + a.rcr(reglo, 1); // rcr reglo,1 + a.bind(skipall); // skipall: + reset_last_upper_lower_reg(); + emit_combine_z_shl_flags(a); + } +} + + +void 
drcbe_x86::alu_op_param(Assembler &a, Inst::Id const opcode_lo, Inst::Id const opcode_hi, Gp const &lo, Gp const &hi, be_parameter const ¶m, bool saveflags) +{ + if (param.is_memory()) + { + a.emit(opcode_lo, lo, MABS(param.memory(0))); // opl reglo,[param] + if (saveflags) a.pushfd(); // pushf + a.emit(opcode_hi, hi, MABS(param.memory(4))); // oph reghi,[param] + } + else if (param.is_immediate()) + { + a.emit(opcode_lo, lo, param.immediate()); // opl reglo,param + if (saveflags) a.pushfd(); // pushf + a.emit(opcode_hi, hi, param.immediate() >> 32); // oph reghi,param >> 32 } + else if (param.is_int_register()) + { + a.emit(opcode_lo, lo, Gpd(param.ireg())); // opl reglo,param + if (saveflags) a.pushfd(); // pushf + a.emit(opcode_hi, hi, MABS(m_reghi[param.ireg()])); // oph reghi,reghi[param] + } + + if (saveflags) + emit_combine_z_flags(a); } +void drcbe_x86::alu_op_param(Assembler &a, Inst::Id const opcode_lo, Inst::Id const opcode_hi, Mem const &lo, Mem const &hi, be_parameter const ¶m, bool saveflags) +{ + if (param.is_immediate()) + { + a.emit(opcode_lo, lo, param.immediate()); // opl [dest],param + if (saveflags) a.pushfd(); // pushf + a.emit(opcode_hi, hi, param.immediate() >> 32); // oph [dest+4],param >> 32 + } + else + { + Gp const reg = (param.is_int_register()) ? Gpd(param.ireg()) : eax; + + emit_mov_r64_p64(a, reg, edx, param); // mov edx:reglo,param + a.emit(opcode_lo, lo, reg); // opl [dest],reglo + if (saveflags) a.pushfd(); // pushf + a.emit(opcode_hi, hi, edx); // oph [dest+4],edx + } + + if (saveflags) + emit_combine_z_flags(a); +} + //************************************************************************** // EMITTERS FOR FLOATING POINT @@ -2762,14 +1938,11 @@ void drcbe_x86::emit_rcr_r64_p64(x86code *&dst, uint8_t reglo, uint8_t reghi, co // onto the stack //------------------------------------------------- -void drcbe_x86::emit_fld_p(x86code *&dst, int size, const be_parameter ¶m) +void drcbe_x86::emit_fld_p(Assembler &a, int size, be_parameter const ¶m) { assert(param.is_memory()); assert(size == 4 || size == 8); - if (size == 4) - emit_fld_m32(dst, MABS(param.memory())); - else if (size == 8) - emit_fld_m64(dst, MABS(param.memory())); + a.fld(ptr(u64(param.memory()), size)); } @@ -2778,85 +1951,12 @@ void drcbe_x86::emit_fld_p(x86code *&dst, int size, const be_parameter ¶m) // from the stack and pop it //------------------------------------------------- -void drcbe_x86::emit_fstp_p(x86code *&dst, int size, const be_parameter ¶m) +void drcbe_x86::emit_fstp_p(Assembler &a, int size, be_parameter const ¶m) { assert(param.is_memory()); assert(size == 4 || size == 8); - if (size == 4) - emit_fstp_m32(dst, MABS(param.memory())); - else if (size == 8) - emit_fstp_m64(dst, MABS(param.memory())); -} - - -//************************************************************************** -// OUT-OF-BAND CODE FIXUP CALLBACKS -//************************************************************************** - -//------------------------------------------------- -// fixup_label - callback to fixup forward- -// referenced labels -//------------------------------------------------- - -void drcbe_x86::fixup_label(void *parameter, drccodeptr labelcodeptr) -{ - drccodeptr src = (drccodeptr)parameter; - - // find the end of the instruction - if (src[0] == 0xe3) - { - src += 1 + 1; - src[-1] = labelcodeptr - src; - } - else if (src[0] == 0xe9) - { - src += 1 + 4; - ((uint32_t *)src)[-1] = labelcodeptr - src; - } - else if (src[0] == 0x0f && (src[1] & 0xf0) == 0x80) - { - src += 2 + 4; - 
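// Illustrative sketch (not part of the patch): how the generic alu_op_param helper
// above would be used by a caller.  Inst::kIdAdd/kIdAdc/kIdSub/kIdSbb are assumed
// to be asmjit's x86 instruction identifiers matching the Inst::Id parameters, and
// the member name below is hypothetical.

void drcbe_x86::example_add_and_sub_m64(Assembler &a, Mem const &lo, Mem const &hi,
        be_parameter const &param, const instruction &inst)
{
    bool const want_z = (inst.flags() & FLAG_Z) != 0;

    // add [lo],param.lo then adc [hi],param.hi, merging Z across both halves
    alu_op_param(a, Inst::kIdAdd, Inst::kIdAdc, lo, hi, param, want_z);

    // the subtract pair follows exactly the same shape
    alu_op_param(a, Inst::kIdSub, Inst::kIdSbb, lo, hi, param, want_z);
}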
((uint32_t *)src)[-1] = labelcodeptr - src; - } - else - fatalerror("fixup_label called with invalid jmp source!\n"); -} - - -//------------------------------------------------- -// fixup_exception - callback to perform cleanup -// and jump to an exception handler -//------------------------------------------------- - -void drcbe_x86::fixup_exception(drccodeptr *codeptr, void *param1, void *param2) -{ - drccodeptr src = (drccodeptr)param1; - const instruction &inst = *(const instruction *)param2; - - // normalize parameters - const parameter &handp = inst.param(0); - assert(handp.is_code_handle()); - be_parameter exp(*this, inst.param(1), PTYPE_MRI); - - // look up the handle target - drccodeptr *targetptr = handp.handle().codeptr_addr(); - - // first fixup the jump to get us here - drccodeptr dst = *codeptr; - ((uint32_t *)src)[-1] = dst - src; - - // then store the exception parameter - emit_mov_m32_p32(dst, MABS(&m_state.exp), exp); // mov [exp],exp - - // push the original return address on the stack - emit_push_imm(dst, (uintptr_t)src); // push <return> - if (*targetptr != nullptr) - emit_jmp(dst, *targetptr); // jmp *targetptr - else - emit_jmp_m32(dst, MABS(targetptr)); // jmp [targetptr] - - *codeptr = dst; + a.fstp(ptr(u64(param.memory()), size)); } @@ -2885,7 +1985,7 @@ void drcbe_x86::debug_log_hashjmp(int mode, offs_t pc) // op_handle - process a HANDLE opcode //------------------------------------------------- -void drcbe_x86::op_handle(x86code *&dst, const instruction &inst) +void drcbe_x86::op_handle(Assembler &a, const instruction &inst) { assert_no_condition(inst); assert_no_flags(inst); @@ -2894,16 +1994,21 @@ void drcbe_x86::op_handle(x86code *&dst, const instruction &inst) reset_last_upper_lower_reg(); + // make a label for documentation + Label handle = a.newNamedLabel(inst.param(0).handle().string()); + a.bind(handle); + // emit a jump around the stack adjust in case code falls through here - emit_link skip; - emit_jmp_short_link(dst, skip); // jmp skip + Label skip = a.newLabel(); + a.short_().jmp(skip); // jmp skip // register the current pointer for the handle - inst.param(0).handle().set_codeptr(dst); + inst.param(0).handle().set_codeptr(drccodeptr(a.code()->baseAddress() + a.offset())); // by default, the handle points to prolog code that moves the stack pointer - emit_lea_r32_m32(dst, REG_ESP, MBD(REG_ESP, -28)); // lea rsp,[rsp-28] - track_resolve_link(dst, skip); // skip: + a.lea(esp, ptr(esp, -28)); // lea rsp,[rsp-28] + a.bind(skip); // skip: + reset_last_upper_lower_reg(); } @@ -2911,7 +2016,7 @@ void drcbe_x86::op_handle(x86code *&dst, const instruction &inst) // op_hash - process a HASH opcode //------------------------------------------------- -void drcbe_x86::op_hash(x86code *&dst, const instruction &inst) +void drcbe_x86::op_hash(Assembler &a, const instruction &inst) { assert_no_condition(inst); assert_no_flags(inst); @@ -2920,7 +2025,7 @@ void drcbe_x86::op_hash(x86code *&dst, const instruction &inst) assert(inst.param(1).is_immediate()); // register the current pointer for the mode/PC - m_hash.set_codeptr(inst.param(0).immediate(), inst.param(1).immediate(), dst); + m_hash.set_codeptr(inst.param(0).immediate(), inst.param(1).immediate(), drccodeptr(a.code()->baseAddress() + a.offset())); reset_last_upper_lower_reg(); } @@ -2929,15 +2034,21 @@ void drcbe_x86::op_hash(x86code *&dst, const instruction &inst) // op_label - process a LABEL opcode //------------------------------------------------- -void drcbe_x86::op_label(x86code *&dst, const 
instruction &inst) +void drcbe_x86::op_label(Assembler &a, const instruction &inst) { assert_no_condition(inst); assert_no_flags(inst); assert(inst.numparams() == 1); assert(inst.param(0).is_code_label()); + std::string labelName = util::string_format("PC$%x", inst.param(0).label()); + Label label = a.labelByName(labelName.c_str()); + if (!label.isValid()) + label = a.newNamedLabel(labelName.c_str()); + // register the current pointer for the label - m_labels.set_codeptr(inst.param(0).label(), dst); + a.bind(label); + reset_last_upper_lower_reg(); } @@ -2946,7 +2057,7 @@ void drcbe_x86::op_label(x86code *&dst, const instruction &inst) // op_comment - process a COMMENT opcode //------------------------------------------------- -void drcbe_x86::op_comment(x86code *&dst, const instruction &inst) +void drcbe_x86::op_comment(Assembler &a, const instruction &inst) { assert_no_condition(inst); assert_no_flags(inst); @@ -2961,7 +2072,7 @@ void drcbe_x86::op_comment(x86code *&dst, const instruction &inst) // op_mapvar - process a MAPVAR opcode //------------------------------------------------- -void drcbe_x86::op_mapvar(x86code *&dst, const instruction &inst) +void drcbe_x86::op_mapvar(Assembler &a, const instruction &inst) { assert_no_condition(inst); assert_no_flags(inst); @@ -2970,7 +2081,7 @@ void drcbe_x86::op_mapvar(x86code *&dst, const instruction &inst) assert(inst.param(1).is_immediate()); // set the value of the specified mapvar - m_map.set_value(dst, inst.param(0).mapvar(), inst.param(1).immediate()); + m_map.set_value(drccodeptr(a.code()->baseAddress() + a.offset()), inst.param(0).mapvar(), inst.param(1).immediate()); } @@ -2983,7 +2094,7 @@ void drcbe_x86::op_mapvar(x86code *&dst, const instruction &inst) // op_nop - process a NOP opcode //------------------------------------------------- -void drcbe_x86::op_nop(x86code *&dst, const instruction &inst) +void drcbe_x86::op_nop(Assembler &a, const instruction &inst) { // nothing } @@ -2993,7 +2104,7 @@ void drcbe_x86::op_nop(x86code *&dst, const instruction &inst) // op_debug - process a DEBUG opcode //------------------------------------------------- -void drcbe_x86::op_debug(x86code *&dst, const instruction &inst) +void drcbe_x86::op_debug(Assembler &a, const instruction &inst) { // validate instruction assert(inst.size() == 4); @@ -3006,19 +2117,20 @@ void drcbe_x86::op_debug(x86code *&dst, const instruction &inst) if ((m_device.machine().debug_flags & DEBUG_FLAG_ENABLED) != 0) { // normalize parameters - be_parameter pcp(*this, inst.param(0), PTYPE_MRI); + be_parameter const pcp(*this, inst.param(0), PTYPE_MRI); // test and branch - emit_test_m32_imm(dst, MABS(&m_device.machine().debug_flags), DEBUG_FLAG_CALL_HOOK); // test [debug_flags],DEBUG_FLAG_CALL_HOOK - emit_link skip = { nullptr }; - emit_jcc_short_link(dst, x86emit::COND_Z, skip); // jz skip + a.test(MABS(&m_device.machine().debug_flags, 4), DEBUG_FLAG_CALL_HOOK); // test [debug_flags],DEBUG_FLAG_CALL_HOOK + Label skip = a.newLabel(); + a.short_().jz(skip); // jz skip // push the parameter - emit_mov_m32_p32(dst, MBD(REG_ESP, 4), pcp); // mov [esp+4],pcp - emit_mov_m32_imm(dst, MBD(REG_ESP, 0), (uintptr_t)m_device.debug()); // mov [esp],device.debug - emit_call(dst, (x86code *)debugger_inst_hook); // call debugger_inst_hook + emit_mov_m32_p32(a, dword_ptr(esp, 4), pcp); // mov [esp+4],pcp + a.mov(dword_ptr(esp, 0), imm(m_device.debug())); // mov [esp],device.debug + a.call(imm(debugger_inst_hook)); // call debugger_inst_hook - track_resolve_link(dst, skip); // skip: + 
a.bind(skip); // skip: + reset_last_upper_lower_reg(); } } @@ -3027,7 +2139,7 @@ void drcbe_x86::op_debug(x86code *&dst, const instruction &inst) // op_exit - process an EXIT opcode //------------------------------------------------- -void drcbe_x86::op_exit(x86code *&dst, const instruction &inst) +void drcbe_x86::op_exit(Assembler &a, const instruction &inst) { // validate instruction assert(inst.size() == 4); @@ -3038,11 +2150,11 @@ void drcbe_x86::op_exit(x86code *&dst, const instruction &inst) be_parameter retp(*this, inst.param(0), PTYPE_MRI); // load the parameter into EAX - emit_mov_r32_p32(dst, REG_EAX, retp); // mov eax,retp + emit_mov_r32_p32(a, eax, retp); // mov eax,retp if (inst.condition() == uml::COND_ALWAYS) - emit_jmp(dst, m_exit); // jmp exit + a.jmp(imm(m_exit)); // jmp exit else - emit_jcc(dst, X86_CONDITION(inst.condition()), m_exit); // jcc exit + a.j(X86_CONDITION(inst.condition()), imm(m_exit)); // jcc exit } @@ -3050,7 +2162,7 @@ void drcbe_x86::op_exit(x86code *&dst, const instruction &inst) // op_hashjmp - process a HASHJMP opcode //------------------------------------------------- -void drcbe_x86::op_hashjmp(x86code *&dst, const instruction &inst) +void drcbe_x86::op_hashjmp(Assembler &a, const instruction &inst) { // validate instruction assert(inst.size() == 4); @@ -3060,18 +2172,18 @@ void drcbe_x86::op_hashjmp(x86code *&dst, const instruction &inst) // normalize parameters be_parameter modep(*this, inst.param(0), PTYPE_MRI); be_parameter pcp(*this, inst.param(1), PTYPE_MRI); - const parameter &exp = inst.param(2); + parameter const &exp = inst.param(2); assert(exp.is_code_handle()); if (LOG_HASHJMPS) { - emit_mov_m32_p32(dst, MBD(REG_ESP, 4), pcp); - emit_mov_m32_p32(dst, MBD(REG_ESP, 0), modep); - emit_call(dst, (x86code *)debug_log_hashjmp); + emit_mov_m32_p32(a, dword_ptr(esp, 4), pcp); + emit_mov_m32_p32(a, dword_ptr(esp, 0), modep); + a.call(imm(debug_log_hashjmp)); } // load the stack base one word early so we end up at the right spot after our call below - emit_mov_r32_m32(dst, REG_ESP, MABS(&m_hashstacksave)); // mov esp,[hashstacksave] + a.mov(esp, MABS(&m_hashstacksave)); // mov esp,[hashstacksave] // fixed mode cases if (modep.is_immediate() && m_hash.is_mode_populated(modep.immediate())) @@ -3081,53 +2193,53 @@ void drcbe_x86::op_hashjmp(x86code *&dst, const instruction &inst) { uint32_t l1val = (pcp.immediate() >> m_hash.l1shift()) & m_hash.l1mask(); uint32_t l2val = (pcp.immediate() >> m_hash.l2shift()) & m_hash.l2mask(); - emit_call_m32(dst, MABS(&m_hash.base()[modep.immediate()][l1val][l2val])); // call hash[modep][l1val][l2val] + a.call(MABS(&m_hash.base()[modep.immediate()][l1val][l2val])); // call hash[modep][l1val][l2val] } // a fixed mode but variable PC else { - emit_mov_r32_p32(dst, REG_EAX, pcp); // mov eax,pcp - emit_mov_r32_r32(dst, REG_EDX, REG_EAX); // mov edx,eax - emit_shr_r32_imm(dst, REG_EDX, m_hash.l1shift()); // shr edx,l1shift - emit_and_r32_imm(dst, REG_EAX, m_hash.l2mask() << m_hash.l2shift());// and eax,l2mask << l2shift - emit_mov_r32_m32(dst, REG_EDX, MABSI(&m_hash.base()[modep.immediate()][0], REG_EDX, 4)); + emit_mov_r32_p32(a, eax, pcp); // mov eax,pcp + a.mov(edx, eax); // mov edx,eax + a.shr(edx, m_hash.l1shift()); // shr edx,l1shift + a.and_(eax, m_hash.l2mask() << m_hash.l2shift()); // and eax,l2mask << l2shift + a.mov(edx, ptr(u64(&m_hash.base()[modep.immediate()][0]), edx, 2)); // mov edx,hash[modep+edx*4] - emit_call_m32(dst, MBISD(REG_EDX, REG_EAX, 4 >> m_hash.l2shift(), 0));// call [edx+eax*shift] + 
a.call(ptr(edx, eax, 2 - m_hash.l2shift())); // call [edx+eax*shift] } } else { // variable mode - int modereg = modep.select_register(REG_ECX); - emit_mov_r32_p32(dst, modereg, modep); // mov modereg,modep - emit_mov_r32_m32(dst, REG_ECX, MABSI(m_hash.base(), modereg, 4)); // mov ecx,hash[modereg*4] + Gp const modereg = modep.select_register(ecx); + emit_mov_r32_p32(a, modereg, modep); // mov modereg,modep + a.mov(ecx, ptr(u64(m_hash.base()), modereg, 2)); // mov ecx,hash[modereg*4] // fixed PC if (pcp.is_immediate()) { uint32_t l1val = (pcp.immediate() >> m_hash.l1shift()) & m_hash.l1mask(); uint32_t l2val = (pcp.immediate() >> m_hash.l2shift()) & m_hash.l2mask(); - emit_mov_r32_m32(dst, REG_EDX, MBD(REG_ECX, l1val*4)); // mov edx,[ecx+l1val*4] - emit_call_m32(dst, MBD(REG_EDX, l2val*4)); // call [l2val*4] + a.mov(edx, ptr(ecx, l1val*4)); // mov edx,[ecx+l1val*4] + a.call(ptr(edx, l2val*4)); // call [l2val*4] } // variable PC else { - emit_mov_r32_p32(dst, REG_EAX, pcp); // mov eax,pcp - emit_mov_r32_r32(dst, REG_EDX, REG_EAX); // mov edx,eax - emit_shr_r32_imm(dst, REG_EDX, m_hash.l1shift()); // shr edx,l1shift - emit_mov_r32_m32(dst, REG_EDX, MBISD(REG_ECX, REG_EDX, 4, 0)); // mov edx,[ecx+edx*4] - emit_and_r32_imm(dst, REG_EAX, m_hash.l2mask() << m_hash.l2shift());// and eax,l2mask << l2shift - emit_call_m32(dst, MBISD(REG_EDX, REG_EAX, 4 >> m_hash.l2shift(), 0));// call [edx+eax*shift] + emit_mov_r32_p32(a, eax, pcp); // mov eax,pcp + a.mov(edx, eax); // mov edx,eax + a.shr(edx, m_hash.l1shift()); // shr edx,l1shift + a.mov(edx, ptr(ecx, edx, 2)); // mov edx,[ecx+edx*4] + a.and_(eax, m_hash.l2mask() << m_hash.l2shift()); // and eax,l2mask << l2shift + a.call(ptr(edx, eax, 2 - m_hash.l2shift())); // call [edx+eax*shift] } } // in all cases, if there is no code, we return here to generate the exception - emit_mov_m32_p32(dst, MABS(&m_state.exp), pcp); // mov [exp],param - emit_sub_r32_imm(dst, REG_ESP, 4); // sub esp,4 - emit_call_m32(dst, MABS(exp.handle().codeptr_addr())); // call [exp] + emit_mov_m32_p32(a, MABS(&m_state.exp, 4), pcp); // mov [exp],param + a.sub(esp, 4); // sub esp,4 + a.call(MABS(exp.handle().codeptr_addr())); // call [exp] } @@ -3135,7 +2247,7 @@ void drcbe_x86::op_hashjmp(x86code *&dst, const instruction &inst) // op_jmp - process a JMP opcode //------------------------------------------------- -void drcbe_x86::op_jmp(x86code *&dst, const instruction &inst) +void drcbe_x86::op_jmp(Assembler &a, const instruction &inst) { // validate instruction assert(inst.size() == 4); @@ -3143,15 +2255,18 @@ void drcbe_x86::op_jmp(x86code *&dst, const instruction &inst) assert_no_flags(inst); // normalize parameters - const parameter &labelp = inst.param(0); + parameter const &labelp = inst.param(0); assert(labelp.is_code_label()); - // look up the jump target and jump there - x86code *jmptarget = (x86code *)m_labels.get_codeptr(labelp.label(), m_fixup_label, dst); + std::string labelName = util::string_format("PC$%x", labelp.label()); + Label jmptarget = a.labelByName(labelName.c_str()); + if (!jmptarget.isValid()) + jmptarget = a.newNamedLabel(labelName.c_str()); + if (inst.condition() == uml::COND_ALWAYS) - emit_jmp(dst, jmptarget); // jmp target + a.jmp(jmptarget); // jmp target else - emit_jcc(dst, X86_CONDITION(inst.condition()), jmptarget); // jcc target + a.j(X86_CONDITION(inst.condition()), jmptarget); // jcc target } @@ -3159,7 +2274,7 @@ void drcbe_x86::op_jmp(x86code *&dst, const instruction &inst) // op_exh - process an EXH opcode 
//------------------------------------------------- -void drcbe_x86::op_exh(x86code *&dst, const instruction &inst) +void drcbe_x86::op_exh(Assembler &a, const instruction &inst) { // validate instruction assert(inst.size() == 4); @@ -3167,29 +2282,24 @@ void drcbe_x86::op_exh(x86code *&dst, const instruction &inst) assert_no_flags(inst); // normalize parameters - const parameter &handp = inst.param(0); + parameter const &handp = inst.param(0); assert(handp.is_code_handle()); be_parameter exp(*this, inst.param(1), PTYPE_MRI); // look up the handle target drccodeptr *targetptr = handp.handle().codeptr_addr(); - // perform the exception processing inline if unconditional - if (inst.condition() == uml::COND_ALWAYS) - { - emit_mov_m32_p32(dst, MABS(&m_state.exp), exp); // mov [exp],exp - if (*targetptr != nullptr) - emit_call(dst, *targetptr); // call *targetptr - else - emit_call_m32(dst, MABS(targetptr)); // call [targetptr] - } - - // otherwise, jump to an out-of-band handler + // perform the exception processing + Label no_exception = a.newLabel(); + if (inst.condition() != uml::COND_ALWAYS) + a.short_().j(X86_NOT_CONDITION(inst.condition()), no_exception); // jcc no_exception + emit_mov_m32_p32(a, MABS(&m_state.exp, 4), exp); // mov [exp],exp + if (*targetptr != nullptr) + a.call(imm(*targetptr)); // call *targetptr else - { - emit_jcc(dst, X86_CONDITION(inst.condition()), nullptr); // jcc exception - m_cache.request_oob_codegen(m_fixup_exception, dst, &const_cast<instruction &>(inst)); - } + a.call(MABS(targetptr)); // call [targetptr] + if (inst.condition() != uml::COND_ALWAYS) + a.bind(no_exception); } @@ -3197,7 +2307,7 @@ void drcbe_x86::op_exh(x86code *&dst, const instruction &inst) // op_callh - process a CALLH opcode //------------------------------------------------- -void drcbe_x86::op_callh(x86code *&dst, const instruction &inst) +void drcbe_x86::op_callh(Assembler &a, const instruction &inst) { // validate instruction assert(inst.size() == 4); @@ -3205,26 +2315,29 @@ void drcbe_x86::op_callh(x86code *&dst, const instruction &inst) assert_no_flags(inst); // normalize parameters - const parameter &handp = inst.param(0); + parameter const &handp = inst.param(0); assert(handp.is_code_handle()); // look up the handle target drccodeptr *targetptr = handp.handle().codeptr_addr(); // skip if conditional - emit_link skip = { nullptr }; + Label skip = a.newLabel(); if (inst.condition() != uml::COND_ALWAYS) - emit_jcc_short_link(dst, X86_NOT_CONDITION(inst.condition()), skip); // jcc skip + a.short_().j(X86_NOT_CONDITION(inst.condition()), skip); // jcc skip // jump through the handle; directly if a normal jump if (*targetptr != nullptr) - emit_call(dst, *targetptr); // call *targetptr + a.call(imm(*targetptr)); // call *targetptr else - emit_call_m32(dst, MABS(targetptr)); // call [targetptr] + a.call(MABS(targetptr)); // call [targetptr] // resolve the conditional link if (inst.condition() != uml::COND_ALWAYS) - track_resolve_link(dst, skip); // skip: + { + a.bind(skip); // skip: + reset_last_upper_lower_reg(); + } } @@ -3232,7 +2345,7 @@ void drcbe_x86::op_callh(x86code *&dst, const instruction &inst) // op_ret - process a RET opcode //------------------------------------------------- -void drcbe_x86::op_ret(x86code *&dst, const instruction &inst) +void drcbe_x86::op_ret(Assembler &a, const instruction &inst) { // validate instruction assert(inst.size() == 4); @@ -3241,17 +2354,20 @@ void drcbe_x86::op_ret(x86code *&dst, const instruction &inst) assert(inst.numparams() == 0); // skip 
if conditional - emit_link skip = { nullptr }; + Label skip = a.newLabel(); if (inst.condition() != uml::COND_ALWAYS) - emit_jcc_short_link(dst, X86_NOT_CONDITION(inst.condition()), skip); // jcc skip + a.j(X86_NOT_CONDITION(inst.condition()), skip); // jcc skip // return - emit_lea_r32_m32(dst, REG_ESP, MBD(REG_ESP, 28)); // lea rsp,[rsp+28] - emit_ret(dst); // ret + a.lea(esp, ptr(esp, 28)); // lea rsp,[rsp+28] + a.ret(); // ret // resolve the conditional link if (inst.condition() != uml::COND_ALWAYS) - track_resolve_link(dst, skip); // skip: + { + a.bind(skip); // skip: + reset_last_upper_lower_reg(); + } } @@ -3259,7 +2375,7 @@ void drcbe_x86::op_ret(x86code *&dst, const instruction &inst) // op_callc - process a CALLC opcode //------------------------------------------------- -void drcbe_x86::op_callc(x86code *&dst, const instruction &inst) +void drcbe_x86::op_callc(Assembler &a, const instruction &inst) { // validate instruction assert(inst.size() == 4); @@ -3267,22 +2383,25 @@ void drcbe_x86::op_callc(x86code *&dst, const instruction &inst) assert_no_flags(inst); // normalize parameters - const parameter &funcp = inst.param(0); + parameter const &funcp = inst.param(0); assert(funcp.is_c_function()); be_parameter paramp(*this, inst.param(1), PTYPE_M); // skip if conditional - emit_link skip = { nullptr }; + Label skip = a.newLabel(); if (inst.condition() != uml::COND_ALWAYS) - emit_jcc_short_link(dst, X86_NOT_CONDITION(inst.condition()), skip); // jcc skip + a.j(X86_NOT_CONDITION(inst.condition()), skip); // jcc skip // perform the call - emit_mov_m32_imm(dst, MBD(REG_ESP, 0), (uintptr_t)paramp.memory()); // mov [esp],paramp - emit_call(dst, (x86code *)(uintptr_t)funcp.cfunc()); // call funcp + a.mov(dword_ptr(esp, 0), imm(paramp.memory())); // mov [esp],paramp + a.call(imm(funcp.cfunc())); // call funcp // resolve the conditional link if (inst.condition() != uml::COND_ALWAYS) - track_resolve_link(dst, skip); // skip: + { + a.bind(skip); // skip: + reset_last_upper_lower_reg(); + } } @@ -3290,7 +2409,7 @@ void drcbe_x86::op_callc(x86code *&dst, const instruction &inst) // op_recover - process a RECOVER opcode //------------------------------------------------- -void drcbe_x86::op_recover(x86code *&dst, const instruction &inst) +void drcbe_x86::op_recover(Assembler &a, const instruction &inst) { // validate instruction assert(inst.size() == 4); @@ -3301,14 +2420,14 @@ void drcbe_x86::op_recover(x86code *&dst, const instruction &inst) be_parameter dstp(*this, inst.param(0), PTYPE_MR); // call the recovery code - emit_mov_r32_m32(dst, REG_EAX, MABS(&m_stacksave)); // mov eax,stacksave - emit_mov_r32_m32(dst, REG_EAX, MBD(REG_EAX, -4)); // mov eax,[eax-4] - emit_sub_r32_imm(dst, REG_EAX, 1); // sub eax,1 - emit_mov_m32_imm(dst, MBD(REG_ESP, 8), inst.param(1).mapvar()); // mov [esp+8],param1 - emit_mov_m32_r32(dst, MBD(REG_ESP, 4), REG_EAX); // mov [esp+4],eax - emit_mov_m32_imm(dst, MBD(REG_ESP, 0), (uintptr_t)&m_map); // mov [esp],m_map - emit_call(dst, (x86code *)&drc_map_variables::static_get_value); // call drcmap_get_value - emit_mov_p32_r32(dst, dstp, REG_EAX); // mov dstp,eax + a.mov(eax, MABS(&m_stacksave)); // mov eax,stacksave + a.mov(eax, ptr(eax, -4)); // mov eax,[eax-4] + a.sub(eax, 1); // sub eax,1 + a.mov(dword_ptr(esp, 8), inst.param(1).mapvar()); // mov [esp+8],param1 + a.mov(ptr(esp, 4), eax); // mov [esp+4],eax + a.mov(dword_ptr(esp, 0), imm(&m_map)); // mov [esp],m_map + a.call(imm(&drc_map_variables::static_get_value)); // call drcmap_get_value + emit_mov_p32_r32(a, 
dstp, eax); // mov dstp,eax } @@ -3321,7 +2440,7 @@ void drcbe_x86::op_recover(x86code *&dst, const instruction &inst) // op_setfmod - process a SETFMOD opcode //------------------------------------------------- -void drcbe_x86::op_setfmod(x86code *&dst, const instruction &inst) +void drcbe_x86::op_setfmod(Assembler &a, const instruction &inst) { // validate instruction assert(inst.size() == 4); @@ -3335,17 +2454,17 @@ void drcbe_x86::op_setfmod(x86code *&dst, const instruction &inst) if (srcp.is_immediate()) { int value = srcp.immediate() & 3; - emit_mov_m8_imm(dst, MABS(&m_state.fmod), value); // mov [fmod],srcp - emit_fldcw_m16(dst, MABS(&fp_control[value])); // fldcw fp_control[srcp] + a.mov(MABS(&m_state.fmod, 1), value); // mov [fmod],srcp + a.fldcw(MABS(&fp_control[value], 2)); // fldcw fp_control[srcp] } // register/memory case else { - emit_mov_r32_p32(dst, REG_EAX, srcp); // mov eax,srcp - emit_and_r32_imm(dst, REG_EAX, 3); // and eax,3 - emit_mov_m8_r8(dst, MABS(&m_state.fmod), REG_AL); // mov [fmod],al - emit_fldcw_m16(dst, MABSI(&fp_control[0], REG_EAX, 2)); // fldcw fp_control[eax] + emit_mov_r32_p32(a, eax, srcp); // mov eax,srcp + a.and_(eax, 3); // and eax,3 + a.mov(MABS(&m_state.fmod), al); // mov [fmod],al + a.fldcw(ptr(u64(&fp_control[0]), eax, 1, 2)); // fldcw fp_control[eax] } } @@ -3354,7 +2473,7 @@ void drcbe_x86::op_setfmod(x86code *&dst, const instruction &inst) // op_getfmod - process a GETFMOD opcode //------------------------------------------------- -void drcbe_x86::op_getfmod(x86code *&dst, const instruction &inst) +void drcbe_x86::op_getfmod(Assembler &a, const instruction &inst) { // validate instruction assert(inst.size() == 4); @@ -3366,11 +2485,11 @@ void drcbe_x86::op_getfmod(x86code *&dst, const instruction &inst) // fetch the current mode and store to the destination if (dstp.is_int_register()) - emit_movzx_r32_m8(dst, dstp.ireg(), MABS(&m_state.fmod)); // movzx reg,[fmod] + a.movzx(Gpd(dstp.ireg()), MABS(&m_state.fmod, 1)); // movzx reg,[fmod] else { - emit_movzx_r32_m8(dst, REG_EAX, MABS(&m_state.fmod)); // movzx eax,[fmod] - emit_mov_m32_r32(dst, MABS(dstp.memory()), REG_EAX); // mov [dstp],eax + a.movzx(eax, MABS(&m_state.fmod, 1)); // movzx eax,[fmod] + a.mov(MABS(dstp.memory()), eax); // mov [dstp],eax } } @@ -3379,7 +2498,7 @@ void drcbe_x86::op_getfmod(x86code *&dst, const instruction &inst) // op_getexp - process a GETEXP opcode //------------------------------------------------- -void drcbe_x86::op_getexp(x86code *&dst, const instruction &inst) +void drcbe_x86::op_getexp(Assembler &a, const instruction &inst) { // validate instruction assert(inst.size() == 4); @@ -3391,11 +2510,11 @@ void drcbe_x86::op_getexp(x86code *&dst, const instruction &inst) // fetch the exception parameter and store to the destination if (dstp.is_int_register()) - emit_mov_r32_m32(dst, dstp.ireg(), MABS(&m_state.exp)); // mov reg,[exp] + a.mov(Gpd(dstp.ireg()), MABS(&m_state.exp)); // mov reg,[exp] else { - emit_mov_r32_m32(dst, REG_EAX, MABS(&m_state.exp)); // mov eax,[exp] - emit_mov_m32_r32(dst, MABS(dstp.memory()), REG_EAX); // mov [dstp],eax + a.mov(eax, MABS(&m_state.exp)); // mov eax,[exp] + a.mov(MABS(dstp.memory()), eax); // mov [dstp],eax } } @@ -3404,7 +2523,7 @@ void drcbe_x86::op_getexp(x86code *&dst, const instruction &inst) // op_getflgs - process a GETFLGS opcode //------------------------------------------------- -void drcbe_x86::op_getflgs(x86code *&dst, const instruction &inst) +void drcbe_x86::op_getflgs(Assembler &a, const instruction &inst) { // 
validate instruction assert(inst.size() == 4); @@ -3416,7 +2535,7 @@ void drcbe_x86::op_getflgs(x86code *&dst, const instruction &inst) be_parameter maskp(*this, inst.param(1), PTYPE_I); // pick a target register for the general case - int dstreg = dstp.select_register(REG_EAX); + Gp const dstreg = dstp.select_register(eax); // compute mask for flags uint32_t flagmask = 0; @@ -3430,108 +2549,108 @@ void drcbe_x86::op_getflgs(x86code *&dst, const instruction &inst) { // single flags only case FLAG_C: - emit_setcc_r8(dst, x86emit::COND_C, REG_AL); // setc al - emit_movzx_r32_r8(dst, dstreg, REG_AL); // movzx dstreg,al + a.setc(al); // setc al + a.movzx(dstreg, al); // movzx dstreg,al break; case FLAG_V: - emit_setcc_r8(dst, x86emit::COND_O, REG_AL); // seto al - emit_movzx_r32_r8(dst, dstreg, REG_AL); // movzx dstreg,al - emit_shl_r32_imm(dst, dstreg, 1); // shl dstreg,1 + a.seto(al); // seto al + a.movzx(dstreg, al); // movzx dstreg,al + a.shl(dstreg, 1); // shl dstreg,1 break; case FLAG_Z: - emit_setcc_r8(dst, x86emit::COND_Z, REG_AL); // setz al - emit_movzx_r32_r8(dst, dstreg, REG_AL); // movzx dstreg,al - emit_shl_r32_imm(dst, dstreg, 2); // shl dstreg,2 + a.setz(al); // setz al + a.movzx(dstreg, al); // movzx dstreg,al + a.shl(dstreg, 2); // shl dstreg,2 break; case FLAG_S: - emit_setcc_r8(dst, x86emit::COND_S, REG_AL); // sets al - emit_movzx_r32_r8(dst, dstreg, REG_AL); // movzx dstreg,al - emit_shl_r32_imm(dst, dstreg, 3); // shl dstreg,3 + a.sets(al); // sets al + a.movzx(dstreg, al); // movzx dstreg,al + a.shl(dstreg, 3); // shl dstreg,3 break; case FLAG_U: - emit_setcc_r8(dst, x86emit::COND_P, REG_AL); // setp al - emit_movzx_r32_r8(dst, dstreg, REG_AL); // movzx dstreg,al - emit_shl_r32_imm(dst, dstreg, 4); // shl dstreg,4 + a.setp(al); // setp al + a.movzx(dstreg, al); // movzx dstreg,al + a.shl(dstreg, 4); // shl dstreg,4 break; // carry plus another flag case FLAG_C | FLAG_V: - emit_setcc_r8(dst, x86emit::COND_C, REG_AL); // setc al - emit_setcc_r8(dst, x86emit::COND_O, REG_CL); // seto cl - emit_movzx_r32_r8(dst, REG_EAX, REG_AL); // movzx eax,al - emit_movzx_r32_r8(dst, REG_ECX, REG_CL); // movzx ecx,al - emit_lea_r32_m32(dst, dstreg, MBISD(REG_EAX, REG_ECX, 2, 0)); // lea dstreg,[eax+ecx*2] + a.setc(al); // setc al + a.seto(cl); // seto cl + a.movzx(eax, al); // movzx eax,al + a.movzx(ecx, cl); // movzx ecx,al + a.lea(dstreg, ptr(eax, ecx, 1)); // lea dstreg,[eax+ecx*2] break; case FLAG_C | FLAG_Z: - emit_setcc_r8(dst, x86emit::COND_C, REG_AL); // setc al - emit_setcc_r8(dst, x86emit::COND_Z, REG_CL); // setz cl - emit_movzx_r32_r8(dst, REG_EAX, REG_AL); // movzx eax,al - emit_movzx_r32_r8(dst, REG_ECX, REG_CL); // movzx ecx,al - emit_lea_r32_m32(dst, dstreg, MBISD(REG_EAX, REG_ECX, 4, 0)); // lea dstreg,[eax+ecx*4] + a.setc(al); // setc al + a.setz(cl); // setz cl + a.movzx(eax, al); // movzx eax,al + a.movzx(ecx, cl); // movzx ecx,al + a.lea(dstreg, ptr(eax, ecx, 2)); // lea dstreg,[eax+ecx*4] break; case FLAG_C | FLAG_S: - emit_setcc_r8(dst, x86emit::COND_C, REG_AL); // setc al - emit_setcc_r8(dst, x86emit::COND_S, REG_CL); // sets cl - emit_movzx_r32_r8(dst, REG_EAX, REG_AL); // movzx eax,al - emit_movzx_r32_r8(dst, REG_ECX, REG_CL); // movzx ecx,al - emit_lea_r32_m32(dst, dstreg, MBISD(REG_EAX, REG_ECX, 8, 0)); // lea dstreg,[eax+ecx*8] + a.setc(al); // setc al + a.sets(cl); // sets cl + a.movzx(eax, al); // movzx eax,al + a.movzx(ecx, cl); // movzx ecx,al + a.lea(dstreg, ptr(eax, ecx, 3)); // lea dstreg,[eax+ecx*8] break; // overflow plus another flag case FLAG_V | 
FLAG_Z: - emit_setcc_r8(dst, x86emit::COND_O, REG_AL); // seto al - emit_setcc_r8(dst, x86emit::COND_Z, REG_CL); // setz cl - emit_movzx_r32_r8(dst, REG_EAX, REG_AL); // movzx eax,al - emit_movzx_r32_r8(dst, REG_ECX, REG_CL); // movzx ecx,al - emit_lea_r32_m32(dst, dstreg, MBISD(REG_EAX, REG_ECX, 2, 0)); // lea dstreg,[eax+ecx*2] - emit_shl_r32_imm(dst, dstreg, 1); // shl dstreg,1 + a.seto(al); // seto al + a.setz(cl); // setz cl + a.movzx(eax, al); // movzx eax,al + a.movzx(ecx, cl); // movzx ecx,al + a.lea(dstreg, ptr(eax, ecx, 1)); // lea dstreg,[eax+ecx*2] + a.shl(dstreg, 1); // shl dstreg,1 break; case FLAG_V | FLAG_S: - emit_setcc_r8(dst, x86emit::COND_O, REG_AL); // seto al - emit_setcc_r8(dst, x86emit::COND_S, REG_CL); // sets cl - emit_movzx_r32_r8(dst, REG_EAX, REG_AL); // movzx eax,al - emit_movzx_r32_r8(dst, REG_ECX, REG_CL); // movzx ecx,al - emit_lea_r32_m32(dst, dstreg, MBISD(REG_EAX, REG_ECX, 4, 0)); // lea dstreg,[eax+ecx*4] - emit_shl_r32_imm(dst, dstreg, 1); // shl dstreg,1 + a.seto(al); // seto al + a.sets(cl); // sets cl + a.movzx(eax, al); // movzx eax,al + a.movzx(ecx, cl); // movzx ecx,al + a.lea(dstreg, ptr(eax, ecx, 2)); // lea dstreg,[eax+ecx*4] + a.shl(dstreg, 1); // shl dstreg,1 break; // zero plus another flag case FLAG_Z | FLAG_S: - emit_setcc_r8(dst, x86emit::COND_Z, REG_AL); // setz al - emit_setcc_r8(dst, x86emit::COND_S, REG_CL); // sets cl - emit_movzx_r32_r8(dst, REG_EAX, REG_AL); // movzx eax,al - emit_movzx_r32_r8(dst, REG_ECX, REG_CL); // movzx ecx,al - emit_lea_r32_m32(dst, dstreg, MBISD(REG_EAX, REG_ECX, 2, 0)); // lea dstreg,[eax+ecx*2] - emit_shl_r32_imm(dst, dstreg, 2); // shl dstreg,2 + a.setz(al); // setz al + a.sets(cl); // sets cl + a.movzx(eax, al); // movzx eax,al + a.movzx(ecx, cl); // movzx ecx,al + a.lea(dstreg, ptr(eax, ecx, 1)); // lea dstreg,[eax+ecx*2] + a.shl(dstreg, 2); // shl dstreg,2 break; // default cases default: - emit_pushf(dst); // pushf - emit_pop_r32(dst, REG_EAX); // pop eax - emit_and_r32_imm(dst, REG_EAX, flagmask); // and eax,flagmask - emit_movzx_r32_m8(dst, dstreg, MABSI(flags_map, REG_EAX)); // movzx dstreg,[flags_map] + a.pushfd(); // pushf + a.pop(eax); // pop eax + a.and_(eax, flagmask); // and eax,flagmask + a.movzx(dstreg, byte_ptr(u64(flags_map), eax)); // movzx dstreg,[flags_map] break; } // store low 32 bits - emit_mov_p32_r32(dst, dstp, dstreg); // mov dstp,dstreg + emit_mov_p32_r32(a, dstp, dstreg); // mov dstp,dstreg // 64-bit form stores upper 32 bits if (inst.size() == 8) { // general case if (dstp.is_memory()) - emit_mov_m32_imm(dst, MABS(dstp.memory(4)), 0); // mov [dstp+4],0 + a.mov(MABS(dstp.memory(4), 4), 0); // mov [dstp+4],0 else if (dstp.is_int_register()) - emit_mov_m32_imm(dst, MABS(m_reghi[dstp.ireg()]), 0); // mov [reghi],0 + a.mov(MABS(m_reghi[dstp.ireg()], 4), 0); // mov [reghi],0 } } @@ -3540,7 +2659,7 @@ void drcbe_x86::op_getflgs(x86code *&dst, const instruction &inst) // op_save - process a SAVE opcode //------------------------------------------------- -void drcbe_x86::op_save(x86code *&dst, const instruction &inst) +void drcbe_x86::op_save(Assembler &a, const instruction &inst) { // validate instruction assert(inst.size() == 4); @@ -3551,8 +2670,8 @@ void drcbe_x86::op_save(x86code *&dst, const instruction &inst) be_parameter dstp(*this, inst.param(0), PTYPE_M); // copy live state to the destination - emit_mov_r32_imm(dst, REG_ECX, (uintptr_t)dstp.memory()); // mov ecx,dstp - emit_call(dst, m_save); // call save + a.mov(ecx, imm(dstp.memory())); // mov ecx,dstp + 
a.call(imm(m_save)); // call save } @@ -3560,7 +2679,7 @@ void drcbe_x86::op_save(x86code *&dst, const instruction &inst) // op_restore - process a RESTORE opcode //------------------------------------------------- -void drcbe_x86::op_restore(x86code *&dst, const instruction &inst) +void drcbe_x86::op_restore(Assembler &a, const instruction &inst) { // validate instruction assert(inst.size() == 4); @@ -3570,8 +2689,8 @@ void drcbe_x86::op_restore(x86code *&dst, const instruction &inst) be_parameter srcp(*this, inst.param(0), PTYPE_M); // copy live state from the destination - emit_mov_r32_imm(dst, REG_ECX, (uintptr_t)srcp.memory()); // mov ecx,dstp - emit_call(dst, m_restore); // call restore + a.mov(ecx, imm(srcp.memory())); // mov ecx,dstp + a.call(imm(m_restore)); // call restore } @@ -3584,7 +2703,7 @@ void drcbe_x86::op_restore(x86code *&dst, const instruction &inst) // op_load - process a LOAD opcode //------------------------------------------------- -void drcbe_x86::op_load(x86code *&dst, const instruction &inst) +void drcbe_x86::op_load(Assembler &a, const instruction &inst) { // validate instruction assert(inst.size() == 4 || inst.size() == 8); @@ -3595,50 +2714,51 @@ void drcbe_x86::op_load(x86code *&dst, const instruction &inst) be_parameter dstp(*this, inst.param(0), PTYPE_MR); be_parameter basep(*this, inst.param(1), PTYPE_M); be_parameter indp(*this, inst.param(2), PTYPE_MRI); - const parameter &scalesizep = inst.param(3); + parameter const &scalesizep = inst.param(3); assert(scalesizep.is_size_scale()); - int scale = 1 << scalesizep.scale(); - int size = scalesizep.size(); + int const size = scalesizep.size(); // pick a target register for the general case - int dstreg = dstp.select_register(REG_EAX); + Gp const dstreg = dstp.select_register(eax); // immediate index if (indp.is_immediate()) { + int const scale = 1 << scalesizep.scale(); + if (size == SIZE_BYTE) - emit_movzx_r32_m8(dst, dstreg, MABS(basep.memory(scale*indp.immediate()))); // movzx dstreg,[basep + scale*indp] + a.movzx(dstreg, MABS(basep.memory(scale*indp.immediate()), 1)); // movzx dstreg,[basep + scale*indp] else if (size == SIZE_WORD) - emit_movzx_r32_m16(dst, dstreg, MABS(basep.memory(scale*indp.immediate()))); // movzx dstreg,[basep + scale*indp] + a.movzx(dstreg, MABS(basep.memory(scale*indp.immediate()), 2)); // movzx dstreg,[basep + scale*indp] else if (size == SIZE_DWORD) - emit_mov_r32_m32(dst, dstreg, MABS(basep.memory(scale*indp.immediate()))); // mov dstreg,[basep + scale*indp] + a.mov(dstreg, MABS(basep.memory(scale*indp.immediate()))); // mov dstreg,[basep + scale*indp] else if (size == SIZE_QWORD) { - emit_mov_r32_m32(dst, REG_EDX, MABS(basep.memory(scale*indp.immediate() + 4))); // mov edx,[basep + scale*indp + 4] - emit_mov_r32_m32(dst, dstreg, MABS(basep.memory(scale*indp.immediate()))); // mov dstreg,[basep + scale*indp] + a.mov(edx, MABS(basep.memory(scale*indp.immediate() + 4))); // mov edx,[basep + scale*indp + 4] + a.mov(dstreg, MABS(basep.memory(scale*indp.immediate()))); // mov dstreg,[basep + scale*indp] } } // other index else { - int indreg = indp.select_register(REG_ECX); - emit_mov_r32_p32(dst, indreg, indp); + Gp const indreg = indp.select_register(ecx); + emit_mov_r32_p32(a, indreg, indp); if (size == SIZE_BYTE) - emit_movzx_r32_m8(dst, dstreg, MABSI(basep.memory(), indreg, scale)); // movzx dstreg,[basep + scale*indp] + a.movzx(dstreg, ptr(u64(basep.memory()), indreg, scalesizep.scale(), 1)); // movzx dstreg,[basep + scale*indp] else if (size == SIZE_WORD) - 
emit_movzx_r32_m16(dst, dstreg, MABSI(basep.memory(), indreg, scale)); // movzx dstreg,[basep + scale*indp] + a.movzx(dstreg, ptr(u64(basep.memory()), indreg, scalesizep.scale(), 2)); // movzx dstreg,[basep + scale*indp] else if (size == SIZE_DWORD) - emit_mov_r32_m32(dst, dstreg, MABSI(basep.memory(), indreg, scale)); // mov dstreg,[basep + scale*indp] + a.mov(dstreg, ptr(u64(basep.memory()), indreg, scalesizep.scale())); // mov dstreg,[basep + scale*indp] else if (size == SIZE_QWORD) { - emit_mov_r32_m32(dst, REG_EDX, MABSI(basep.memory(4), indreg, scale)); // mov edx,[basep + scale*indp + 4] - emit_mov_r32_m32(dst, dstreg, MABSI(basep.memory(), indreg, scale)); // mov dstreg,[basep + scale*indp] + a.mov(edx, ptr(u64(basep.memory(4)), indreg, scalesizep.scale())); // mov edx,[basep + scale*indp + 4] + a.mov(dstreg, ptr(u64(basep.memory(0)), indreg, scalesizep.scale())); // mov dstreg,[basep + scale*indp] } } // store low 32 bits - emit_mov_p32_r32(dst, dstp, dstreg); // mov dstp,dstreg + emit_mov_p32_r32(a, dstp, dstreg); // mov dstp,dstreg // 64-bit form stores upper 32 bits if (inst.size() == 8) @@ -3647,22 +2767,22 @@ void drcbe_x86::op_load(x86code *&dst, const instruction &inst) if (size != SIZE_QWORD) { if (dstp.is_memory()) - emit_mov_m32_imm(dst, MABS(dstp.memory(4)), 0); // mov [dstp+4],0 + a.mov(MABS(dstp.memory(4), 4), 0); // mov [dstp+4],0 else if (dstp.is_int_register()) - emit_mov_m32_imm(dst, MABS(m_reghi[dstp.ireg()]), 0); // mov [reghi],0 + a.mov(MABS(m_reghi[dstp.ireg()], 4), 0); // mov [reghi],0 } // 8-byte case else { if (dstp.is_memory()) - emit_mov_m32_r32(dst, MABS(dstp.memory(4)), REG_EDX); // mov [dstp+4],edx + a.mov(MABS(dstp.memory(4)), edx); // mov [dstp+4],edx else if (dstp.is_int_register()) - emit_mov_m32_r32(dst, MABS(m_reghi[dstp.ireg()]), REG_EDX); // mov [reghi],edx - set_last_upper_reg(dst, dstp, REG_EDX); + a.mov(MABS(m_reghi[dstp.ireg()]), edx); // mov [reghi],edx + set_last_upper_reg(a, dstp, edx); } } - set_last_lower_reg(dst, dstp, dstreg); + set_last_lower_reg(a, dstp, dstreg); } @@ -3670,7 +2790,7 @@ void drcbe_x86::op_load(x86code *&dst, const instruction &inst) // op_loads - process a LOADS opcode //------------------------------------------------- -void drcbe_x86::op_loads(x86code *&dst, const instruction &inst) +void drcbe_x86::op_loads(Assembler &a, const instruction &inst) { // validate instruction assert(inst.size() == 4 || inst.size() == 8); @@ -3681,62 +2801,63 @@ void drcbe_x86::op_loads(x86code *&dst, const instruction &inst) be_parameter dstp(*this, inst.param(0), PTYPE_MR); be_parameter basep(*this, inst.param(1), PTYPE_M); be_parameter indp(*this, inst.param(2), PTYPE_MRI); - const parameter &scalesizep = inst.param(3); + parameter const &scalesizep = inst.param(3); assert(scalesizep.is_size_scale()); - int scale = 1 << scalesizep.scale(); - int size = scalesizep.size(); + int const size = scalesizep.size(); // pick a target register for the general case - int dstreg = dstp.select_register(REG_EAX); + Gp const dstreg = dstp.select_register(eax); // immediate index if (indp.is_immediate()) { + int const scale = 1 << scalesizep.scale(); + if (size == SIZE_BYTE) - emit_movsx_r32_m8(dst, dstreg, MABS(basep.memory(scale*indp.immediate()))); // movsx dstreg,[basep + scale*indp] + a.movsx(dstreg, MABS(basep.memory(scale*indp.immediate()), 1)); // movsx dstreg,[basep + scale*indp] else if (size == SIZE_WORD) - emit_movsx_r32_m16(dst, dstreg, MABS(basep.memory(scale*indp.immediate()))); // movsx dstreg,[basep + scale*indp] + a.movsx(dstreg, 
MABS(basep.memory(scale*indp.immediate()), 2)); // movsx dstreg,[basep + scale*indp] else if (size == SIZE_DWORD) - emit_mov_r32_m32(dst, dstreg, MABS(basep.memory(scale*indp.immediate()))); // mov dstreg,[basep + scale*indp] + a.mov(dstreg, MABS(basep.memory(scale*indp.immediate()))); // mov dstreg,[basep + scale*indp] else if (size == SIZE_QWORD) { - emit_mov_r32_m32(dst, REG_EDX, MABS(basep.memory(scale*indp.immediate() + 4))); // mov edx,[basep + scale*indp + 4] - emit_mov_r32_m32(dst, dstreg, MABS(basep.memory(scale*indp.immediate()))); // mov dstreg,[basep + scale*indp] + a.mov(edx, MABS(basep.memory(scale*indp.immediate() + 4))); // mov edx,[basep + scale*indp + 4] + a.mov(dstreg, MABS(basep.memory(scale*indp.immediate()))); // mov dstreg,[basep + scale*indp] } } // other index else { - int indreg = indp.select_register(REG_ECX); - emit_mov_r32_p32(dst, indreg, indp); + Gp const indreg = indp.select_register(ecx); + emit_mov_r32_p32(a, indreg, indp); if (size == SIZE_BYTE) - emit_movsx_r32_m8(dst, dstreg, MABSI(basep.memory(), indreg, scale)); // movsx dstreg,[basep + scale*indp] + a.movsx(dstreg, ptr(u64(basep.memory()), indreg, scalesizep.scale(), 1)); // movsx dstreg,[basep + scale*indp] else if (size == SIZE_WORD) - emit_movsx_r32_m16(dst, dstreg, MABSI(basep.memory(), indreg, scale)); // movsx dstreg,[basep + scale*indp] + a.movsx(dstreg, ptr(u64(basep.memory()), indreg, scalesizep.scale(), 2)); // movsx dstreg,[basep + scale*indp] else if (size == SIZE_DWORD) - emit_mov_r32_m32(dst, dstreg, MABSI(basep.memory(), indreg, scale)); // mov dstreg,[basep + scale*indp] + a.mov(dstreg, ptr(u64(basep.memory()), indreg, scalesizep.scale())); // mov dstreg,[basep + scale*indp] else if (size == SIZE_QWORD) { - emit_mov_r32_m32(dst, REG_EDX, MABSI(basep.memory(4), indreg, scale)); // mov edx,[basep + scale*indp + 4] - emit_mov_r32_m32(dst, dstreg, MABSI(basep.memory(), indreg, scale)); // mov dstreg,[basep + scale*indp] + a.mov(edx, ptr(u64(basep.memory(4)), indreg, scalesizep.scale())); // mov edx,[basep + scale*indp + 4] + a.mov(dstreg, ptr(u64(basep.memory(0)), indreg, scalesizep.scale())); // mov dstreg,[basep + scale*indp] } } // store low 32 bits - emit_mov_p32_r32(dst, dstp, dstreg); // mov dstp,dstreg + emit_mov_p32_r32(a, dstp, dstreg); // mov dstp,dstreg // 64-bit form stores upper 32 bits if (inst.size() == 8) { - emit_cdq(dst); // cdq + a.cdq(); // cdq if (dstp.is_memory()) - emit_mov_m32_r32(dst, MABS(dstp.memory(4)), REG_EDX); // mov [dstp+4],edx + a.mov(MABS(dstp.memory(4)), edx); // mov [dstp+4],edx else if (dstp.is_int_register()) - emit_mov_m32_r32(dst, MABS(m_reghi[dstp.ireg()]), REG_EDX); // mov [reghi],edx - set_last_upper_reg(dst, dstp, REG_EDX); + a.mov(MABS(m_reghi[dstp.ireg()]), edx); // mov [reghi],edx + set_last_upper_reg(a, dstp, edx); } - set_last_lower_reg(dst, dstp, dstreg); + set_last_lower_reg(a, dstp, dstreg); } @@ -3744,7 +2865,7 @@ void drcbe_x86::op_loads(x86code *&dst, const instruction &inst) // op_store - process a STORE opcode //------------------------------------------------- -void drcbe_x86::op_store(x86code *&dst, const instruction &inst) +void drcbe_x86::op_store(Assembler &a, const instruction &inst) { // validate instruction assert(inst.size() == 4 || inst.size() == 8); @@ -3755,31 +2876,32 @@ void drcbe_x86::op_store(x86code *&dst, const instruction &inst) be_parameter basep(*this, inst.param(0), PTYPE_M); be_parameter indp(*this, inst.param(1), PTYPE_MRI); be_parameter srcp(*this, inst.param(2), PTYPE_MRI); - const parameter &scalesizep = 
inst.param(3); - int scale = 1 << (scalesizep.scale()); - int size = scalesizep.size(); + parameter const &scalesizep = inst.param(3); + int const size = scalesizep.size(); // pick a source register for the general case - int srcreg = srcp.select_register(REG_EAX); - if (size == SIZE_BYTE && (srcreg & 4)) - srcreg = REG_EAX; + Gp srcreg = srcp.select_register(eax); + if (size == SIZE_BYTE && (srcreg.id() & 4)) // FIXME: &4? + srcreg = eax; // degenerate case: constant index if (indp.is_immediate()) { + int const scale = 1 << (scalesizep.scale()); + // immediate source if (srcp.is_immediate()) { if (size == SIZE_BYTE) - emit_mov_m8_imm(dst, MABS(basep.memory(scale*indp.immediate())), srcp.immediate()); // mov [basep + scale*indp],srcp + a.mov(MABS(basep.memory(scale*indp.immediate()), 1), srcp.immediate()); // mov [basep + scale*indp],srcp else if (size == SIZE_WORD) - emit_mov_m16_imm(dst, MABS(basep.memory(scale*indp.immediate())), srcp.immediate()); // mov [basep + scale*indp],srcp + a.mov(MABS(basep.memory(scale*indp.immediate()), 2), srcp.immediate()); // mov [basep + scale*indp],srcp else if (size == SIZE_DWORD) - emit_mov_m32_imm(dst, MABS(basep.memory(scale*indp.immediate())), srcp.immediate()); // mov [basep + scale*indp],srcp + a.mov(MABS(basep.memory(scale*indp.immediate()), 4), srcp.immediate()); // mov [basep + scale*indp],srcp else if (size == SIZE_QWORD) { - emit_mov_m32_imm(dst, MABS(basep.memory(scale*indp.immediate())), srcp.immediate()); // mov [basep + scale*indp],srcp - emit_mov_m32_imm(dst, MABS(basep.memory(scale*indp.immediate() + 4)), srcp.immediate() >> 32); + a.mov(MABS(basep.memory(scale*indp.immediate()), 4), srcp.immediate()); // mov [basep + scale*indp],srcp + a.mov(MABS(basep.memory(scale*indp.immediate() + 4), 4), srcp.immediate() >> 32); // mov [basep + scale*indp + 4],srcp >> 32 } } @@ -3788,19 +2910,19 @@ void drcbe_x86::op_store(x86code *&dst, const instruction &inst) else { if (size != SIZE_QWORD) - emit_mov_r32_p32(dst, srcreg, srcp); // mov srcreg,srcp + emit_mov_r32_p32(a, srcreg, srcp); // mov srcreg,srcp else - emit_mov_r64_p64(dst, srcreg, REG_EDX, srcp); // mov edx:srcreg,srcp + emit_mov_r64_p64(a, srcreg, edx, srcp); // mov edx:srcreg,srcp if (size == SIZE_BYTE) - emit_mov_m8_r8(dst, MABS(basep.memory(scale*indp.immediate())), srcreg); // mov [basep + scale*indp],srcreg + a.mov(MABS(basep.memory(scale*indp.immediate())), srcreg.r8()); // mov [basep + scale*indp],srcreg else if (size == SIZE_WORD) - emit_mov_m16_r16(dst, MABS(basep.memory(scale*indp.immediate())), srcreg); // mov [basep + scale*indp],srcreg + a.mov(MABS(basep.memory(scale*indp.immediate())), srcreg.r16()); // mov [basep + scale*indp],srcreg else if (size == SIZE_DWORD) - emit_mov_m32_r32(dst, MABS(basep.memory(scale*indp.immediate())), srcreg); // mov [basep + scale*indp],srcreg + a.mov(MABS(basep.memory(scale*indp.immediate())), srcreg); // mov [basep + scale*indp],srcreg else if (size == SIZE_QWORD) { - emit_mov_m32_r32(dst, MABS(basep.memory(scale*indp.immediate())), srcreg); // mov [basep + scale*indp],srcreg - emit_mov_m32_r32(dst, MABS(basep.memory(scale*indp.immediate() + 4)), REG_EDX); // mov [basep + scale*indp + 4],edx + a.mov(MABS(basep.memory(scale*indp.immediate())), srcreg); // mov [basep + scale*indp],srcreg + a.mov(MABS(basep.memory(scale*indp.immediate() + 4)), edx); // mov [basep + scale*indp + 4],edx } } } @@ -3808,22 +2930,22 @@ void drcbe_x86::op_store(x86code *&dst, const instruction &inst) // normal case: variable index else { - int indreg = 
indp.select_register(REG_ECX); - emit_mov_r32_p32(dst, indreg, indp); // mov indreg,indp + Gp const indreg = indp.select_register(ecx); + emit_mov_r32_p32(a, indreg, indp); // mov indreg,indp // immediate source if (srcp.is_immediate()) { if (size == SIZE_BYTE) - emit_mov_m8_imm(dst, MABSI(basep.memory(), indreg, scale), srcp.immediate()); // mov [basep + 1*ecx],srcp + a.mov(ptr(u64(basep.memory()), indreg, scalesizep.scale(), 1), srcp.immediate()); // mov [basep + 1*ecx],srcp else if (size == SIZE_WORD) - emit_mov_m16_imm(dst, MABSI(basep.memory(), indreg, scale), srcp.immediate()); // mov [basep + 2*ecx],srcp + a.mov(ptr(u64(basep.memory()), indreg, scalesizep.scale(), 2), srcp.immediate()); // mov [basep + 2*ecx],srcp else if (size == SIZE_DWORD) - emit_mov_m32_imm(dst, MABSI(basep.memory(), indreg, scale), srcp.immediate()); // mov [basep + 4*ecx],srcp + a.mov(ptr(u64(basep.memory()), indreg, scalesizep.scale(), 4), srcp.immediate()); // mov [basep + 4*ecx],srcp else if (size == SIZE_QWORD) { - emit_mov_m32_imm(dst, MABSI(basep.memory(), indreg, scale), srcp.immediate()); // mov [basep + 8*ecx],srcp - emit_mov_m32_imm(dst, MABSI(basep.memory(4), indreg, scale), srcp.immediate() >> 32); + a.mov(ptr(u64(basep.memory(0)), indreg, scalesizep.scale(), 4), srcp.immediate()); // mov [basep + 8*ecx],srcp + a.mov(ptr(u64(basep.memory(4)), indreg, scalesizep.scale(), 4), srcp.immediate() >> 32); // mov [basep + 8*ecx + 4],srcp >> 32 } } @@ -3832,19 +2954,19 @@ void drcbe_x86::op_store(x86code *&dst, const instruction &inst) else { if (size != SIZE_QWORD) - emit_mov_r32_p32(dst, srcreg, srcp); // mov srcreg,srcp + emit_mov_r32_p32(a, srcreg, srcp); // mov srcreg,srcp else - emit_mov_r64_p64(dst, srcreg, REG_EDX, srcp); // mov edx:srcreg,srcp + emit_mov_r64_p64(a, srcreg, edx, srcp); // mov edx:srcreg,srcp if (size == SIZE_BYTE) - emit_mov_m8_r8(dst, MABSI(basep.memory(), indreg, scale), srcreg); // mov [basep + 1*ecx],srcreg + a.mov(ptr(u64(basep.memory()), indreg, scalesizep.scale()), srcreg.r8()); // mov [basep + 1*ecx],srcreg else if (size == SIZE_WORD) - emit_mov_m16_r16(dst, MABSI(basep.memory(), indreg, scale), srcreg); // mov [basep + 2*ecx],srcreg + a.mov(ptr(u64(basep.memory()), indreg, scalesizep.scale()), srcreg.r16()); // mov [basep + 2*ecx],srcreg else if (size == SIZE_DWORD) - emit_mov_m32_r32(dst, MABSI(basep.memory(), indreg, scale), srcreg); // mov [basep + 4*ecx],srcreg + a.mov(ptr(u64(basep.memory()), indreg, scalesizep.scale()), srcreg); // mov [basep + 4*ecx],srcreg else if (size == SIZE_QWORD) { - emit_mov_m32_r32(dst, MABSI(basep.memory(), indreg, scale), srcreg); // mov [basep + 8*ecx],srcreg - emit_mov_m32_r32(dst, MABSI(basep.memory(4), indreg, scale), REG_EDX); // mov [basep + 8*ecx],edx + a.mov(ptr(u64(basep.memory(0)), indreg, scalesizep.scale()), srcreg); // mov [basep + 8*ecx],srcreg + a.mov(ptr(u64(basep.memory(4)), indreg, scalesizep.scale()), edx); // mov [basep + 8*ecx],edx } } } @@ -3855,7 +2977,7 @@ void drcbe_x86::op_store(x86code *&dst, const instruction &inst) // op_read - process a READ opcode //------------------------------------------------- -void drcbe_x86::op_read(x86code *&dst, const instruction &inst) +void drcbe_x86::op_read(Assembler &a, const instruction &inst) { // validate instruction assert(inst.size() == 4 || inst.size() == 8); @@ -3865,42 +2987,38 @@ void drcbe_x86::op_read(x86code *&dst, const instruction &inst) // normalize parameters be_parameter dstp(*this, inst.param(0), PTYPE_MR); be_parameter addrp(*this, inst.param(1), PTYPE_MRI); - 
const parameter &spacesizep = inst.param(2); + parameter const &spacesizep = inst.param(2); assert(spacesizep.is_size_space()); // pick a target register for the general case - int dstreg = dstp.select_register(REG_EAX); + Gp const dstreg = dstp.select_register(eax); // set up a call to the read byte handler - emit_mov_m32_p32(dst, MBD(REG_ESP, 4), addrp); // mov [esp+4],addrp - emit_mov_m32_imm(dst, MBD(REG_ESP, 0), (uintptr_t)m_space[spacesizep.space()]); // mov [esp],space + emit_mov_m32_p32(a, dword_ptr(esp, 4), addrp); // mov [esp+4],addrp + a.mov(dword_ptr(esp, 0), imm(m_space[spacesizep.space()])); // mov [esp],space if (spacesizep.size() == SIZE_BYTE) { - emit_call(dst, (x86code *)m_accessors[spacesizep.space()].read_byte); - // call read_byte - emit_movzx_r32_r8(dst, dstreg, REG_AL); // movzx dstreg,al + a.call(imm(m_accessors[spacesizep.space()].read_byte)); // call read_byte + a.movzx(dstreg, al); // movzx dstreg,al } else if (spacesizep.size() == SIZE_WORD) { - emit_call(dst, (x86code *)m_accessors[spacesizep.space()].read_word); - // call read_word - emit_movzx_r32_r16(dst, dstreg, REG_AX); // movzx dstreg,ax + a.call(imm(m_accessors[spacesizep.space()].read_word)); // call read_word + a.movzx(dstreg, ax); // movzx dstreg,ax } else if (spacesizep.size() == SIZE_DWORD) { - emit_call(dst, (x86code *)m_accessors[spacesizep.space()].read_dword); - // call read_dword - emit_mov_r32_r32(dst, dstreg, REG_EAX); // mov dstreg,eax + a.call(imm(m_accessors[spacesizep.space()].read_dword)); // call read_dword + a.mov(dstreg, eax); // mov dstreg,eax } else if (spacesizep.size() == SIZE_QWORD) { - emit_call(dst, (x86code *)m_accessors[spacesizep.space()].read_qword); - // call read_qword - emit_mov_r32_r32(dst, dstreg, REG_EAX); // mov dstreg,eax + a.call(imm(m_accessors[spacesizep.space()].read_qword)); // call read_qword + a.mov(dstreg, eax); // mov dstreg,eax } // store low 32 bits - emit_mov_p32_r32(dst, dstp, dstreg); // mov dstp,dstreg + emit_mov_p32_r32(a, dstp, dstreg); // mov dstp,dstreg // 64-bit form stores upper 32 bits if (inst.size() == 8) @@ -3909,18 +3027,18 @@ void drcbe_x86::op_read(x86code *&dst, const instruction &inst) if (spacesizep.size() != SIZE_QWORD) { if (dstp.is_memory()) - emit_mov_m32_imm(dst, MABS(dstp.memory(4)), 0); // mov [dstp+4],0 + a.mov(MABS(dstp.memory(4), 4), 0); // mov [dstp+4],0 else if (dstp.is_int_register()) - emit_mov_m32_imm(dst, MABS(m_reghi[dstp.ireg()]), 0); // mov [reghi],0 + a.mov(MABS(m_reghi[dstp.ireg()], 4), 0); // mov [reghi],0 } // 8-byte case else { if (dstp.is_memory()) - emit_mov_m32_r32(dst, MABS(dstp.memory(4)), REG_EDX); // mov [dstp+4],edx + a.mov(MABS(dstp.memory(4)), edx); // mov [dstp+4],edx else if (dstp.is_int_register()) - emit_mov_m32_r32(dst, MABS(m_reghi[dstp.ireg()]), REG_EDX); // mov [reghi],edx + a.mov(MABS(m_reghi[dstp.ireg()]), edx); // mov [reghi],edx } } } @@ -3930,7 +3048,7 @@ void drcbe_x86::op_read(x86code *&dst, const instruction &inst) // op_readm - process a READM opcode //------------------------------------------------- -void drcbe_x86::op_readm(x86code *&dst, const instruction &inst) +void drcbe_x86::op_readm(Assembler &a, const instruction &inst) { // validate instruction assert(inst.size() == 4 || inst.size() == 8); @@ -3941,40 +3059,37 @@ void drcbe_x86::op_readm(x86code *&dst, const instruction &inst) be_parameter dstp(*this, inst.param(0), PTYPE_MR); be_parameter addrp(*this, inst.param(1), PTYPE_MRI); be_parameter maskp(*this, inst.param(2), PTYPE_MRI); - const parameter &spacesizep = 
inst.param(3); + parameter const &spacesizep = inst.param(3); assert(spacesizep.is_size_space()); // pick a target register for the general case - int dstreg = dstp.select_register(REG_EAX); + Gp const dstreg = dstp.select_register(eax); // set up a call to the read byte handler if (spacesizep.size() != SIZE_QWORD) - emit_mov_m32_p32(dst, MBD(REG_ESP, 8), maskp); // mov [esp+8],maskp + emit_mov_m32_p32(a, dword_ptr(esp, 8), maskp); // mov [esp+8],maskp else - emit_mov_m64_p64(dst, MBD(REG_ESP, 8), maskp); // mov [esp+8],maskp - emit_mov_m32_p32(dst, MBD(REG_ESP, 4), addrp); // mov [esp+4],addrp - emit_mov_m32_imm(dst, MBD(REG_ESP, 0), (uintptr_t)m_space[spacesizep.space()]); // mov [esp],space + emit_mov_m64_p64(a, qword_ptr(esp, 8), maskp); // mov [esp+8],maskp + emit_mov_m32_p32(a, dword_ptr(esp, 4), addrp); // mov [esp+4],addrp + a.mov(dword_ptr(esp, 0), imm(m_space[spacesizep.space()])); // mov [esp],space if (spacesizep.size() == SIZE_WORD) { - emit_call(dst, (x86code *)m_accessors[spacesizep.space()].read_word_masked); - // call read_word_masked - emit_movzx_r32_r16(dst, dstreg, REG_AX); // movzx dstreg,ax + a.call(imm(m_accessors[spacesizep.space()].read_word_masked)); // call read_word_masked + a.movzx(dstreg, ax); // movzx dstreg,ax } else if (spacesizep.size() == SIZE_DWORD) { - emit_call(dst, (x86code *)m_accessors[spacesizep.space()].read_dword_masked); - // call read_dword_masked - emit_mov_r32_r32(dst, dstreg, REG_EAX); // mov dstreg,eax + a.call(imm(m_accessors[spacesizep.space()].read_dword_masked)); // call read_dword_masked + a.mov(dstreg, eax); // mov dstreg,eax } else if (spacesizep.size() == SIZE_QWORD) { - emit_call(dst, (x86code *)m_accessors[spacesizep.space()].read_qword_masked); - // call read_qword_masked - emit_mov_r32_r32(dst, dstreg, REG_EAX); // mov dstreg,eax + a.call(imm(m_accessors[spacesizep.space()].read_qword_masked)); // call read_qword_masked + a.mov(dstreg, eax); // mov dstreg,eax } // store low 32 bits - emit_mov_p32_r32(dst, dstp, dstreg); // mov dstp,dstreg + emit_mov_p32_r32(a, dstp, dstreg); // mov dstp,dstreg // 64-bit form stores upper 32 bits if (inst.size() == 8) @@ -3983,18 +3098,18 @@ void drcbe_x86::op_readm(x86code *&dst, const instruction &inst) if (spacesizep.size() != SIZE_QWORD) { if (dstp.is_memory()) - emit_mov_m32_imm(dst, MABS(dstp.memory(4)), 0); // mov [dstp+4],0 + a.mov(MABS(dstp.memory(4), 4), 0); // mov [dstp+4],0 else if (dstp.is_int_register()) - emit_mov_m32_imm(dst, MABS(m_reghi[dstp.ireg()]), 0); // mov [reghi],0 + a.mov(MABS(m_reghi[dstp.ireg()], 4), 0); // mov [reghi],0 } // 8-byte case else { if (dstp.is_memory()) - emit_mov_m32_r32(dst, MABS(dstp.memory(4)), REG_EDX); // mov [dstp+4],edx + a.mov(MABS(dstp.memory(4)), edx); // mov [dstp+4],edx else if (dstp.is_int_register()) - emit_mov_m32_r32(dst, MABS(m_reghi[dstp.ireg()]), REG_EDX); // mov [reghi],edx + a.mov(MABS(m_reghi[dstp.ireg()]), edx); // mov [reghi],edx } } } @@ -4004,7 +3119,7 @@ void drcbe_x86::op_readm(x86code *&dst, const instruction &inst) // op_write - process a WRITE opcode //------------------------------------------------- -void drcbe_x86::op_write(x86code *&dst, const instruction &inst) +void drcbe_x86::op_write(Assembler &a, const instruction &inst) { // validate instruction assert(inst.size() == 4 || inst.size() == 8); @@ -4014,28 +3129,24 @@ void drcbe_x86::op_write(x86code *&dst, const instruction &inst) // normalize parameters be_parameter addrp(*this, inst.param(0), PTYPE_MRI); be_parameter srcp(*this, inst.param(1), PTYPE_MRI); - const 
parameter &spacesizep = inst.param(2); + parameter const &spacesizep = inst.param(2); assert(spacesizep.is_size_space()); // set up a call to the write byte handler if (spacesizep.size() != SIZE_QWORD) - emit_mov_m32_p32(dst, MBD(REG_ESP, 8), srcp); // mov [esp+8],srcp + emit_mov_m32_p32(a, dword_ptr(esp, 8), srcp); // mov [esp+8],srcp else - emit_mov_m64_p64(dst, MBD(REG_ESP, 8), srcp); // mov [esp+8],srcp - emit_mov_m32_p32(dst, MBD(REG_ESP, 4), addrp); // mov [esp+4],addrp - emit_mov_m32_imm(dst, MBD(REG_ESP, 0), (uintptr_t)m_space[spacesizep.space()]); // mov [esp],space + emit_mov_m64_p64(a, qword_ptr(esp, 8), srcp); // mov [esp+8],srcp + emit_mov_m32_p32(a, dword_ptr(esp, 4), addrp); // mov [esp+4],addrp + a.mov(dword_ptr(esp, 0), imm(m_space[spacesizep.space()])); // mov [esp],space if (spacesizep.size() == SIZE_BYTE) - emit_call(dst, (x86code *)m_accessors[spacesizep.space()].write_byte); - // call write_byte + a.call(imm(m_accessors[spacesizep.space()].write_byte)); // call write_byte else if (spacesizep.size() == SIZE_WORD) - emit_call(dst, (x86code *)m_accessors[spacesizep.space()].write_word); - // call write_word + a.call(imm(m_accessors[spacesizep.space()].write_word)); // call write_word else if (spacesizep.size() == SIZE_DWORD) - emit_call(dst, (x86code *)m_accessors[spacesizep.space()].write_dword); - // call write_dword + a.call(imm(m_accessors[spacesizep.space()].write_dword)); // call write_dword else if (spacesizep.size() == SIZE_QWORD) - emit_call(dst, (x86code *)m_accessors[spacesizep.space()].write_qword); - // call write_qword + a.call(imm(m_accessors[spacesizep.space()].write_qword)); // call write_qword } @@ -4043,7 +3154,7 @@ void drcbe_x86::op_write(x86code *&dst, const instruction &inst) // op_writem - process a WRITEM opcode //------------------------------------------------- -void drcbe_x86::op_writem(x86code *&dst, const instruction &inst) +void drcbe_x86::op_writem(Assembler &a, const instruction &inst) { // validate instruction assert(inst.size() == 4 || inst.size() == 8); @@ -4054,31 +3165,28 @@ void drcbe_x86::op_writem(x86code *&dst, const instruction &inst) be_parameter addrp(*this, inst.param(0), PTYPE_MRI); be_parameter srcp(*this, inst.param(1), PTYPE_MRI); be_parameter maskp(*this, inst.param(2), PTYPE_MRI); - const parameter &spacesizep = inst.param(3); + parameter const &spacesizep = inst.param(3); assert(spacesizep.is_size_space()); // set up a call to the write byte handler if (spacesizep.size() != SIZE_QWORD) { - emit_mov_m32_p32(dst, MBD(REG_ESP, 12), maskp); // mov [esp+12],maskp - emit_mov_m32_p32(dst, MBD(REG_ESP, 8), srcp); // mov [esp+8],srcp + emit_mov_m32_p32(a, dword_ptr(esp, 12), maskp); // mov [esp+12],maskp + emit_mov_m32_p32(a, dword_ptr(esp, 8), srcp); // mov [esp+8],srcp } else { - emit_mov_m64_p64(dst, MBD(REG_ESP, 16), maskp); // mov [esp+16],maskp - emit_mov_m64_p64(dst, MBD(REG_ESP, 8), srcp); // mov [esp+8],srcp + emit_mov_m64_p64(a, qword_ptr(esp, 16), maskp); // mov [esp+16],maskp + emit_mov_m64_p64(a, qword_ptr(esp, 8), srcp); // mov [esp+8],srcp } - emit_mov_m32_p32(dst, MBD(REG_ESP, 4), addrp); // mov [esp+4],addrp - emit_mov_m32_imm(dst, MBD(REG_ESP, 0), (uintptr_t)m_space[spacesizep.space()]); // mov [esp],space + emit_mov_m32_p32(a, dword_ptr(esp, 4), addrp); // mov [esp+4],addrp + a.mov(dword_ptr(esp, 0), imm(m_space[spacesizep.space()])); // mov [esp],space if (spacesizep.size() == SIZE_WORD) - emit_call(dst, (x86code *)m_accessors[spacesizep.space()].write_word_masked); - // call write_word_masked + 
a.call(imm(m_accessors[spacesizep.space()].write_word_masked)); // call write_word_masked else if (spacesizep.size() == SIZE_DWORD) - emit_call(dst, (x86code *)m_accessors[spacesizep.space()].write_dword_masked); - // call write_dword_masked + a.call(imm(m_accessors[spacesizep.space()].write_dword_masked)); // call write_dword_masked else if (spacesizep.size() == SIZE_QWORD) - emit_call(dst, (x86code *)m_accessors[spacesizep.space()].write_qword_masked); - // call write_qword_masked + a.call(imm(m_accessors[spacesizep.space()].write_qword_masked)); // call write_qword_masked } @@ -4086,7 +3194,7 @@ void drcbe_x86::op_writem(x86code *&dst, const instruction &inst) // op_carry - process a CARRY opcode //------------------------------------------------- -void drcbe_x86::op_carry(x86code *&dst, const instruction &inst) +void drcbe_x86::op_carry(Assembler &a, const instruction &inst) { // validate instruction assert(inst.size() == 4 || inst.size() == 8); @@ -4101,16 +3209,16 @@ void drcbe_x86::op_carry(x86code *&dst, const instruction &inst) if (srcp.is_immediate() && bitp.is_immediate()) { if (srcp.immediate() & ((uint64_t)1 << bitp.immediate())) - emit_stc(dst); + a.stc(); else - emit_clc(dst); + a.clc(); } // load non-immediate bit numbers into a register if (!bitp.is_immediate()) { - emit_mov_r32_p32(dst, REG_ECX, bitp); - emit_and_r32_imm(dst, REG_ECX, inst.size() * 8 - 1); + emit_mov_r32_p32(a, ecx, bitp); + a.and_(ecx, inst.size() * 8 - 1); } // 32-bit form @@ -4119,16 +3227,16 @@ void drcbe_x86::op_carry(x86code *&dst, const instruction &inst) if (bitp.is_immediate()) { if (srcp.is_memory()) - emit_bt_m32_imm(dst, MABS(srcp.memory()), bitp.immediate()); // bt [srcp],bitp + a.bt(MABS(srcp.memory(), 4), bitp.immediate()); // bt [srcp],bitp else if (srcp.is_int_register()) - emit_bt_r32_imm(dst, srcp.ireg(), bitp.immediate()); // bt srcp,bitp + a.bt(Gpd(srcp.ireg()), bitp.immediate()); // bt srcp,bitp } else { if (srcp.is_memory()) - emit_bt_m32_r32(dst, MABS(srcp.memory()), REG_ECX); // bt [srcp],ecx + a.bt(MABS(srcp.memory()), ecx); // bt [srcp],ecx else if (srcp.is_int_register()) - emit_bt_r32_r32(dst, srcp.ireg(), REG_ECX); // bt [srcp],ecx + a.bt(Gpd(srcp.ireg()), ecx); // bt [srcp],ecx } } @@ -4138,20 +3246,20 @@ void drcbe_x86::op_carry(x86code *&dst, const instruction &inst) if (bitp.is_immediate()) { if (srcp.is_memory()) - emit_bt_m32_imm(dst, MABS(srcp.memory()), bitp.immediate()); // bt [srcp],bitp + a.bt(MABS(srcp.memory(), 4), bitp.immediate()); // bt [srcp],bitp else if (srcp.is_int_register() && bitp.immediate() < 32) - emit_bt_r32_imm(dst, srcp.ireg(), bitp.immediate()); // bt srcp,bitp + a.bt(Gpd(srcp.ireg()), bitp.immediate()); // bt srcp,bitp else if (srcp.is_int_register() && bitp.immediate() >= 32) - emit_bt_m32_imm(dst, MABS(m_reghi[srcp.ireg()]), bitp.immediate() - 32); // bt [srcp.hi],bitp + a.bt(MABS(m_reghi[srcp.ireg()], 4), bitp.immediate() - 32); // bt [srcp.hi],bitp } else { if (srcp.is_memory()) - emit_bt_m32_r32(dst, MABS(srcp.memory()), REG_ECX); // bt [srcp],ecx + a.bt(MABS(srcp.memory()), ecx); // bt [srcp],ecx else if (srcp.is_int_register()) { - emit_mov_m32_r32(dst, MABS(m_reglo[srcp.ireg()]), srcp.ireg()); // mov [srcp.lo],srcp - emit_bt_m32_r32(dst, MABS(m_reglo[srcp.ireg()]), REG_ECX); // bt [srcp],ecx + a.mov(MABS(m_reglo[srcp.ireg()]), Gpd(srcp.ireg())); // mov [srcp.lo],srcp + a.bt(MABS(m_reglo[srcp.ireg()]), ecx); // bt [srcp],ecx } } } @@ -4162,7 +3270,7 @@ void drcbe_x86::op_carry(x86code *&dst, const instruction &inst) // op_set - process a 
SET opcode //------------------------------------------------- -void drcbe_x86::op_set(x86code *&dst, const instruction &inst) +void drcbe_x86::op_set(Assembler &a, const instruction &inst) { // validate instruction assert(inst.size() == 4 || inst.size() == 8); @@ -4173,23 +3281,23 @@ void drcbe_x86::op_set(x86code *&dst, const instruction &inst) be_parameter dstp(*this, inst.param(0), PTYPE_MR); // pick a target register for the general case - int dstreg = dstp.select_register(REG_EAX); + Gp const dstreg = dstp.select_register(eax); // set to AL - emit_setcc_r8(dst, X86_CONDITION(inst.condition()), REG_AL); // setcc al - emit_movzx_r32_r8(dst, dstreg, REG_AL); // movzx dstreg,al + a.set(X86_CONDITION(inst.condition()), al); // setcc al + a.movzx(dstreg, al); // movzx dstreg,al // store low 32 bits - emit_mov_p32_r32(dst, dstp, dstreg); // mov dstp,dstreg + emit_mov_p32_r32(a, dstp, dstreg); // mov dstp,dstreg // 64-bit form stores upper 32 bits if (inst.size() == 8) { // general case if (dstp.is_memory()) - emit_mov_m32_imm(dst, MABS(dstp.memory(4)), 0); // mov [dstp+4],0 + a.mov(MABS(dstp.memory(4), 4), 0); // mov [dstp+4],0 else if (dstp.is_int_register()) - emit_mov_m32_imm(dst, MABS(m_reghi[dstp.ireg()]), 0); // mov [reghi],0 + a.mov(MABS(m_reghi[dstp.ireg()], 4), 0); // mov [reghi],0 } } @@ -4198,10 +3306,8 @@ void drcbe_x86::op_set(x86code *&dst, const instruction &inst) // op_mov - process a MOV opcode //------------------------------------------------- -void drcbe_x86::op_mov(x86code *&dst, const instruction &inst) +void drcbe_x86::op_mov(Assembler &a, const instruction &inst) { - x86code *savedst = dst; - // validate instruction assert(inst.size() == 4 || inst.size() == 8); assert_any_condition(inst); @@ -4212,47 +3318,39 @@ void drcbe_x86::op_mov(x86code *&dst, const instruction &inst) be_parameter srcp(*this, inst.param(1), PTYPE_MRI); // pick a target register for the general case - int dstreg = dstp.select_register(REG_EAX); + Gp const dstreg = dstp.select_register(eax); - // always start with a jmp - emit_link skip = { nullptr }; - if (inst.condition() != uml::COND_ALWAYS) - emit_jcc_short_link(dst, X86_NOT_CONDITION(inst.condition()), skip); // jcc skip + // add a conditional branch unless a conditional move is possible + Label skip = a.newLabel(); + if (inst.condition() != uml::COND_ALWAYS && ((inst.size() == 8) || !(dstp.is_int_register() && !srcp.is_immediate()))) + a.short_().j(X86_NOT_CONDITION(inst.condition()), skip); // jcc skip // 32-bit form if (inst.size() == 4) { // register to memory if (dstp.is_memory() && srcp.is_int_register()) - emit_mov_m32_r32(dst, MABS(dstp.memory()), srcp.ireg()); // mov [dstp],srcp + a.mov(MABS(dstp.memory()), Gpd(srcp.ireg())); // mov [dstp],srcp // immediate to memory else if (dstp.is_memory() && srcp.is_immediate()) - emit_mov_m32_imm(dst, MABS(dstp.memory()), srcp.immediate()); // mov [dstp],srcp + a.mov(MABS(dstp.memory(), 4), srcp.immediate()); // mov [dstp],srcp // conditional memory to register else if (inst.condition() != uml::COND_ALWAYS && dstp.is_int_register() && srcp.is_memory()) - { - dst = savedst; - skip.target = nullptr; - emit_cmovcc_r32_m32(dst, X86_CONDITION(inst.condition()), dstp.ireg(), MABS(srcp.memory())); + a.cmov(X86_CONDITION(inst.condition()), Gpd(dstp.ireg()), MABS(srcp.memory())); // cmovcc dstp,[srcp] - } // conditional register to register else if (inst.condition() != uml::COND_ALWAYS && dstp.is_int_register() && srcp.is_int_register()) - { - dst = savedst; - skip.target = nullptr; - 
emit_cmovcc_r32_r32(dst, X86_CONDITION(inst.condition()), dstp.ireg(), srcp.ireg()); + a.cmov(X86_CONDITION(inst.condition()), Gpd(dstp.ireg()), Gpd(srcp.ireg())); // cmovcc dstp,srcp - } // general case else { - emit_mov_r32_p32_keepflags(dst, dstreg, srcp); // mov dstreg,srcp - emit_mov_p32_r32(dst, dstp, dstreg); // mov dstp,dstreg + emit_mov_r32_p32_keepflags(a, dstreg, srcp); // mov dstreg,srcp + emit_mov_p32_r32(a, dstp, dstreg); // mov dstp,dstreg } } @@ -4262,29 +3360,32 @@ void drcbe_x86::op_mov(x86code *&dst, const instruction &inst) // register to memory if (dstp.is_memory() && srcp.is_int_register()) { - emit_mov_r32_m32(dst, REG_EAX, MABS(m_reghi[srcp.ireg()])); // mov eax,reghi[srcp] - emit_mov_m32_r32(dst, MABS(dstp.memory()), srcp.ireg()); // mov [dstp],srcp - emit_mov_m32_r32(dst, MABS(dstp.memory(4)), REG_EAX); // mov [dstp+4],eax + a.mov(eax, MABS(m_reghi[srcp.ireg()])); // mov eax,reghi[srcp] + a.mov(MABS(dstp.memory(0)), Gpd(srcp.ireg())); // mov [dstp],srcp + a.mov(MABS(dstp.memory(4)), eax); // mov [dstp+4],eax } // immediate to memory else if (dstp.is_memory() && srcp.is_immediate()) { - emit_mov_m32_imm(dst, MABS(dstp.memory()), srcp.immediate()); // mov [dstp],srcp - emit_mov_m32_imm(dst, MABS(dstp.memory(4)), srcp.immediate() >> 32); // mov [dstp+4],srcp >> 32 + a.mov(MABS(dstp.memory(0), 4), srcp.immediate()); // mov [dstp],srcp + a.mov(MABS(dstp.memory(4), 4), srcp.immediate() >> 32); // mov [dstp+4],srcp >> 32 } // general case else { - emit_mov_r64_p64(dst, dstreg, REG_EDX, srcp); // mov edx:dstreg,srcp - emit_mov_p64_r64(dst, dstp, dstreg, REG_EDX); // mov dstp,edx:dstreg + emit_mov_r64_p64(a, dstreg, edx, srcp); // mov edx:dstreg,srcp + emit_mov_p64_r64(a, dstp, dstreg, edx); // mov dstp,edx:dstreg } } - // resolve the jump - if (skip.target != nullptr) - track_resolve_link(dst, skip); + // bind the label + if (inst.condition() != uml::COND_ALWAYS && ((inst.size() == 8) || !(dstp.is_int_register() && !srcp.is_immediate()))) + { + a.bind(skip); + reset_last_upper_lower_reg(); + } } @@ -4292,7 +3393,7 @@ void drcbe_x86::op_mov(x86code *&dst, const instruction &inst) // op_sext - process a SEXT opcode //------------------------------------------------- -void drcbe_x86::op_sext(x86code *&dst, const instruction &inst) +void drcbe_x86::op_sext(Assembler &a, const instruction &inst) { // validate instruction assert(inst.size() == 4 || inst.size() == 8); @@ -4302,50 +3403,50 @@ void drcbe_x86::op_sext(x86code *&dst, const instruction &inst) // normalize parameters be_parameter dstp(*this, inst.param(0), PTYPE_MR); be_parameter srcp(*this, inst.param(1), PTYPE_MRI); - const parameter &sizep = inst.param(2); + parameter const &sizep = inst.param(2); assert(sizep.is_size()); // pick a target register for the general case - int dstreg = (inst.size() == 8) ? REG_EAX : dstp.select_register(REG_EAX); + Gp const dstreg = (inst.size() == 8) ? 
eax : dstp.select_register(eax); // convert 8-bit source registers to EAX if (sizep.size() == SIZE_BYTE && srcp.is_int_register() && (srcp.ireg() & 4)) { - emit_mov_r32_r32(dst, REG_EAX, srcp.ireg()); // mov eax,srcp - srcp = be_parameter::make_ireg(REG_EAX); + a.mov(eax, Gpd(srcp.ireg())); // mov eax,srcp + srcp = be_parameter::make_ireg(eax.id()); } // general case if (srcp.is_memory()) { if (sizep.size() == SIZE_BYTE) - emit_movsx_r32_m8(dst, dstreg, MABS(srcp.memory())); // movsx dstreg,[srcp] + a.movsx(dstreg, MABS(srcp.memory(), 1)); // movsx dstreg,[srcp] else if (sizep.size() == SIZE_WORD) - emit_movsx_r32_m16(dst, dstreg, MABS(srcp.memory())); // movsx dstreg,[srcp] + a.movsx(dstreg, MABS(srcp.memory(), 2)); // movsx dstreg,[srcp] else if (sizep.size() == SIZE_DWORD) - emit_mov_r32_m32(dst, dstreg, MABS(srcp.memory())); // mov dstreg,[srcp] + a.mov(dstreg, MABS(srcp.memory())); // mov dstreg,[srcp] } else if (srcp.is_int_register()) { if (sizep.size() == SIZE_BYTE) - emit_movsx_r32_r8(dst, dstreg, srcp.ireg()); // movsx dstreg,srcp + a.movsx(dstreg, GpbLo(srcp.ireg())); // movsx dstreg,srcp else if (sizep.size() == SIZE_WORD) - emit_movsx_r32_r16(dst, dstreg, srcp.ireg()); // movsx dstreg,srcp - else if (sizep.size() == SIZE_DWORD && dstreg != srcp.ireg()) - emit_mov_r32_r32(dst, dstreg, srcp.ireg()); // mov dstreg,srcp + a.movsx(dstreg, Gpw(srcp.ireg())); // movsx dstreg,srcp + else if (sizep.size() == SIZE_DWORD && dstreg.id() != srcp.ireg()) + a.mov(dstreg, Gpd(srcp.ireg())); // mov dstreg,srcp } if (inst.flags() != 0) - emit_test_r32_r32(dst, dstreg, dstreg); // test dstreg,dstreg + a.test(dstreg, dstreg); // test dstreg,dstreg // 32-bit form: store the low 32 bits if (inst.size() == 4) - emit_mov_p32_r32(dst, dstp, dstreg); // mov dstp,dstreg + emit_mov_p32_r32(a, dstp, dstreg); // mov dstp,dstreg // 64-bit form: sign extend to 64 bits and store edx:eax else if (inst.size() == 8) { - emit_cdq(dst); // cdq - emit_mov_p64_r64(dst, dstp, REG_EAX, REG_EDX); // mov dstp,edx:eax + a.cdq(); // cdq + emit_mov_p64_r64(a, dstp, eax, edx); // mov dstp,edx:eax } } @@ -4354,7 +3455,7 @@ void drcbe_x86::op_sext(x86code *&dst, const instruction &inst) // op_roland - process an ROLAND opcode //------------------------------------------------- -void drcbe_x86::op_roland(x86code *&dst, const instruction &inst) +void drcbe_x86::op_roland(Assembler &a, const instruction &inst) { // validate instruction assert(inst.size() == 4 || inst.size() == 8); @@ -4368,24 +3469,42 @@ void drcbe_x86::op_roland(x86code *&dst, const instruction &inst) be_parameter maskp(*this, inst.param(3), PTYPE_MRI); // pick a target register for the general case - int dstreg = dstp.select_register(REG_EAX, shiftp, maskp); + Gp const dstreg = dstp.select_register(eax, shiftp, maskp); // 32-bit form if (inst.size() == 4) { - emit_mov_r32_p32(dst, dstreg, srcp); // mov dstreg,srcp - emit_rol_r32_p32(dst, dstreg, shiftp, inst); // rol dstreg,shiftp - emit_and_r32_p32(dst, dstreg, maskp, inst); // and dstreg,maskp - emit_mov_p32_r32(dst, dstp, dstreg); // mov dstp,dstreg + emit_mov_r32_p32(a, dstreg, srcp); // mov dstreg,srcp + shift_op_param(a, Inst::kIdRol, dstreg, shiftp, // rol dstreg,shiftp + [inst](Assembler &a, Operand const &dst, be_parameter const &src) + { + // optimize zero case + return (!inst.flags() && !src.immediate()); + }); + alu_op_param(a, Inst::kIdAnd, dstreg, maskp, // and dstreg,maskp + [inst](Assembler &a, Operand const &dst, be_parameter const &src) + { + // optimize all-zero and all-one cases + if 
(!inst.flags() && !src.immediate()) + { + a.xor_(dst.as<Gpd>(), dst.as<Gpd>()); + return true; + } + else if (!inst.flags() && u32(src.immediate()) == 0xffffffffU) + return true; + + return false; + }); + emit_mov_p32_r32(a, dstp, dstreg); // mov dstp,dstreg } // 64-bit form else if (inst.size() == 8) { - emit_mov_r64_p64(dst, dstreg, REG_EDX, srcp); // mov edx:dstreg,srcp - emit_rol_r64_p64(dst, dstreg, REG_EDX, shiftp, inst); // rol edx:dstreg,shiftp - emit_and_r64_p64(dst, dstreg, REG_EDX, maskp, inst); // and edx:dstreg,maskp - emit_mov_p64_r64(dst, dstp, dstreg, REG_EDX); // mov dstp,edx:dstreg + emit_mov_r64_p64(a, dstreg, edx, srcp); // mov edx:dstreg,srcp + emit_rol_r64_p64(a, dstreg, edx, shiftp, inst); // rol edx:dstreg,shiftp + emit_and_r64_p64(a, dstreg, edx, maskp, inst); // and edx:dstreg,maskp + emit_mov_p64_r64(a, dstp, dstreg, edx); // mov dstp,edx:dstreg } } @@ -4394,7 +3513,7 @@ void drcbe_x86::op_roland(x86code *&dst, const instruction &inst) // op_rolins - process an ROLINS opcode //------------------------------------------------- -void drcbe_x86::op_rolins(x86code *&dst, const instruction &inst) +void drcbe_x86::op_rolins(Assembler &a, const instruction &inst) { // validate instruction assert(inst.size() == 4 || inst.size() == 8); @@ -4408,89 +3527,93 @@ void drcbe_x86::op_rolins(x86code *&dst, const instruction &inst) be_parameter maskp(*this, inst.param(3), PTYPE_MRI); // pick a target register for the general case - int dstreg = dstp.select_register(REG_ECX, shiftp, maskp); + Gp const dstreg = dstp.select_register(ecx, shiftp, maskp); // 32-bit form if (inst.size() == 4) { - emit_mov_r32_p32(dst, REG_EAX, srcp); // mov eax,srcp - emit_rol_r32_p32(dst, REG_EAX, shiftp, inst); // rol eax,shiftp - emit_mov_r32_p32(dst, dstreg, dstp); // mov dstreg,dstp + emit_mov_r32_p32(a, eax, srcp); // mov eax,srcp + shift_op_param(a, Inst::kIdRol, eax, shiftp, // rol eax,shiftp + [inst](Assembler &a, Operand const &dst, be_parameter const &src) + { + // optimize zero case + return (!inst.flags() && !src.immediate()); + }); + emit_mov_r32_p32(a, dstreg, dstp); // mov dstreg,dstp if (maskp.is_immediate()) { - emit_and_r32_imm(dst, REG_EAX, maskp.immediate()); // and eax,maskp - emit_and_r32_imm(dst, dstreg, ~maskp.immediate()); // and dstreg,~maskp + a.and_(eax, maskp.immediate()); // and eax,maskp + a.and_(dstreg, ~maskp.immediate()); // and dstreg,~maskp } else { - emit_mov_r32_p32(dst, REG_EDX, maskp); // mov edx,maskp - emit_and_r32_r32(dst, REG_EAX, REG_EDX); // and eax,edx - emit_not_r32(dst, REG_EDX); // not edx - emit_and_r32_r32(dst, dstreg, REG_EDX); // and dstreg,edx + emit_mov_r32_p32(a, edx, maskp); // mov edx,maskp + a.and_(eax, edx); // and eax,edx + a.not_(edx); // not edx + a.and_(dstreg, edx); // and dstreg,edx } - emit_or_r32_r32(dst, dstreg, REG_EAX); // or dstreg,eax - emit_mov_p32_r32(dst, dstp, dstreg); // mov dstp,dstreg + a.or_(dstreg, eax); // or dstreg,eax + emit_mov_p32_r32(a, dstp, dstreg); // mov dstp,dstreg } // 64-bit form else if (inst.size() == 8) { - emit_mov_r64_p64(dst, REG_EAX, REG_EDX, srcp); // mov edx:eax,srcp - emit_rol_r64_p64(dst, REG_EAX, REG_EDX, shiftp, inst); // rol edx:eax,shiftp + emit_mov_r64_p64(a, eax, edx, srcp); // mov edx:eax,srcp + emit_rol_r64_p64(a, eax, edx, shiftp, inst); // rol edx:eax,shiftp if (maskp.is_immediate()) { - emit_and_r32_imm(dst, REG_EAX, maskp.immediate()); // and eax,maskp - emit_and_r32_imm(dst, REG_EDX, maskp.immediate() >> 32); // and edx,maskp >> 32 + a.and_(eax, maskp.immediate()); // and eax,maskp + 
a.and_(edx, maskp.immediate() >> 32); // and edx,maskp >> 32 if (dstp.is_int_register()) { - emit_and_r32_imm(dst, dstp.ireg(), ~maskp.immediate()); // and dstp.lo,~maskp - emit_and_m32_imm(dst, MABS(m_reghi[dstp.ireg()]), ~maskp.immediate() >> 32);// and dstp.hi,~maskp >> 32 - emit_or_r32_r32(dst, dstp.ireg(), REG_EAX); // or dstp.lo,eax - emit_or_m32_r32(dst, MABS(m_reghi[dstp.ireg()]), REG_EDX); // or dstp.hi,edx + a.and_(Gpd(dstp.ireg()), ~maskp.immediate()); // and dstp.lo,~maskp + a.and_(MABS(m_reghi[dstp.ireg()], 4), ~maskp.immediate() >> 32); // and dstp.hi,~maskp >> 32 + a.or_(Gpd(dstp.ireg()), eax); // or dstp.lo,eax + a.or_(MABS(m_reghi[dstp.ireg()]), edx); // or dstp.hi,edx } else { - emit_and_m32_imm(dst, MABS(dstp.memory()), ~maskp.immediate()); // and dstp.lo,~maskp - emit_and_m32_imm(dst, MABS(dstp.memory(4)), ~maskp.immediate() >> 32); // and dstp.hi,~maskp >> 32 - emit_or_m32_r32(dst, MABS(dstp.memory()), REG_EAX); // or dstp.lo,eax - emit_or_m32_r32(dst, MABS(dstp.memory(4)), REG_EDX); // or dstp.hi,edx + a.and_(MABS(dstp.memory(0), 4), ~maskp.immediate()); // and dstp.lo,~maskp + a.and_(MABS(dstp.memory(4), 4), ~maskp.immediate() >> 32); // and dstp.hi,~maskp >> 32 + a.or_(MABS(dstp.memory(0)), eax); // or dstp.lo,eax + a.or_(MABS(dstp.memory(4)), edx); // or dstp.hi,edx } } else { - int tempreg = REG_EBX; - emit_mov_m32_r32(dst, MBD(REG_ESP, -8), tempreg); // mov [esp-8],ebx - emit_mov_r64_p64(dst, tempreg, REG_ECX, maskp); // mov ecx:ebx,maskp - emit_and_r32_r32(dst, REG_EAX, tempreg); // and eax,ebx - emit_and_r32_r32(dst, REG_EDX, REG_ECX); // and edx,ecx - emit_not_r32(dst, tempreg); // not ebx - emit_not_r32(dst, REG_ECX); // not ecx + a.mov(ptr(esp, -8), ebx); // mov [esp-8],ebx + emit_mov_r64_p64(a, ebx, ecx, maskp); // mov ecx:ebx,maskp + a.and_(eax, ebx); // and eax,ebx + a.and_(edx, ecx); // and edx,ecx + a.not_(ebx); // not ebx + a.not_(ecx); // not ecx if (dstp.is_int_register()) { - emit_and_r32_r32(dst, dstp.ireg(), tempreg); // and dstp.lo,ebx - emit_and_m32_r32(dst, MABS(m_reghi[dstp.ireg()]), REG_ECX); // and dstp.hi,ecx - emit_or_r32_r32(dst, dstp.ireg(), REG_EAX); // or dstp.lo,eax - emit_or_m32_r32(dst, MABS(m_reghi[dstp.ireg()]), REG_EDX); // or dstp.hi,edx + a.and_(Gpd(dstp.ireg()), ebx); // and dstp.lo,ebx + a.and_(MABS(m_reghi[dstp.ireg()]), ecx); // and dstp.hi,ecx + a.or_(Gpd(dstp.ireg()), eax); // or dstp.lo,eax + a.or_(MABS(m_reghi[dstp.ireg()]), edx); // or dstp.hi,edx } else { - emit_and_m32_r32(dst, MABS(dstp.memory()), tempreg); // and dstp.lo,ebx - emit_and_m32_r32(dst, MABS(dstp.memory(4)), REG_ECX); // and dstp.hi,ecx - emit_or_m32_r32(dst, MABS(dstp.memory()), REG_EAX); // or dstp.lo,eax - emit_or_m32_r32(dst, MABS(dstp.memory(4)), REG_EDX); // or dstp.hi,edx + a.and_(MABS(dstp.memory(0)), ebx); // and dstp.lo,ebx + a.and_(MABS(dstp.memory(4)), ecx); // and dstp.hi,ecx + a.or_(MABS(dstp.memory(0)), eax); // or dstp.lo,eax + a.or_(MABS(dstp.memory(4)), edx); // or dstp.hi,edx } - emit_mov_r32_m32(dst, tempreg, MBD(REG_ESP, -8)); // mov ebx,[esp-8] + a.mov(ebx, ptr(esp, -8)); // mov ebx,[esp-8] } if (inst.flags() == FLAG_Z) - emit_or_r32_r32(dst, REG_EAX, REG_EDX); // or eax,edx + a.or_(eax, edx); // or eax,edx else if (inst.flags() == FLAG_S) ;// do nothing -- final OR will have the right result else if (inst.flags() == (FLAG_Z | FLAG_S)) { - emit_movzx_r32_r16(dst, REG_ECX, REG_AX); // movzx ecx,ax - emit_shr_r32_imm(dst, REG_EAX, 16); // shr eax,16 - emit_or_r32_r32(dst, REG_EDX, REG_ECX); // or edx,ecx - emit_or_r32_r32(dst, 
REG_EDX, REG_EAX); // or edx,eax + a.movzx(ecx, ax); // movzx ecx,ax + a.shr(eax, 16); // shr eax,16 + a.or_(edx, ecx); // or edx,ecx + a.or_(edx, eax); // or edx,eax } } } @@ -4500,7 +3623,7 @@ void drcbe_x86::op_rolins(x86code *&dst, const instruction &inst) // op_add - process a ADD opcode //------------------------------------------------- -void drcbe_x86::op_add(x86code *&dst, const instruction &inst) +void drcbe_x86::op_add(Assembler &a, const instruction &inst) { // validate instruction assert(inst.size() == 4 || inst.size() == 8); @@ -4514,29 +3637,39 @@ void drcbe_x86::op_add(x86code *&dst, const instruction &inst) normalize_commutative(src1p, src2p); // pick a target register for the general case - int dstreg = dstp.select_register(REG_EAX, src2p); + Gp const dstreg = dstp.select_register(eax, src2p); // 32-bit form if (inst.size() == 4) { // dstp == src1p in memory if (dstp.is_memory() && dstp == src1p) - emit_add_m32_p32(dst, MABS(dstp.memory()), src2p, inst); // add [dstp],src2p + alu_op_param(a, Inst::kIdAdd, MABS(dstp.memory(), 4), src2p, // add [dstp],src2p + [inst](Assembler &a, Operand const &dst, be_parameter const &src) + { + // optimize zero case + return (!inst.flags() && !src.immediate()); + }); // reg = reg + imm - else if (dstp.is_int_register() && src1p.is_int_register() && src2p.is_immediate() && inst.flags() == 0) - emit_lea_r32_m32(dst, dstp.ireg(), MBD(src1p.ireg(), src2p.immediate())); // lea dstp,[src1p+src2p] + else if (dstp.is_int_register() && src1p.is_int_register() && src2p.is_immediate() && !inst.flags()) + a.lea(Gpd(dstp.ireg()), ptr(Gpd(src1p.ireg()), src2p.immediate())); // lea dstp,[src1p+src2p] // reg = reg + reg - else if (dstp.is_int_register() && src1p.is_int_register() && src2p.is_int_register() && inst.flags() == 0) - emit_lea_r32_m32(dst, dstp.ireg(), MBISD(src1p.ireg(), src2p.ireg(), 1, 0)); // lea dstp,[src1p+src2p] + else if (dstp.is_int_register() && src1p.is_int_register() && src2p.is_int_register() && !inst.flags()) + a.lea(Gpd(dstp.ireg()), ptr(Gpd(src1p.ireg()), Gpd(src2p.ireg()))); // lea dstp,[src1p+src2p] // general case else { - emit_mov_r32_p32(dst, dstreg, src1p); // mov dstreg,src1p - emit_add_r32_p32(dst, dstreg, src2p, inst); // add dstreg,src2p - emit_mov_p32_r32(dst, dstp, dstreg); // mov dstp,dstreg + emit_mov_r32_p32(a, dstreg, src1p); // mov dstreg,src1p + alu_op_param(a, Inst::kIdAdd, dstreg, src2p, // add dstreg,src2p + [inst](Assembler &a, Operand const &dst, be_parameter const &src) + { + // optimize zero case + return (!inst.flags() && !src.immediate()); + }); + emit_mov_p32_r32(a, dstp, dstreg); // mov dstp,dstreg } } @@ -4545,14 +3678,16 @@ void drcbe_x86::op_add(x86code *&dst, const instruction &inst) { // dstp == src1p in memory if (dstp.is_memory() && dstp == src1p) - emit_add_m64_p64(dst, MABS(dstp.memory()), src2p, inst); // add [dstp],src2p + alu_op_param(a, Inst::kIdAdd, Inst::kIdAdc, // add [dstp],src2p + MABS(dstp.memory(0), 4), MABS(dstp.memory(4), 4), src2p, inst.flags() & FLAG_Z); // general case else { - emit_mov_r64_p64(dst, dstreg, REG_EDX, src1p); // mov dstreg:dstp,[src1p] - emit_add_r64_p64(dst, dstreg, REG_EDX, src2p, inst); // add dstreg:dstp,src2p - emit_mov_p64_r64(dst, dstp, dstreg, REG_EDX); // mov dstp,dstreg:eax + emit_mov_r64_p64(a, dstreg, edx, src1p); // mov edx:dstreg,[src1p] + alu_op_param(a, Inst::kIdAdd, Inst::kIdAdc, // add edx:dstreg,src2p + dstreg, edx, src2p, inst.flags() & FLAG_Z); + emit_mov_p64_r64(a, dstp, dstreg, edx); // mov dstp,edx:dstreg } } } @@ -4562,7 +3697,7 @@ 
void drcbe_x86::op_add(x86code *&dst, const instruction &inst) // op_addc - process a ADDC opcode //------------------------------------------------- -void drcbe_x86::op_addc(x86code *&dst, const instruction &inst) +void drcbe_x86::op_addc(Assembler &a, const instruction &inst) { // validate instruction assert(inst.size() == 4 || inst.size() == 8); @@ -4576,21 +3711,21 @@ void drcbe_x86::op_addc(x86code *&dst, const instruction &inst) normalize_commutative(src1p, src2p); // pick a target register for the general case - int dstreg = dstp.select_register(REG_EAX, src2p); + Gp const dstreg = dstp.select_register(eax, src2p); // 32-bit form if (inst.size() == 4) { // dstp == src1p in memory if (dstp.is_memory() && dstp == src1p) - emit_adc_m32_p32(dst, MABS(dstp.memory()), src2p, inst); // adc [dstp],src2p + alu_op_param(a, Inst::kIdAdc, MABS(dstp.memory(), 4), src2p); // adc [dstp],src2p // general case else { - emit_mov_r32_p32_keepflags(dst, dstreg, src1p); // mov dstreg,src1p - emit_adc_r32_p32(dst, dstreg, src2p, inst); // adc dstreg,src2p - emit_mov_p32_r32(dst, dstp, dstreg); // mov dstp,dstreg + emit_mov_r32_p32_keepflags(a, dstreg, src1p); // mov dstreg,src1p + alu_op_param(a, Inst::kIdAdc, dstreg, src2p); // adc dstreg,src2p + emit_mov_p32_r32(a, dstp, dstreg); // mov dstp,dstreg } } @@ -4599,14 +3734,16 @@ void drcbe_x86::op_addc(x86code *&dst, const instruction &inst) { // dstp == src1p in memory if (dstp.is_memory() && dstp == src1p) - emit_adc_m64_p64(dst, MABS(dstp.memory()), src2p, inst); // adc [dstp],src2p + alu_op_param(a, Inst::kIdAdc, Inst::kIdAdc, // adc [dstp],src2p + MABS(dstp.memory(0), 4), MABS(dstp.memory(4), 4), src2p, inst.flags() & FLAG_Z); // general case else { - emit_mov_r64_p64_keepflags(dst, dstreg, REG_EDX, src1p); // mov dstreg:dstp,[src1p] - emit_adc_r64_p64(dst, dstreg, REG_EDX, src2p, inst); // adc dstreg:dstp,src2p - emit_mov_p64_r64(dst, dstp, dstreg, REG_EDX); // mov dstp,dstreg:eax + emit_mov_r64_p64_keepflags(a, dstreg, edx, src1p); // mov edx:dstreg,[src1p] + alu_op_param(a, Inst::kIdAdc, Inst::kIdAdc, // adc edx:dstreg,src2p + dstreg, edx, src2p, inst.flags() & FLAG_Z); + emit_mov_p64_r64(a, dstp, dstreg, edx); // mov dstp,edx:dstreg } } } @@ -4616,7 +3753,7 @@ void drcbe_x86::op_addc(x86code *&dst, const instruction &inst) // op_sub - process a SUB opcode //------------------------------------------------- -void drcbe_x86::op_sub(x86code *&dst, const instruction &inst) +void drcbe_x86::op_sub(Assembler &a, const instruction &inst) { // validate instruction assert(inst.size() == 4 || inst.size() == 8); @@ -4629,25 +3766,35 @@ void drcbe_x86::op_sub(x86code *&dst, const instruction &inst) be_parameter src2p(*this, inst.param(2), PTYPE_MRI); // pick a target register for the general case - int dstreg = dstp.select_register(REG_EAX, src2p); + Gp const dstreg = dstp.select_register(eax, src2p); // 32-bit form if (inst.size() == 4) { // dstp == src1p in memory if (dstp.is_memory() && dstp == src1p) - emit_sub_m32_p32(dst, MABS(dstp.memory()), src2p, inst); // sub [dstp],src2p + alu_op_param(a, Inst::kIdSub, MABS(dstp.memory(), 4), src2p, // sub [dstp],src2p + [inst](Assembler &a, Operand const &dst, be_parameter const &src) + { + // optimize zero case + return (!inst.flags() && !src.immediate()); + }); // reg = reg - imm - else if (dstp.is_int_register() && src1p.is_int_register() && src2p.is_immediate() && inst.flags() == 0) - emit_lea_r32_m32(dst, dstp.ireg(), MBD(src1p.ireg(), -src2p.immediate())); // lea dstp,[src1p-src2p] + else if 
(dstp.is_int_register() && src1p.is_int_register() && src2p.is_immediate() && !inst.flags()) + a.lea(Gpd(dstp.ireg()), ptr(Gpd(src1p.ireg()), -src2p.immediate())); // lea dstp,[src1p-src2p] // general case else { - emit_mov_r32_p32(dst, dstreg, src1p); // mov dstreg,src1p - emit_sub_r32_p32(dst, dstreg, src2p, inst); // sub dstreg,src2p - emit_mov_p32_r32(dst, dstp, dstreg); // mov dstp,dstreg + emit_mov_r32_p32(a, dstreg, src1p); // mov dstreg,src1p + alu_op_param(a, Inst::kIdSub, dstreg, src2p, // sub dstreg,src2p + [inst](Assembler &a, Operand const &dst, be_parameter const &src) + { + // optimize zero case + return (!inst.flags() && !src.immediate()); + }); + emit_mov_p32_r32(a, dstp, dstreg); // mov dstp,dstreg } } @@ -4656,14 +3803,16 @@ void drcbe_x86::op_sub(x86code *&dst, const instruction &inst) { // dstp == src1p in memory if (dstp.is_memory() && dstp == src1p) - emit_sub_m64_p64(dst, MABS(dstp.memory()), src2p, inst); // sub [dstp],src2p + alu_op_param(a, Inst::kIdSub, Inst::kIdSbb, // sub [dstp],src2p + MABS(dstp.memory(0), 4), MABS(dstp.memory(4), 4), src2p, inst.flags() & FLAG_Z); // general case else { - emit_mov_r64_p64(dst, dstreg, REG_EDX, src1p); // mov dstreg:dstp,[src1p] - emit_sub_r64_p64(dst, dstreg, REG_EDX, src2p, inst); // sub dstreg:dstp,src2p - emit_mov_p64_r64(dst, dstp, dstreg, REG_EDX); // mov dstp,dstreg:eax + emit_mov_r64_p64(a, dstreg, edx, src1p); // mov edx:dstreg,[src1p] + alu_op_param(a, Inst::kIdSub, Inst::kIdSbb, // sub edx:dstreg,src2p + dstreg, edx, src2p, inst.flags() & FLAG_Z); + emit_mov_p64_r64(a, dstp, dstreg, edx); // mov dstp,edx:dstreg } } } @@ -4673,7 +3822,7 @@ void drcbe_x86::op_sub(x86code *&dst, const instruction &inst) // op_subc - process a SUBC opcode //------------------------------------------------- -void drcbe_x86::op_subc(x86code *&dst, const instruction &inst) +void drcbe_x86::op_subc(Assembler &a, const instruction &inst) { // validate instruction assert(inst.size() == 4 || inst.size() == 8); @@ -4686,21 +3835,21 @@ void drcbe_x86::op_subc(x86code *&dst, const instruction &inst) be_parameter src2p(*this, inst.param(2), PTYPE_MRI); // pick a target register for the general case - int dstreg = dstp.select_register(REG_EAX, src2p); + Gp const dstreg = dstp.select_register(eax, src2p); // 32-bit form if (inst.size() == 4) { // dstp == src1p in memory if (dstp.is_memory() && dstp == src1p) - emit_sbb_m32_p32(dst, MABS(dstp.memory()), src2p, inst); // sbb [dstp],src2p + alu_op_param(a, Inst::kIdSbb, MABS(dstp.memory(), 4), src2p); // sbb [dstp],src2p // general case else { - emit_mov_r32_p32_keepflags(dst, dstreg, src1p); // mov dstreg,src1p - emit_sbb_r32_p32(dst, dstreg, src2p, inst); // sbb dstreg,src2p - emit_mov_p32_r32(dst, dstp, dstreg); // mov dstp,dstreg + emit_mov_r32_p32_keepflags(a, dstreg, src1p); // mov dstreg,src1p + alu_op_param(a, Inst::kIdSbb, dstreg, src2p); // sbb dstreg,src2p + emit_mov_p32_r32(a, dstp, dstreg); // mov dstp,dstreg } } @@ -4709,14 +3858,16 @@ void drcbe_x86::op_subc(x86code *&dst, const instruction &inst) { // dstp == src1p in memory if (dstp.is_memory() && dstp == src1p) - emit_sbb_m64_p64(dst, MABS(dstp.memory()), src2p, inst); // sbb [dstp],src2p + alu_op_param(a, Inst::kIdSbb, Inst::kIdSbb, // sbb [dstp],src2p + MABS(dstp.memory(0), 4), MABS(dstp.memory(4), 4), src2p, inst.flags() & FLAG_Z); // general case else { - emit_mov_r64_p64_keepflags(dst, dstreg, REG_EDX, src1p); // mov dstreg:dstp,[src1p] - emit_sbb_r64_p64(dst, dstreg, REG_EDX, src2p, inst); // sbb dstreg:dstp,src2p - 
emit_mov_p64_r64(dst, dstp, dstreg, REG_EDX); // mov dstp,dstreg:eax + emit_mov_r64_p64_keepflags(a, dstreg, edx, src1p); // mov edx:dstreg,[src1p] + alu_op_param(a, Inst::kIdSbb, Inst::kIdSbb, // sbb edx:dstreg,src2p + dstreg, edx, src2p, inst.flags() & FLAG_Z); + emit_mov_p64_r64(a, dstp, dstreg, edx); // mov dstp,edx:dstreg } } } @@ -4726,7 +3877,7 @@ void drcbe_x86::op_subc(x86code *&dst, const instruction &inst) // op_cmp - process a CMP opcode //------------------------------------------------- -void drcbe_x86::op_cmp(x86code *&dst, const instruction &inst) +void drcbe_x86::op_cmp(Assembler &a, const instruction &inst) { // validate instruction assert(inst.size() == 4 || inst.size() == 8); @@ -4738,21 +3889,21 @@ void drcbe_x86::op_cmp(x86code *&dst, const instruction &inst) be_parameter src2p(*this, inst.param(1), PTYPE_MRI); // pick a target register for the general case - int src1reg = src1p.select_register(REG_EAX); + Gp const src1reg = src1p.select_register(eax); // 32-bit form if (inst.size() == 4) { // memory versus anything if (src1p.is_memory()) - emit_cmp_m32_p32(dst, MABS(src1p.memory()), src2p, inst); // cmp [dstp],src2p + alu_op_param(a, Inst::kIdCmp, MABS(src1p.memory(), 4), src2p); // cmp [src1p],src2p // general case else { if (src1p.is_immediate()) - emit_mov_r32_imm(dst, src1reg, src1p.immediate()); // mov src1reg,imm - emit_cmp_r32_p32(dst, src1reg, src2p, inst); // cmp src1reg,src2p + a.mov(src1reg, src1p.immediate()); // mov src1reg,imm + alu_op_param(a, Inst::kIdCmp, src1reg, src2p); // cmp src1reg,src2p } } @@ -4760,8 +3911,11 @@ void drcbe_x86::op_cmp(x86code *&dst, const instruction &inst) else { // general case - emit_mov_r64_p64(dst, REG_EAX, REG_EDX, src1p); // mov eax:dstp,[src1p] - emit_cmp_r64_p64(dst, REG_EAX, REG_EDX, src2p, inst); // cmp eax:dstp,src2p + emit_mov_r64_p64(a, eax, edx, src1p); // mov edx:eax,[src1p] + alu_op_param(a, Inst::kIdSub, Inst::kIdSbb, // cmp edx:eax,src2p + eax, edx, src2p, (inst.flags() & FLAG_Z) && (inst.flags() != FLAG_Z)); + if (inst.flags() == FLAG_Z) + a.or_(edx, eax); } } @@ -4770,7 +3924,7 @@ void drcbe_x86::op_cmp(x86code *&dst, const instruction &inst) // op_mulu - process a MULU opcode //------------------------------------------------- -void drcbe_x86::op_mulu(x86code *&dst, const instruction &inst) +void drcbe_x86::op_mulu(Assembler &a, const instruction &inst) { uint8_t zsflags = inst.flags() & (FLAG_Z | FLAG_S); uint8_t vflag = inst.flags() & FLAG_V; @@ -4792,19 +3946,19 @@ void drcbe_x86::op_mulu(x86code *&dst, const instruction &inst) if (inst.size() == 4) { // general case - emit_mov_r32_p32(dst, REG_EAX, src1p); // mov eax,src1p + emit_mov_r32_p32(a, eax, src1p); // mov eax,src1p if (src2p.is_memory()) - emit_mul_m32(dst, MABS(src2p.memory())); // mul [src2p] + a.mul(MABS(src2p.memory(), 4)); // mul [src2p] else if (src2p.is_int_register()) - emit_mul_r32(dst, src2p.ireg()); // mul src2p + a.mul(Gpd(src2p.ireg())); // mul src2p else if (src2p.is_immediate()) { - emit_mov_r32_imm(dst, REG_EDX, src2p.immediate()); // mov edx,src2p - emit_mul_r32(dst, REG_EDX); // mul edx + a.mov(edx, src2p.immediate()); // mov edx,src2p + a.mul(edx); // mul edx } - emit_mov_p32_r32(dst, dstp, REG_EAX); // mov dstp,eax + emit_mov_p32_r32(a, dstp, eax); // mov dstp,eax if (compute_hi) - emit_mov_p32_r32(dst, edstp, REG_EDX); // mov edstp,edx + emit_mov_p32_r32(a, edstp, edx); // mov edstp,edx // compute flags if (inst.flags() != 0) @@ -4812,32 +3966,32 @@ void drcbe_x86::op_mulu(x86code *&dst, const instruction &inst) if 
(zsflags != 0) { if (vflag) - emit_pushf(dst); // pushf + a.pushfd(); // pushf if (compute_hi) { if (zsflags == FLAG_Z) - emit_or_r32_r32(dst, REG_EDX, REG_EAX); // or edx,eax + a.or_(edx, eax); // or edx,eax else if (zsflags == FLAG_S) - emit_test_r32_r32(dst, REG_EDX, REG_EDX); // test edx,edx + a.test(edx, edx); // test edx,edx else { - emit_movzx_r32_r16(dst, REG_ECX, REG_AX); // movzx ecx,ax - emit_shr_r32_imm(dst, REG_EAX, 16); // shr eax,16 - emit_or_r32_r32(dst, REG_EDX, REG_ECX); // or edx,ecx - emit_or_r32_r32(dst, REG_EDX, REG_EAX); // or edx,eax + a.movzx(ecx, ax); // movzx ecx,ax + a.shr(eax, 16); // shr eax,16 + a.or_(edx, ecx); // or edx,ecx + a.or_(edx, eax); // or edx,eax } } else - emit_test_r32_r32(dst, REG_EAX, REG_EAX); // test eax,eax + a.test(eax, eax); // test eax,eax // we rely on the fact that OF is cleared by all logical operations above if (vflag) { - emit_pushf(dst); // pushf - emit_pop_r32(dst, REG_EAX); // pop eax - emit_and_m32_imm(dst, MBD(REG_ESP, 0), ~0x84); // and [esp],~0x84 - emit_or_m32_r32(dst, MBD(REG_ESP, 0), REG_EAX); // or [esp],eax - emit_popf(dst); // popf + a.pushfd(); // pushf + a.pop(eax); // pop eax + a.and_(dword_ptr(esp, 0), ~0x84); // and [esp],~0x84 + a.or_(ptr(esp, 0), eax); // or [esp],eax + a.popfd(); // popf } } } @@ -4847,28 +4001,28 @@ void drcbe_x86::op_mulu(x86code *&dst, const instruction &inst) else if (inst.size() == 8) { // general case - emit_mov_m32_imm(dst, MBD(REG_ESP, 24), inst.flags()); // mov [esp+24],flags - emit_mov_m64_p64(dst, MBD(REG_ESP, 16), src2p); // mov [esp+16],src2p - emit_mov_m64_p64(dst, MBD(REG_ESP, 8), src1p); // mov [esp+8],src1p + a.mov(dword_ptr(esp, 24), inst.flags()); // mov [esp+24],flags + emit_mov_m64_p64(a, qword_ptr(esp, 16), src2p); // mov [esp+16],src2p + emit_mov_m64_p64(a, qword_ptr(esp, 8), src1p); // mov [esp+8],src1p if (!compute_hi) - emit_mov_m32_imm(dst, MBD(REG_ESP, 4), (uintptr_t)&m_reslo); // mov [esp+4],&reslo + a.mov(dword_ptr(esp, 4), imm(&m_reslo)); // mov [esp+4],&reslo else - emit_mov_m32_imm(dst, MBD(REG_ESP, 4), (uintptr_t)&m_reshi); // mov [esp+4],&reshi - emit_mov_m32_imm(dst, MBD(REG_ESP, 0), (uintptr_t)&m_reslo); // mov [esp],&reslo - emit_call(dst, (x86code *)dmulu); // call dmulu + a.mov(dword_ptr(esp, 4), imm(&m_reshi)); // mov [esp+4],&reshi + a.mov(dword_ptr(esp, 0), imm(&m_reslo)); // mov [esp],&reslo + a.call(imm(dmulu)); // call dmulu if (inst.flags() != 0) - emit_push_m32(dst, MABSI(flags_unmap, REG_EAX, 4)); // push flags_unmap[eax*4] - emit_mov_r32_m32(dst, REG_EAX, MABS((uint32_t *)&m_reslo + 0)); // mov eax,reslo.lo - emit_mov_r32_m32(dst, REG_EDX, MABS((uint32_t *)&m_reslo + 1)); // mov edx,reslo.hi - emit_mov_p64_r64(dst, dstp, REG_EAX, REG_EDX); // mov dstp,edx:eax + a.push(ptr(u64(flags_unmap), eax, 2)); // push flags_unmap[eax*4] + a.mov(eax, MABS((uint32_t *)&m_reslo + 0)); // mov eax,reslo.lo + a.mov(edx, MABS((uint32_t *)&m_reslo + 1)); // mov edx,reslo.hi + emit_mov_p64_r64(a, dstp, eax, edx); // mov dstp,edx:eax if (compute_hi) { - emit_mov_r32_m32(dst, REG_EAX, MABS((uint32_t *)&m_reshi + 0)); // mov eax,reshi.lo - emit_mov_r32_m32(dst, REG_ECX, MABS((uint32_t *)&m_reshi + 1)); // mov ecx,reshi.hi - emit_mov_p64_r64(dst, edstp, REG_EAX, REG_ECX); // mov edstp,ecx:eax + a.mov(eax, MABS((uint32_t *)&m_reshi + 0)); // mov eax,reshi.lo + a.mov(ecx, MABS((uint32_t *)&m_reshi + 1)); // mov ecx,reshi.hi + emit_mov_p64_r64(a, edstp, eax, ecx); // mov edstp,ecx:eax } if (inst.flags() != 0) - emit_popf(dst); // popf + a.popfd(); // popf } } @@ -4877,7 
+4031,7 @@ void drcbe_x86::op_mulu(x86code *&dst, const instruction &inst) // op_muls - process a MULS opcode //------------------------------------------------- -void drcbe_x86::op_muls(x86code *&dst, const instruction &inst) +void drcbe_x86::op_muls(Assembler &a, const instruction &inst) { uint8_t zsflags = inst.flags() & (FLAG_Z | FLAG_S); uint8_t vflag = inst.flags() & FLAG_V; @@ -4902,38 +4056,38 @@ void drcbe_x86::op_muls(x86code *&dst, const instruction &inst) if (!compute_hi && !src1p.is_immediate() && src2p.is_immediate()) { if (src1p.is_memory()) - emit_imul_r32_m32_imm(dst, REG_EAX, MABS(src1p.memory()), src2p.immediate()); // imul eax,[src1p],src2p + a.imul(eax, MABS(src1p.memory(), 4), src2p.immediate()); // imul eax,[src1p],src2p else if (src1p.is_int_register()) - emit_imul_r32_r32_imm(dst, REG_EAX, src1p.ireg(), src2p.immediate()); // imul eax,src1p,src2p - emit_mov_p32_r32(dst, dstp, REG_EAX); // mov dstp,eax + a.imul(eax, Gpd(src1p.ireg()), src2p.immediate()); // imul eax,src1p,src2p + emit_mov_p32_r32(a, dstp, eax); // mov dstp,eax } // 32-bit destination, general case else if (!compute_hi) { - emit_mov_r32_p32(dst, REG_EAX, src1p); // mov eax,src1p + emit_mov_r32_p32(a, eax, src1p); // mov eax,src1p if (src2p.is_memory()) - emit_imul_r32_m32(dst, REG_EAX, MABS(src2p.memory())); // imul eax,[src2p] + a.imul(eax, MABS(src2p.memory(), 4)); // imul eax,[src2p] else if (src2p.is_int_register()) - emit_imul_r32_r32(dst, REG_EAX, src2p.ireg()); // imul eax,src2p - emit_mov_p32_r32(dst, dstp, REG_EAX); // mov dstp,eax + a.imul(eax, Gpd(src2p.ireg())); // imul eax,src2p + emit_mov_p32_r32(a, dstp, eax); // mov dstp,eax } // 64-bit destination, general case else { - emit_mov_r32_p32(dst, REG_EAX, src1p); // mov eax,src1p + emit_mov_r32_p32(a, eax, src1p); // mov eax,src1p if (src2p.is_memory()) - emit_imul_m32(dst, MABS(src2p.memory())); // imul [src2p] + a.imul(MABS(src2p.memory(), 4)); // imul [src2p] else if (src2p.is_int_register()) - emit_imul_r32(dst, src2p.ireg()); // imul src2p + a.imul(Gpd(src2p.ireg())); // imul src2p else if (src2p.is_immediate()) { - emit_mov_r32_imm(dst, REG_EDX, src2p.immediate()); // mov edx,src2p - emit_imul_r32(dst, REG_EDX); // imul edx + a.mov(edx, src2p.immediate()); // mov edx,src2p + a.imul(edx); // imul edx } - emit_mov_p32_r32(dst, dstp, REG_EAX); // mov dstp,eax - emit_mov_p32_r32(dst, edstp, REG_EDX); // mov edstp,edx + emit_mov_p32_r32(a, dstp, eax); // mov dstp,eax + emit_mov_p32_r32(a, edstp, edx); // mov edstp,edx } // compute flags @@ -4942,32 +4096,32 @@ void drcbe_x86::op_muls(x86code *&dst, const instruction &inst) if (zsflags != 0) { if (vflag) - emit_pushf(dst); // pushf + a.pushfd(); // pushf if (compute_hi) { if (inst.flags() == FLAG_Z) - emit_or_r32_r32(dst, REG_EDX, REG_EAX); // or edx,eax + a.or_(edx, eax); // or edx,eax else if (inst.flags() == FLAG_S) - emit_test_r32_r32(dst, REG_EDX, REG_EDX); // test edx,edx + a.test(edx, edx); // test edx,edx else { - emit_movzx_r32_r16(dst, REG_ECX, REG_AX); // movzx ecx,ax - emit_shr_r32_imm(dst, REG_EAX, 16); // shr eax,16 - emit_or_r32_r32(dst, REG_EDX, REG_ECX); // or edx,ecx - emit_or_r32_r32(dst, REG_EDX, REG_EAX); // or edx,eax + a.movzx(ecx, ax); // movzx ecx,ax + a.shr(eax, 16); // shr eax,16 + a.or_(edx, ecx); // or edx,ecx + a.or_(edx, eax); // or edx,eax } } else - emit_test_r32_r32(dst, REG_EAX, REG_EAX); // test eax,eax + a.test(eax, eax); // test eax,eax // we rely on the fact that OF is cleared by all logical operations above if (vflag) { - emit_pushf(dst); // pushf - 
emit_pop_r32(dst, REG_EAX); // pop eax - emit_and_m32_imm(dst, MBD(REG_ESP, 0), ~0x84); // and [esp],~0x84 - emit_or_m32_r32(dst, MBD(REG_ESP, 0), REG_EAX); // or [esp],eax - emit_popf(dst); // popf + a.pushfd(); // pushf + a.pop(eax); // pop eax + a.and_(dword_ptr(esp, 0), ~0x84); // and [esp],~0x84 + a.or_(ptr(esp, 0), eax); // or [esp],eax + a.popfd(); // popf } } } @@ -4977,28 +4131,28 @@ void drcbe_x86::op_muls(x86code *&dst, const instruction &inst) else if (inst.size() == 8) { // general case - emit_mov_m32_imm(dst, MBD(REG_ESP, 24), inst.flags()); // mov [esp+24],flags - emit_mov_m64_p64(dst, MBD(REG_ESP, 16), src2p); // mov [esp+16],src2p - emit_mov_m64_p64(dst, MBD(REG_ESP, 8), src1p); // mov [esp+8],src1p + a.mov(dword_ptr(esp, 24), inst.flags()); // mov [esp+24],flags + emit_mov_m64_p64(a, qword_ptr(esp, 16), src2p); // mov [esp+16],src2p + emit_mov_m64_p64(a, qword_ptr(esp, 8), src1p); // mov [esp+8],src1p if (!compute_hi) - emit_mov_m32_imm(dst, MBD(REG_ESP, 4), (uintptr_t)&m_reslo); // mov [esp+4],&reslo + a.mov(dword_ptr(esp, 4), imm(&m_reslo)); // mov [esp+4],&reslo else - emit_mov_m32_imm(dst, MBD(REG_ESP, 4), (uintptr_t)&m_reshi); // push [esp+4],&reshi - emit_mov_m32_imm(dst, MBD(REG_ESP, 0), (uintptr_t)&m_reslo); // mov [esp],&reslo - emit_call(dst, (x86code *)dmuls); // call dmuls + a.mov(dword_ptr(esp, 4), imm(&m_reshi)); // push [esp+4],&reshi + a.mov(dword_ptr(esp, 0), imm(&m_reslo)); // mov [esp],&reslo + a.call(imm(dmuls)); // call dmuls if (inst.flags() != 0) - emit_push_m32(dst, MABSI(flags_unmap, REG_EAX, 4)); // push flags_unmap[eax*4] - emit_mov_r32_m32(dst, REG_EAX, MABS((uint32_t *)&m_reslo + 0)); // mov eax,reslo.lo - emit_mov_r32_m32(dst, REG_EDX, MABS((uint32_t *)&m_reslo + 1)); // mov edx,reslo.hi - emit_mov_p64_r64(dst, dstp, REG_EAX, REG_EDX); // mov dstp,edx:eax + a.push(ptr(u64(flags_unmap), eax, 2)); // push flags_unmap[eax*4] + a.mov(eax, MABS((uint32_t *)&m_reslo + 0)); // mov eax,reslo.lo + a.mov(edx, MABS((uint32_t *)&m_reslo + 1)); // mov edx,reslo.hi + emit_mov_p64_r64(a, dstp, eax, edx); // mov dstp,edx:eax if (compute_hi) { - emit_mov_r32_m32(dst, REG_EAX, MABS((uint32_t *)&m_reshi + 0)); // mov eax,reshi.lo - emit_mov_r32_m32(dst, REG_EDX, MABS((uint32_t *)&m_reshi + 1)); // mov edx,reshi.hi - emit_mov_p64_r64(dst, edstp, REG_EAX, REG_EDX); // mov edstp,edx:eax + a.mov(eax, MABS((uint32_t *)&m_reshi + 0)); // mov eax,reshi.lo + a.mov(edx, MABS((uint32_t *)&m_reshi + 1)); // mov edx,reshi.hi + emit_mov_p64_r64(a, edstp, eax, edx); // mov edstp,edx:eax } if (inst.flags() != 0) - emit_popf(dst); // popf + a.popfd(); // popf } } @@ -5007,7 +4161,7 @@ void drcbe_x86::op_muls(x86code *&dst, const instruction &inst) // op_divu - process a DIVU opcode //------------------------------------------------- -void drcbe_x86::op_divu(x86code *&dst, const instruction &inst) +void drcbe_x86::op_divu(Assembler &a, const instruction &inst) { // validate instruction assert(inst.size() == 4 || inst.size() == 8); @@ -5025,50 +4179,51 @@ void drcbe_x86::op_divu(x86code *&dst, const instruction &inst) if (inst.size() == 4) { // general case - emit_mov_r32_p32(dst, REG_ECX, src2p); // mov ecx,src2p + emit_mov_r32_p32(a, ecx, src2p); // mov ecx,src2p if (inst.flags() != 0) { - emit_mov_r32_imm(dst, REG_EAX, 0xa0000000); // mov eax,0xa0000000 - emit_add_r32_r32(dst, REG_EAX, REG_EAX); // add eax,eax + a.mov(eax, 0xa0000000); // mov eax,0xa0000000 + a.add(eax, eax); // add eax,eax } - emit_link skip; - emit_jecxz_link(dst, skip); // jecxz skip - 
emit_mov_r32_p32(dst, REG_EAX, src1p); // mov eax,src1p - emit_xor_r32_r32(dst, REG_EDX, REG_EDX); // xor edx,edx - emit_div_r32(dst, REG_ECX); // div ecx - emit_mov_p32_r32(dst, dstp, REG_EAX); // mov dstp,eax + Label skip = a.newLabel(); + a.jecxz(skip); // jecxz skip + emit_mov_r32_p32(a, eax, src1p); // mov eax,src1p + a.xor_(edx, edx); // xor edx,edx + a.div(ecx); // div ecx + emit_mov_p32_r32(a, dstp, eax); // mov dstp,eax if (compute_rem) - emit_mov_p32_r32(dst, edstp, REG_EDX); // mov edstp,edx + emit_mov_p32_r32(a, edstp, edx); // mov edstp,edx if (inst.flags() != 0) - emit_test_r32_r32(dst, REG_EAX, REG_EAX); // test eax,eax - track_resolve_link(dst, skip); // skip: + a.test(eax, eax); // test eax,eax + a.bind(skip); // skip: + reset_last_upper_lower_reg(); } // 64-bit form else if (inst.size() == 8) { // general case - emit_mov_m64_p64(dst, MBD(REG_ESP, 16), src2p); // mov [esp+16],src2p - emit_mov_m64_p64(dst, MBD(REG_ESP, 8), src1p); // mov [esp+8],src1p + emit_mov_m64_p64(a, qword_ptr(esp, 16), src2p); // mov [esp+16],src2p + emit_mov_m64_p64(a, qword_ptr(esp, 8), src1p); // mov [esp+8],src1p if (!compute_rem) - emit_mov_m32_imm(dst, MBD(REG_ESP, 4), (uintptr_t)&m_reslo); // mov [esp+4],&reslo + a.mov(dword_ptr(esp, 4), imm(&m_reslo)); // mov [esp+4],&reslo else - emit_mov_m32_imm(dst, MBD(REG_ESP, 4), (uintptr_t)&m_reshi); // push [esp+4],&reshi - emit_mov_m32_imm(dst, MBD(REG_ESP, 0), (uintptr_t)&m_reslo); // mov [esp],&reslo - emit_call(dst, (x86code *)ddivu); // call ddivu + a.mov(dword_ptr(esp, 4), imm(&m_reshi)); // push [esp+4],&reshi + a.mov(dword_ptr(esp, 0), imm(&m_reslo)); // mov [esp],&reslo + a.call(imm(ddivu)); // call ddivu if (inst.flags() != 0) - emit_push_m32(dst, MABSI(flags_unmap, REG_EAX, 4)); // push flags_unmap[eax*4] - emit_mov_r32_m32(dst, REG_EAX, MABS((uint32_t *)&m_reslo + 0)); // mov eax,reslo.lo - emit_mov_r32_m32(dst, REG_EDX, MABS((uint32_t *)&m_reslo + 1)); // mov edx,reslo.hi - emit_mov_p64_r64(dst, dstp, REG_EAX, REG_EDX); // mov dstp,edx:eax + a.push(ptr(u64(flags_unmap), eax, 2)); // push flags_unmap[eax*4] + a.mov(eax, MABS((uint32_t *)&m_reslo + 0)); // mov eax,reslo.lo + a.mov(edx, MABS((uint32_t *)&m_reslo + 1)); // mov edx,reslo.hi + emit_mov_p64_r64(a, dstp, eax, edx); // mov dstp,edx:eax if (compute_rem) { - emit_mov_r32_m32(dst, REG_EAX, MABS((uint32_t *)&m_reshi + 0)); // mov eax,reshi.lo - emit_mov_r32_m32(dst, REG_EDX, MABS((uint32_t *)&m_reshi + 1)); // mov edx,reshi.hi - emit_mov_p64_r64(dst, edstp, REG_EAX, REG_EDX); // mov edstp,edx:eax + a.mov(eax, MABS((uint32_t *)&m_reshi + 0)); // mov eax,reshi.lo + a.mov(edx, MABS((uint32_t *)&m_reshi + 1)); // mov edx,reshi.hi + emit_mov_p64_r64(a, edstp, eax, edx); // mov edstp,edx:eax } if (inst.flags() != 0) - emit_popf(dst); // popf + a.popfd(); // popf } } @@ -5077,7 +4232,7 @@ void drcbe_x86::op_divu(x86code *&dst, const instruction &inst) // op_divs - process a DIVS opcode //------------------------------------------------- -void drcbe_x86::op_divs(x86code *&dst, const instruction &inst) +void drcbe_x86::op_divs(Assembler &a, const instruction &inst) { // validate instruction assert(inst.size() == 4 || inst.size() == 8); @@ -5095,50 +4250,51 @@ void drcbe_x86::op_divs(x86code *&dst, const instruction &inst) if (inst.size() == 4) { // general case - emit_mov_r32_p32(dst, REG_ECX, src2p); // mov ecx,src2p + emit_mov_r32_p32(a, ecx, src2p); // mov ecx,src2p if (inst.flags() != 0) { - emit_mov_r32_imm(dst, REG_EAX, 0xa0000000); // mov eax,0xa0000000 - emit_add_r32_r32(dst, REG_EAX, 
REG_EAX); // add eax,eax + a.mov(eax, 0xa0000000); // mov eax,0xa0000000 + a.add(eax, eax); // add eax,eax } - emit_link skip; - emit_jecxz_link(dst, skip); // jecxz skip - emit_mov_r32_p32(dst, REG_EAX, src1p); // mov eax,src1p - emit_cdq(dst); // cdq - emit_idiv_r32(dst, REG_ECX); // idiv ecx - emit_mov_p32_r32(dst, dstp, REG_EAX); // mov dstp,eax + Label skip = a.newLabel(); + a.jecxz(skip); // jecxz skip + emit_mov_r32_p32(a, eax, src1p); // mov eax,src1p + a.cdq(); // cdq + a.idiv(ecx); // idiv ecx + emit_mov_p32_r32(a, dstp, eax); // mov dstp,eax if (compute_rem) - emit_mov_p32_r32(dst, edstp, REG_EDX); // mov edstp,edx + emit_mov_p32_r32(a, edstp, edx); // mov edstp,edx if (inst.flags() != 0) - emit_test_r32_r32(dst, REG_EAX, REG_EAX); // test eax,eax - track_resolve_link(dst, skip); // skip: + a.test(eax, eax); // test eax,eax + a.bind(skip); // skip: + reset_last_upper_lower_reg(); } // 64-bit form else if (inst.size() == 8) { // general case - emit_mov_m64_p64(dst, MBD(REG_ESP, 16), src2p); // mov [esp+16],src2p - emit_mov_m64_p64(dst, MBD(REG_ESP, 8), src1p); // mov [esp+8],src1p + emit_mov_m64_p64(a, qword_ptr(esp, 16), src2p); // mov [esp+16],src2p + emit_mov_m64_p64(a, qword_ptr(esp, 8), src1p); // mov [esp+8],src1p if (!compute_rem) - emit_mov_m32_imm(dst, MBD(REG_ESP, 4), (uintptr_t)&m_reslo); // mov [esp+4],&reslo + a.mov(dword_ptr(esp, 4), imm(&m_reslo)); // mov [esp+4],&reslo else - emit_mov_m32_imm(dst, MBD(REG_ESP, 4), (uintptr_t)&m_reshi); // push [esp+4],&reshi - emit_mov_m32_imm(dst, MBD(REG_ESP, 0), (uintptr_t)&m_reslo); // mov [esp],&reslo - emit_call(dst, (x86code *)ddivs); // call ddivs + a.mov(dword_ptr(esp, 4), imm(&m_reshi)); // push [esp+4],&reshi + a.mov(dword_ptr(esp, 0), imm(&m_reslo)); // mov [esp],&reslo + a.call(imm(ddivs)); // call ddivs if (inst.flags() != 0) - emit_push_m32(dst, MABSI(flags_unmap, REG_EAX, 4)); // push flags_unmap[eax*4] - emit_mov_r32_m32(dst, REG_EAX, MABS((uint32_t *)&m_reslo + 0)); // mov eax,reslo.lo - emit_mov_r32_m32(dst, REG_EDX, MABS((uint32_t *)&m_reslo + 1)); // mov edx,reslo.hi - emit_mov_p64_r64(dst, dstp, REG_EAX, REG_EDX); // mov dstp,edx:eax + a.push(ptr(u64(flags_unmap), eax, 2)); // push flags_unmap[eax*4] + a.mov(eax, MABS((uint32_t *)&m_reslo + 0)); // mov eax,reslo.lo + a.mov(edx, MABS((uint32_t *)&m_reslo + 1)); // mov edx,reslo.hi + emit_mov_p64_r64(a, dstp, eax, edx); // mov dstp,edx:eax if (compute_rem) { - emit_mov_r32_m32(dst, REG_EAX, MABS((uint32_t *)&m_reshi + 0)); // mov eax,reshi.lo - emit_mov_r32_m32(dst, REG_EDX, MABS((uint32_t *)&m_reshi + 1)); // mov edx,reshi.hi - emit_mov_p64_r64(dst, edstp, REG_EAX, REG_EDX); // mov edstp,edx:eax + a.mov(eax, MABS((uint32_t *)&m_reshi + 0)); // mov eax,reshi.lo + a.mov(edx, MABS((uint32_t *)&m_reshi + 1)); // mov edx,reshi.hi + emit_mov_p64_r64(a, edstp, eax, edx); // mov edstp,edx:eax } if (inst.flags() != 0) - emit_popf(dst); // popf + a.popfd(); // popf } } @@ -5147,7 +4303,7 @@ void drcbe_x86::op_divs(x86code *&dst, const instruction &inst) // op_and - process a AND opcode //------------------------------------------------- -void drcbe_x86::op_and(x86code *&dst, const instruction &inst) +void drcbe_x86::op_and(Assembler &a, const instruction &inst) { // validate instruction assert(inst.size() == 4 || inst.size() == 8); @@ -5161,41 +4317,93 @@ void drcbe_x86::op_and(x86code *&dst, const instruction &inst) normalize_commutative(src1p, src2p); // pick a target register for the general case - int dstreg = dstp.select_register(REG_EAX, src2p); + Gp const dstreg 
= dstp.select_register(eax, src2p); // 32-bit form if (inst.size() == 4) { // dstp == src1p in memory if (dstp.is_memory() && dstp == src1p) - emit_and_m32_p32(dst, MABS(dstp.memory()), src2p, inst); // and [dstp],src2p + alu_op_param(a, Inst::kIdAnd, MABS(dstp.memory(), 4), src2p, // and [dstp],src2p + [inst](Assembler &a, Operand const &dst, be_parameter const &src) + { + // optimize all-zero and all-one cases + if (!inst.flags() && !src.immediate()) + { + a.mov(dst.as<Mem>(), imm(0)); + return true; + } + else if (!inst.flags() && u32(src.immediate()) == 0xffffffffU) + return true; + + return false; + }); + + // dstp == src2p in memory + else if (dstp.is_memory() && dstp == src2p) + alu_op_param(a, Inst::kIdAnd, MABS(dstp.memory(), 4), src1p, // and [dstp],src1p + [inst](Assembler &a, Operand const &dst, be_parameter const &src) + { + // optimize all-zero and all-one cases + if (!inst.flags() && !src.immediate()) + { + a.mov(dst.as<Mem>(), imm(0)); + return true; + } + else if (!inst.flags() && u32(src.immediate()) == 0xffffffffU) + return true; + + return false; + }); // AND with immediate 0xff - else if (src2p.is_immediate_value(0xff) && inst.flags() == 0) + else if (src2p.is_immediate_value(0xff) && !inst.flags()) { if (src1p.is_int_register()) - emit_movzx_r32_r8(dst, dstreg, src1p.ireg()); // movzx dstreg,src1p + { + if (src1p.ireg() & 4) + { + if (dstreg.id() != src1p.ireg()) + a.mov(dstreg, Gpd(src1p.ireg())); // mov dstreg,src1p + a.and_(dstreg, 0xff); // and dstreg,0xff + } + else + a.movzx(dstreg, GpbLo(src1p.ireg())); // movzx dstreg,src1p + } else if (src1p.is_memory()) - emit_movzx_r32_m8(dst, dstreg, MABS(src1p.memory())); // movzx dstreg,[src1p] - emit_mov_p32_r32(dst, dstp, dstreg); // mov dstp,dstreg + a.movzx(dstreg, MABS(src1p.memory(), 1)); // movzx dstreg,[src1p] + emit_mov_p32_r32(a, dstp, dstreg); // mov dstp,dstreg } // AND with immediate 0xffff - else if (src2p.is_immediate_value(0xffff) && inst.flags() == 0) + else if (src2p.is_immediate_value(0xffff) && !inst.flags()) { if (src1p.is_int_register()) - emit_movzx_r32_r16(dst, dstreg, src1p.ireg()); // movzx dstreg,src1p + a.movzx(dstreg, Gpw(src1p.ireg())); // movzx dstreg,src1p else if (src1p.is_memory()) - emit_movzx_r32_m16(dst, dstreg, MABS(src1p.memory())); // movzx dstreg,[src1p] - emit_mov_p32_r32(dst, dstp, dstreg); // mov dstp,dstreg + a.movzx(dstreg, MABS(src1p.memory(), 2)); // movzx dstreg,[src1p] + emit_mov_p32_r32(a, dstp, dstreg); // mov dstp,dstreg } // general case else { - emit_mov_r32_p32(dst, dstreg, src1p); // mov dstreg,src1p - emit_and_r32_p32(dst, dstreg, src2p, inst); // and dstreg,src2p - emit_mov_p32_r32(dst, dstp, dstreg); // mov dstp,dstreg + emit_mov_r32_p32(a, dstreg, src1p); // mov dstreg,src1p + alu_op_param(a, Inst::kIdAnd, dstreg, src2p, // and dstreg,src2p + [inst](Assembler &a, Operand const &dst, be_parameter const &src) + { + // optimize all-zero and all-one cases + if (!inst.flags() && !src.immediate()) + { + a.xor_(dst.as<Gpd>(), dst.as<Gpd>()); + return true; + } + else if (!inst.flags() && u32(src.immediate()) == 0xffffffffU) + return true; + + return false; + }); + emit_mov_p32_r32(a, dstp, dstreg); // mov dstp,dstreg } } @@ -5204,79 +4412,107 @@ void drcbe_x86::op_and(x86code *&dst, const instruction &inst) { // dstp == src1p in memory if (dstp.is_memory() && dstp == src1p) - emit_and_m64_p64(dst, MABS(dstp.memory()), src2p, inst); // and [dstp],src2p + emit_and_m64_p64(a, MABS(dstp.memory(0), 4), MABS(dstp.memory(4), 4), // and [dstp],src2p + src2p, inst); + + // 
dstp == src2p in memory + else if (dstp.is_memory() && dstp == src2p) + emit_and_m64_p64(a, MABS(dstp.memory(0), 4), MABS(dstp.memory(4), 4), // and [dstp],src1p + src1p, inst); // AND with immediate 0xff - else if (src2p.is_immediate_value(0xff) && inst.flags() == 0) + else if (src2p.is_immediate_value(0xff) && !inst.flags()) { if (src1p.is_int_register()) - emit_movzx_r32_r8(dst, dstreg, src1p.ireg()); // movzx dstreg,src1p + { + if (src1p.ireg() & 4) + { + if (dstreg.id() != src1p.ireg()) + a.mov(dstreg, Gpd(src1p.ireg())); // mov dstreg,src1p + a.and_(dstreg, 0xff); // and dstreg,0xff + } + else + a.movzx(dstreg, GpbLo(src1p.ireg())); // movzx dstreg,src1p + } else if (src1p.is_memory()) - emit_movzx_r32_m8(dst, dstreg, MABS(src1p.memory())); // movzx dstreg,[src1p] - emit_mov_p32_r32(dst, dstp, dstreg); // mov dstp,dstreg + a.movzx(dstreg, MABS(src1p.memory(), 1)); // movzx dstreg,[src1p] + emit_mov_p32_r32(a, dstp, dstreg); // mov dstp,dstreg if (dstp.is_int_register()) - emit_mov_m32_imm(dst, MABS(m_reghi[dstp.ireg()]), 0); // mov dsthi,0 + a.mov(MABS(m_reghi[dstp.ireg()], 4), 0); // mov dsthi,0 else if (dstp.is_memory()) - emit_mov_m32_imm(dst, MABS(dstp.memory(4)), 0); // mov dsthi,0 + a.mov(MABS(dstp.memory(4), 4), 0); // mov dsthi,0 } // AND with immediate 0xffff - else if (src2p.is_immediate_value(0xffff) && inst.flags() == 0) + else if (src2p.is_immediate_value(0xffff) && !inst.flags()) { if (src1p.is_int_register()) - emit_movzx_r32_r16(dst, dstreg, src1p.ireg()); // movzx dstreg,src1p + a.movzx(dstreg, Gpw(src1p.ireg())); // movzx dstreg,src1p else if (src1p.is_memory()) - emit_movzx_r32_m16(dst, dstreg, MABS(src1p.memory())); // movzx dstreg,[src1p] - emit_mov_p32_r32(dst, dstp, dstreg); // mov dstp,dstreg + a.movzx(dstreg, MABS(src1p.memory(), 2)); // movzx dstreg,[src1p] + emit_mov_p32_r32(a, dstp, dstreg); // mov dstp,dstreg if (dstp.is_int_register()) - emit_mov_m32_imm(dst, MABS(m_reghi[dstp.ireg()]), 0); // mov dsthi,0 + a.mov(MABS(m_reghi[dstp.ireg()], 4), 0); // mov dsthi,0 else if (dstp.is_memory()) - emit_mov_m32_imm(dst, MABS(dstp.memory(4)), 0); // mov dsthi,0 + a.mov(MABS(dstp.memory(4), 4), 0); // mov dsthi,0 } // AND with immediate 0xffffffff - else if (src2p.is_immediate_value(0xffffffff) && inst.flags() == 0) + else if (src2p.is_immediate_value(0xffffffffU) && !inst.flags()) { - emit_mov_r32_p32(dst, dstreg, src1p); // mov dstreg,src1p - emit_mov_p32_r32(dst, dstp, dstreg); // mov dstp,dstreg + emit_mov_r32_p32(a, dstreg, src1p); // mov dstreg,src1p + emit_mov_p32_r32(a, dstp, dstreg); // mov dstp,dstreg if (dstp.is_int_register()) - emit_mov_m32_imm(dst, MABS(m_reghi[dstp.ireg()]), 0); // mov dsthi,0 + a.mov(MABS(m_reghi[dstp.ireg()], 4), 0); // mov dsthi,0 else if (dstp.is_memory()) - emit_mov_m32_imm(dst, MABS(dstp.memory(4)), 0); // mov dsthi,0 + a.mov(MABS(dstp.memory(4), 4), 0); // mov dsthi,0 } // AND with immediate 0xffffffff00000000 - else if (src2p.is_immediate_value(0xffffffff00000000U) && inst.flags() == 0) + else if (src2p.is_immediate_value(0xffffffff00000000ULL) && !inst.flags()) { if (src1p != dstp) { - emit_mov_r64_p64(dst, REG_NONE, REG_EDX, src1p); // mov dstreg,src1p - emit_mov_p64_r64(dst, dstp, REG_NONE, REG_EDX); // mov dstp,dstreg + emit_mov_r64_p64(a, Gp(), edx, src1p); // mov dstreg,src1p + emit_mov_p64_r64(a, dstp, Gp(), edx); // mov dstp,dstreg } if (dstp.is_int_register()) - emit_xor_r32_r32(dst, dstp.ireg(), dstp.ireg()); // xor dstlo,dstlo + a.xor_(Gpd(dstp.ireg()), Gpd(dstp.ireg())); // xor dstlo,dstlo else if 
(dstp.is_memory()) - emit_mov_m32_imm(dst, MABS(dstp.memory()), 0); // mov dstlo,0 + a.mov(MABS(dstp.memory(0), 4), 0); // mov dstlo,0 } // AND with immediate <= 0xffffffff - else if (src2p.is_immediate() && src2p.immediate() <= 0xffffffff && inst.flags() == 0) + else if (src2p.is_immediate() && src2p.immediate() <= 0xffffffffU && !inst.flags()) { - emit_mov_r32_p32(dst, dstreg, src1p); // mov dstreg,src1p - emit_and_r32_p32(dst, dstreg, src2p, inst); // and dstreg,src2p - emit_mov_p32_r32(dst, dstp, dstreg); // mov dstp,dstreg + emit_mov_r32_p32(a, dstreg, src1p); // mov dstreg,src1p + alu_op_param(a, Inst::kIdAnd, dstreg, src2p, // and dstreg,src2p + [inst](Assembler &a, Operand const &dst, be_parameter const &src) + { + // optimize all-zero and all-one cases + if (!inst.flags() && !src.immediate()) + { + a.xor_(dst.as<Gpd>(), dst.as<Gpd>()); + return true; + } + else if (!inst.flags() && u32(src.immediate()) == 0xffffffffU) + return true; + + return false; + }); + emit_mov_p32_r32(a, dstp, dstreg); // mov dstp,dstreg if (dstp.is_int_register()) - emit_mov_m32_imm(dst, MABS(m_reghi[dstp.ireg()]), 0); // mov dsthi,0 + a.mov(MABS(m_reghi[dstp.ireg()], 4), 0); // mov dsthi,0 else if (dstp.is_memory()) - emit_mov_m32_imm(dst, MABS(dstp.memory(4)), 0); // mov dsthi,0 + a.mov(MABS(dstp.memory(4), 4), 0); // mov dsthi,0 } // general case else { - emit_mov_r64_p64(dst, dstreg, REG_EDX, src1p); // mov dstreg:dstp,[src1p] - emit_and_r64_p64(dst, dstreg, REG_EDX, src2p, inst); // and dstreg:dstp,src2p - emit_mov_p64_r64(dst, dstp, dstreg, REG_EDX); // mov dstp,dstreg:eax + emit_mov_r64_p64(a, dstreg, edx, src1p); // mov edx:dstreg,[src1p] + emit_and_r64_p64(a, dstreg, edx, src2p, inst); // and edx:dstreg,src2p + emit_mov_p64_r64(a, dstp, dstreg, edx); // mov dstp,edx:dstreg } } } @@ -5286,7 +4522,7 @@ void drcbe_x86::op_and(x86code *&dst, const instruction &inst) // op_test - process a TEST opcode //------------------------------------------------- -void drcbe_x86::op_test(x86code *&dst, const instruction &inst) +void drcbe_x86::op_test(Assembler &a, const instruction &inst) { // validate instruction assert(inst.size() == 4 || inst.size() == 8); @@ -5299,20 +4535,20 @@ void drcbe_x86::op_test(x86code *&dst, const instruction &inst) normalize_commutative(src1p, src2p); // pick a target register for the general case - int src1reg = src1p.select_register(REG_EAX); + Gp const src1reg = src1p.select_register(eax); // 32-bit form if (inst.size() == 4) { // src1p in memory if (src1p.is_memory()) - emit_test_m32_p32(dst, MABS(src1p.memory()), src2p, inst); // test [src1p],src2p + alu_op_param(a, Inst::kIdTest, MABS(src1p.memory(), 4), src2p); // test [src1p],src2p // general case else { - emit_mov_r32_p32(dst, src1reg, src1p); // mov src1reg,src1p - emit_test_r32_p32(dst, src1reg, src2p, inst); // test src1reg,src2p + emit_mov_r32_p32(a, src1reg, src1p); // mov src1reg,src1p + alu_op_param(a, Inst::kIdTest, src1reg, src2p); // test src1reg,src2p } } @@ -5321,13 +4557,15 @@ void drcbe_x86::op_test(x86code *&dst, const instruction &inst) { // src1p in memory if (src1p.is_memory()) - emit_test_m64_p64(dst, MABS(src1p.memory()), src2p, inst); // test [dstp],src2p + alu_op_param(a, Inst::kIdTest, Inst::kIdTest, // test [dstp],src2p + MABS(src1p.memory(0), 4), MABS(src1p.memory(4), 4), src2p, inst.flags() & FLAG_Z); // general case else { - emit_mov_r64_p64(dst, src1reg, REG_EDX, src1p); // mov src1reg:dstp,[src1p] - emit_test_r64_p64(dst, src1reg, REG_EDX, src2p, inst); // test src1reg:dstp,src2p + 
emit_mov_r64_p64(a, src1reg, edx, src1p); // mov src1reg:dstp,[src1p] + alu_op_param(a, Inst::kIdTest, Inst::kIdTest, // test src1reg:dstp,src2p + src1reg, edx, src2p, inst.flags() & FLAG_Z); } } } @@ -5337,7 +4575,7 @@ void drcbe_x86::op_test(x86code *&dst, const instruction &inst) // op_or - process a OR opcode //------------------------------------------------- -void drcbe_x86::op_or(x86code *&dst, const instruction &inst) +void drcbe_x86::op_or(Assembler &a, const instruction &inst) { // validate instruction assert(inst.size() == 4 || inst.size() == 8); @@ -5351,21 +4589,62 @@ void drcbe_x86::op_or(x86code *&dst, const instruction &inst) normalize_commutative(src1p, src2p); // pick a target register for the general case - int dstreg = dstp.select_register(REG_EAX, src2p); + Gp const dstreg = dstp.select_register(eax, src2p); // 32-bit form if (inst.size() == 4) { // dstp == src1p in memory if (dstp.is_memory() && dstp == src1p) - emit_or_m32_p32(dst, MABS(dstp.memory()), src2p, inst); // or [dstp],src2p + alu_op_param(a, Inst::kIdOr, MABS(dstp.memory(), 4), src2p, // or [dstp],src2p + [inst](Assembler &a, Operand const &dst, be_parameter const &src) + { + // optimize all-zero and all-one cases + if (!inst.flags() && u32(src.immediate()) == 0xffffffffU) + { + a.mov(dst.as<Mem>(), imm(-1)); + return true; + } + else if (!inst.flags() && !src.immediate()) + return true; + + return false; + }); + // dstp == src2p in memory + else if (dstp.is_memory() && dstp == src2p) + alu_op_param(a, Inst::kIdOr, MABS(dstp.memory(), 4), src1p, // or [dstp],src1p + [inst](Assembler &a, Operand const &dst, be_parameter const &src) + { + // optimize all-zero and all-one cases + if (!inst.flags() && u32(src.immediate()) == 0xffffffffU) + { + a.mov(dst.as<Mem>(), imm(-1)); + return true; + } + else if (!inst.flags() && !src.immediate()) + return true; + return false; + }); // general case else { - emit_mov_r32_p32(dst, dstreg, src1p); // mov dstreg,src1p - emit_or_r32_p32(dst, dstreg, src2p, inst); // or dstreg,src2p - emit_mov_p32_r32(dst, dstp, dstreg); // mov dstp,dstreg + emit_mov_r32_p32(a, dstreg, src1p); // mov dstreg,src1p + alu_op_param(a, Inst::kIdOr, dstreg, src2p, // or dstreg,src2p + [inst](Assembler &a, Operand const &dst, be_parameter const &src) + { + // optimize all-zero and all-one cases + if (!inst.flags() && u32(src.immediate()) == 0xffffffffU) + { + a.mov(dst.as<Gp>(), imm(-1)); + return true; + } + else if (!inst.flags() && !src.immediate()) + return true; + + return false; + }); + emit_mov_p32_r32(a, dstp, dstreg); // mov dstp,dstreg } } @@ -5374,14 +4653,20 @@ void drcbe_x86::op_or(x86code *&dst, const instruction &inst) { // dstp == src1p in memory if (dstp.is_memory() && dstp == src1p) - emit_or_m64_p64(dst, MABS(dstp.memory()), src2p, inst); // or [dstp],src2p + emit_or_m64_p64(a, MABS(dstp.memory(0), 4), MABS(dstp.memory(4), 4), // or [dstp],src2p + src2p, inst); + + // dstp == src2p in memory + else if (dstp.is_memory() && dstp == src2p) + emit_or_m64_p64(a, MABS(dstp.memory(0), 4), MABS(dstp.memory(4), 4), // or [dstp],src1p + src1p, inst); // general case else { - emit_mov_r64_p64(dst, dstreg, REG_EDX, src1p); // mov dstreg:dstp,[src1p] - emit_or_r64_p64(dst, dstreg, REG_EDX, src2p, inst); // or dstreg:dstp,src2p - emit_mov_p64_r64(dst, dstp, dstreg, REG_EDX); // mov dstp,dstreg:eax + emit_mov_r64_p64(a, dstreg, edx, src1p); // mov edx:dstreg,[src1p] + emit_or_r64_p64(a, dstreg, edx, src2p, inst); // or edx:dstreg,src2p + emit_mov_p64_r64(a, dstp, dstreg, edx); // mov 
dstp,edx:dstreg } } } @@ -5391,7 +4676,7 @@ void drcbe_x86::op_or(x86code *&dst, const instruction &inst) // op_xor - process a XOR opcode //------------------------------------------------- -void drcbe_x86::op_xor(x86code *&dst, const instruction &inst) +void drcbe_x86::op_xor(Assembler &a, const instruction &inst) { // validate instruction assert(inst.size() == 4 || inst.size() == 8); @@ -5405,21 +4690,64 @@ void drcbe_x86::op_xor(x86code *&dst, const instruction &inst) normalize_commutative(src1p, src2p); // pick a target register for the general case - int dstreg = dstp.select_register(REG_EAX, src2p); + Gp const dstreg = dstp.select_register(eax, src2p); // 32-bit form if (inst.size() == 4) { // dstp == src1p in memory if (dstp.is_memory() && dstp == src1p) - emit_xor_m32_p32(dst, MABS(dstp.memory()), src2p, inst); // xor [dstp],src2p + alu_op_param(a, Inst::kIdXor, MABS(dstp.memory(), 4), src2p, // xor [dstp],src2p + [inst](Assembler &a, Operand const &dst, be_parameter const &src) + { + // optimize all-zero and all-one cases + if (!inst.flags() && u32(src.immediate()) == 0xffffffffU) + { + a.not_(dst.as<Mem>()); + return true; + } + else if (!inst.flags() && !src.immediate()) + return true; + + return false; + }); + + // dstp == src2p in memory + else if (dstp.is_memory() && dstp == src2p) + alu_op_param(a, Inst::kIdXor, MABS(dstp.memory(), 4), src1p, // xor [dstp],src1p + [inst](Assembler &a, Operand const &dst, be_parameter const &src) + { + // optimize all-zero and all-one cases + if (!inst.flags() && u32(src.immediate()) == 0xffffffffU) + { + a.not_(dst.as<Mem>()); + return true; + } + else if (!inst.flags() && !src.immediate()) + return true; + + return false; + }); // general case else { - emit_mov_r32_p32(dst, dstreg, src1p); // mov dstreg,src1p - emit_xor_r32_p32(dst, dstreg, src2p, inst); // xor dstreg,src2p - emit_mov_p32_r32(dst, dstp, dstreg); // mov dstp,dstreg + emit_mov_r32_p32(a, dstreg, src1p); // mov dstreg,src1p + alu_op_param(a, Inst::kIdXor, dstreg, src2p, // xor dstreg,src2p + [inst](Assembler &a, Operand const &dst, be_parameter const &src) + { + // optimize all-zero and all-one cases + if (!inst.flags() && u32(src.immediate()) == 0xffffffffU) + { + a.not_(dst.as<Gp>()); + return true; + } + else if (!inst.flags() && !src.immediate()) + return true; + + return false; + }); + emit_mov_p32_r32(a, dstp, dstreg); // mov dstp,dstreg } } @@ -5428,14 +4756,20 @@ void drcbe_x86::op_xor(x86code *&dst, const instruction &inst) { // dstp == src1p in memory if (dstp.is_memory() && dstp == src1p) - emit_xor_m64_p64(dst, MABS(dstp.memory()), src2p, inst); // xor [dstp],src2p + emit_xor_m64_p64(a, MABS(dstp.memory(0), 4), MABS(dstp.memory(4), 4), // xor [dstp],src2p + src2p, inst); + + // dstp == src1p in memory + else if (dstp.is_memory() && dstp == src2p) + emit_xor_m64_p64(a, MABS(dstp.memory(0), 4), MABS(dstp.memory(4), 4), // xor [dstp],src1p + src1p, inst); // general case else { - emit_mov_r64_p64(dst, dstreg, REG_EDX, src1p); // mov dstreg:dstp,[src1p] - emit_xor_r64_p64(dst, dstreg, REG_EDX, src2p, inst); // xor dstreg:dstp,src2p - emit_mov_p64_r64(dst, dstp, dstreg, REG_EDX); // mov dstp,dstreg:eax + emit_mov_r64_p64(a, dstreg, edx, src1p); // mov edx:dstreg,[src1p] + emit_xor_r64_p64(a, dstreg, edx, src2p, inst); // xor edx:dstreg,src2p + emit_mov_p64_r64(a, dstp, dstreg, edx); // mov dstp,edx:dstreg } } } @@ -5445,7 +4779,7 @@ void drcbe_x86::op_xor(x86code *&dst, const instruction &inst) // op_lzcnt - process a LZCNT opcode 
//------------------------------------------------- -void drcbe_x86::op_lzcnt(x86code *&dst, const instruction &inst) +void drcbe_x86::op_lzcnt(Assembler &a, const instruction &inst) { // validate instruction assert(inst.size() == 4 || inst.size() == 8); @@ -5457,34 +4791,35 @@ void drcbe_x86::op_lzcnt(x86code *&dst, const instruction &inst) be_parameter srcp(*this, inst.param(1), PTYPE_MRI); // pick a target register for the general case - int dstreg = dstp.select_register(REG_EAX); + Gp const dstreg = dstp.select_register(eax); // 32-bit form if (inst.size() == 4) { - emit_mov_r32_p32(dst, dstreg, srcp); // mov dstreg,src1p - emit_mov_r32_imm(dst, REG_ECX, 32 ^ 31); // mov ecx,32 ^ 31 - emit_bsr_r32_r32(dst, dstreg, dstreg); // bsr dstreg,dstreg - emit_cmovcc_r32_r32(dst, x86emit::COND_Z, dstreg, REG_ECX); // cmovz dstreg,ecx - emit_xor_r32_imm(dst, dstreg, 31); // xor dstreg,31 - emit_mov_p32_r32(dst, dstp, dstreg); // mov dstp,dstreg + emit_mov_r32_p32(a, dstreg, srcp); // mov dstreg,src1p + a.mov(ecx, 32 ^ 31); // mov ecx,32 ^ 31 + a.bsr(dstreg, dstreg); // bsr dstreg,dstreg + a.cmovz(dstreg, ecx); // cmovz dstreg,ecx + a.xor_(dstreg, 31); // xor dstreg,31 + emit_mov_p32_r32(a, dstp, dstreg); // mov dstp,dstreg } // 64-bit form else if (inst.size() == 8) { - emit_mov_r64_p64(dst, REG_EDX, dstreg, srcp); // mov dstreg:edx,srcp - emit_bsr_r32_r32(dst, dstreg, dstreg); // bsr dstreg,dstreg - emit_link skip; - emit_jcc_short_link(dst, x86emit::COND_NZ, skip); // jnz skip - emit_mov_r32_imm(dst, REG_ECX, 32 ^ 31); // mov ecx,32 ^ 31 - emit_bsr_r32_r32(dst, dstreg, REG_EDX); // bsr dstreg,edx - emit_cmovcc_r32_r32(dst, x86emit::COND_Z, dstreg, REG_ECX); // cmovz dstreg,ecx - emit_add_r32_imm(dst, REG_ECX, 32); // add ecx,32 - track_resolve_link(dst, skip); // skip: - emit_xor_r32_r32(dst, REG_EDX, REG_EDX); // xor edx,edx - emit_xor_r32_imm(dst, dstreg, 31); // xor dstreg,31 - emit_mov_p64_r64(dst, dstp, dstreg, REG_EDX); // mov dstp,edx:dstreg + emit_mov_r64_p64(a, edx, dstreg, srcp); // mov dstreg:edx,srcp + a.bsr(dstreg, dstreg); // bsr dstreg,dstreg + Label skip = a.newLabel(); + a.jnz(skip); // jnz skip + a.mov(ecx, 32 ^ 31); // mov ecx,32 ^ 31 + a.bsr(dstreg, edx); // bsr dstreg,edx + a.cmovz(dstreg, ecx); // cmovz dstreg,ecx + a.add(ecx, 32); // add ecx,32 + a.bind(skip); // skip: + reset_last_upper_lower_reg(); + a.xor_(edx, edx); // xor edx,edx + a.xor_(dstreg, 31); // xor dstreg,31 + emit_mov_p64_r64(a, dstp, dstreg, edx); // mov dstp,edx:dstreg } } @@ -5493,7 +4828,7 @@ void drcbe_x86::op_lzcnt(x86code *&dst, const instruction &inst) // op_tzcnt - process a TZCNT opcode //------------------------------------------------- -void drcbe_x86::op_tzcnt(x86code *&dst, const instruction &inst) +void drcbe_x86::op_tzcnt(Assembler &a, const instruction &inst) { // validate instruction assert(inst.size() == 4 || inst.size() == 8); @@ -5504,32 +4839,33 @@ void drcbe_x86::op_tzcnt(x86code *&dst, const instruction &inst) be_parameter dstp(*this, inst.param(0), PTYPE_MR); be_parameter srcp(*this, inst.param(1), PTYPE_MRI); - int dstreg = dstp.select_register(REG_EAX); + Gp const dstreg = dstp.select_register(eax); // 32-bit form if (inst.size() == 4) { - emit_mov_r32_p32(dst, dstreg, srcp); // mov dstreg,src1p - emit_mov_r32_imm(dst, REG_ECX, 32); // mov ecx,32 - emit_bsf_r32_r32(dst, dstreg, dstreg); // bsf dstreg,dstreg - emit_cmovcc_r32_r32(dst, x86emit::COND_Z, dstreg, REG_ECX); // cmovz dstreg,ecx - emit_mov_p32_r32(dst, dstp, dstreg); // mov dstp,dstreg + emit_mov_r32_p32(a, dstreg, 
srcp); // mov dstreg,src1p + a.mov(ecx, 32); // mov ecx,32 + a.bsf(dstreg, dstreg); // bsf dstreg,dstreg + a.cmovz(dstreg, ecx); // cmovz dstreg,ecx + emit_mov_p32_r32(a, dstp, dstreg); // mov dstp,dstreg } // 64-bit form else if (inst.size() == 8) { - emit_link skip; - emit_mov_r64_p64(dst, REG_EDX, dstreg, srcp); // mov dstreg:edx,srcp - emit_bsf_r32_r32(dst, dstreg, dstreg); // bsf dstreg,dstreg - emit_jcc_short_link(dst, x86emit::COND_NZ, skip); // jnz skip - emit_mov_r32_imm(dst, REG_ECX, 32); // mov ecx,32 - emit_bsf_r32_r32(dst, dstreg, REG_EDX); // bsf dstreg,edx - emit_cmovcc_r32_r32(dst, x86emit::COND_Z, dstreg, REG_ECX); // cmovz dstreg,ecx - emit_add_r32_imm(dst, dstreg, 32); // add dstreg,32 - track_resolve_link(dst, skip); // skip: - emit_xor_r32_r32(dst, REG_EDX, REG_EDX); // xor edx,edx - emit_mov_p64_r64(dst, dstp, dstreg, REG_EDX); // mov dstp,edx:dstreg + Label skip = a.newLabel(); + emit_mov_r64_p64(a, edx, dstreg, srcp); // mov dstreg:edx,srcp + a.bsf(dstreg, dstreg); // bsf dstreg,dstreg + a.jnz(skip); // jnz skip + a.mov(ecx, 32); // mov ecx,32 + a.bsf(dstreg, edx); // bsf dstreg,edx + a.cmovz(dstreg, ecx); // cmovz dstreg,ecx + a.add(dstreg, 32); // add dstreg,32 + a.bind(skip); // skip: + reset_last_upper_lower_reg(); + a.xor_(edx, edx); // xor edx,edx + emit_mov_p64_r64(a, dstp, dstreg, edx); // mov dstp,edx:dstreg } } @@ -5538,7 +4874,7 @@ void drcbe_x86::op_tzcnt(x86code *&dst, const instruction &inst) // op_bswap - process a BSWAP opcode //------------------------------------------------- -void drcbe_x86::op_bswap(x86code *&dst, const instruction &inst) +void drcbe_x86::op_bswap(Assembler &a, const instruction &inst) { // validate instruction assert(inst.size() == 4 || inst.size() == 8); @@ -5550,36 +4886,36 @@ void drcbe_x86::op_bswap(x86code *&dst, const instruction &inst) be_parameter srcp(*this, inst.param(1), PTYPE_MRI); // pick a target register for the general case - int dstreg = dstp.select_register(REG_EAX); + Gp const dstreg = dstp.select_register(eax); // 32-bit form if (inst.size() == 4) { - emit_mov_r32_p32(dst, dstreg, srcp); // mov dstreg,src1p - emit_bswap_r32(dst, dstreg); // bswap dstreg + emit_mov_r32_p32(a, dstreg, srcp); // mov dstreg,src1p + a.bswap(dstreg); // bswap dstreg if (inst.flags() != 0) - emit_test_r32_r32(dst, dstreg, dstreg); // test dstreg,dstreg - emit_mov_p32_r32(dst, dstp, dstreg); // mov dstp,dstreg + a.test(dstreg, dstreg); // test dstreg,dstreg + emit_mov_p32_r32(a, dstp, dstreg); // mov dstp,dstreg } // 64-bit form else if (inst.size() == 8) { - emit_mov_r64_p64(dst, REG_EDX, dstreg, srcp); // mov dstreg:edx,srcp - emit_bswap_r32(dst, dstreg); // bswap dstreg - emit_bswap_r32(dst, REG_EDX); // bswap edx - emit_mov_p64_r64(dst, dstp, dstreg, REG_EDX); // mov dstp,edx:dstreg + emit_mov_r64_p64(a, edx, dstreg, srcp); // mov dstreg:edx,srcp + a.bswap(dstreg); // bswap dstreg + a.bswap(edx); // bswap edx + emit_mov_p64_r64(a, dstp, dstreg, edx); // mov dstp,edx:dstreg if (inst.flags() == FLAG_Z) - emit_or_r32_r32(dst, REG_EDX, dstreg); // or edx,eax + a.or_(edx, dstreg); // or edx,eax else if (inst.flags() == FLAG_S) - emit_test_r32_r32(dst, REG_EDX, REG_EDX); // test edx,edx + a.test(edx, edx); // test edx,edx else { - emit_movzx_r32_r16(dst, REG_ECX, dstreg); // movzx ecx,dstreg - emit_or_r32_r32(dst, REG_EDX, REG_ECX); // or edx,ecx - emit_mov_r32_r32(dst, REG_ECX, dstreg); // mov ecx,dstreg - emit_shr_r32_imm(dst, REG_ECX, 16); // shr ecx,16 - emit_or_r32_r32(dst, REG_EDX, REG_ECX); // or edx,ecx + a.movzx(ecx, 
dstreg.r16()); // movzx ecx,dstreg + a.or_(edx, ecx); // or edx,ecx + a.mov(ecx, dstreg); // mov ecx,dstreg + a.shr(ecx, 16); // shr ecx,16 + a.or_(edx, ecx); // or edx,ecx } } } @@ -5589,7 +4925,7 @@ void drcbe_x86::op_bswap(x86code *&dst, const instruction &inst) // op_shl - process a SHL opcode //------------------------------------------------- -void drcbe_x86::op_shl(x86code *&dst, const instruction &inst) +void drcbe_x86::op_shl(Assembler &a, const instruction &inst) { // validate instruction assert(inst.size() == 4 || inst.size() == 8); @@ -5602,21 +4938,31 @@ void drcbe_x86::op_shl(x86code *&dst, const instruction &inst) be_parameter src2p(*this, inst.param(2), PTYPE_MRI); // pick a target register for the general case - int dstreg = dstp.select_register(REG_EAX, src2p); + Gp const dstreg = dstp.select_register(eax, src2p); // 32-bit form if (inst.size() == 4) { // dstp == src1p in memory if (dstp.is_memory() && dstp == src1p) - emit_shl_m32_p32(dst, MABS(dstp.memory()), src2p, inst); // shl [dstp],src2p + shift_op_param(a, Inst::kIdShl, MABS(dstp.memory(), 4), src2p, // shl [dstp],src2p + [inst](Assembler &a, Operand const &dst, be_parameter const &src) + { + // optimize zero case + return (!inst.flags() && !src.immediate()); + }); // general case else { - emit_mov_r32_p32(dst, dstreg, src1p); // mov dstreg,src1p - emit_shl_r32_p32(dst, dstreg, src2p, inst); // shl dstreg,src2p - emit_mov_p32_r32(dst, dstp, dstreg); // mov dstp,dstreg + emit_mov_r32_p32(a, dstreg, src1p); // mov dstreg,src1p + shift_op_param(a, Inst::kIdShl, dstreg, src2p, // shl dstreg,src2p + [inst](Assembler &a, Operand const &dst, be_parameter const &src) + { + // optimize zero case + return (!inst.flags() && !src.immediate()); + }); + emit_mov_p32_r32(a, dstp, dstreg); // mov dstp,dstreg } } @@ -5624,9 +4970,9 @@ void drcbe_x86::op_shl(x86code *&dst, const instruction &inst) else if (inst.size() == 8) { // general case - emit_mov_r64_p64(dst, dstreg, REG_EDX, src1p); // mov dstreg:dstp,[src1p] - emit_shl_r64_p64(dst, dstreg, REG_EDX, src2p, inst); // shl dstreg:dstp,src2p - emit_mov_p64_r64(dst, dstp, dstreg, REG_EDX); // mov dstp,dstreg:eax + emit_mov_r64_p64(a, dstreg, edx, src1p); // mov edx:dstreg,[src1p] + emit_shl_r64_p64(a, dstreg, edx, src2p, inst); // shl edx:dstreg,src2p + emit_mov_p64_r64(a, dstp, dstreg, edx); // mov dstp,edx:dstreg } } @@ -5635,7 +4981,7 @@ void drcbe_x86::op_shl(x86code *&dst, const instruction &inst) // op_shr - process a SHR opcode //------------------------------------------------- -void drcbe_x86::op_shr(x86code *&dst, const instruction &inst) +void drcbe_x86::op_shr(Assembler &a, const instruction &inst) { // validate instruction assert(inst.size() == 4 || inst.size() == 8); @@ -5648,21 +4994,31 @@ void drcbe_x86::op_shr(x86code *&dst, const instruction &inst) be_parameter src2p(*this, inst.param(2), PTYPE_MRI); // pick a target register for the general case - int dstreg = dstp.select_register(REG_EAX, src2p); + Gp const dstreg = dstp.select_register(eax, src2p); // 32-bit form if (inst.size() == 4) { // dstp == src1p in memory if (dstp.is_memory() && dstp == src1p) - emit_shr_m32_p32(dst, MABS(dstp.memory()), src2p, inst); // shr [dstp],src2p + shift_op_param(a, Inst::kIdShr, MABS(dstp.memory(), 4), src2p, // shr [dstp],src2p + [inst](Assembler &a, Operand const &dst, be_parameter const &src) + { + // optimize zero case + return (!inst.flags() && !src.immediate()); + }); // general case else { - emit_mov_r32_p32(dst, dstreg, src1p); // mov dstreg,src1p - 
emit_shr_r32_p32(dst, dstreg, src2p, inst); // shr dstreg,src2p - emit_mov_p32_r32(dst, dstp, dstreg); // mov dstp,dstreg + emit_mov_r32_p32(a, dstreg, src1p); // mov dstreg,src1p + shift_op_param(a, Inst::kIdShr, dstreg, src2p, // shr dstreg,src2p + [inst](Assembler &a, Operand const &dst, be_parameter const &src) + { + // optimize zero case + return (!inst.flags() && !src.immediate()); + }); + emit_mov_p32_r32(a, dstp, dstreg); // mov dstp,dstreg } } @@ -5670,9 +5026,9 @@ void drcbe_x86::op_shr(x86code *&dst, const instruction &inst) else if (inst.size() == 8) { // general case - emit_mov_r64_p64(dst, dstreg, REG_EDX, src1p); // mov dstreg:dstp,[src1p] - emit_shr_r64_p64(dst, dstreg, REG_EDX, src2p, inst); // shr dstreg:dstp,src2p - emit_mov_p64_r64(dst, dstp, dstreg, REG_EDX); // mov dstp,dstreg:eax + emit_mov_r64_p64(a, dstreg, edx, src1p); // mov edx:dstreg,[src1p] + emit_shr_r64_p64(a, dstreg, edx, src2p, inst); // shr edx:dstreg,src2p + emit_mov_p64_r64(a, dstp, dstreg, edx); // mov dstp,edx:dstreg } } @@ -5681,7 +5037,7 @@ void drcbe_x86::op_shr(x86code *&dst, const instruction &inst) // op_sar - process a SAR opcode //------------------------------------------------- -void drcbe_x86::op_sar(x86code *&dst, const instruction &inst) +void drcbe_x86::op_sar(Assembler &a, const instruction &inst) { // validate instruction assert(inst.size() == 4 || inst.size() == 8); @@ -5694,21 +5050,31 @@ void drcbe_x86::op_sar(x86code *&dst, const instruction &inst) be_parameter src2p(*this, inst.param(2), PTYPE_MRI); // pick a target register for the general case - int dstreg = dstp.select_register(REG_EAX, src2p); + Gp const dstreg = dstp.select_register(eax, src2p); // 32-bit form if (inst.size() == 4) { // dstp == src1p in memory if (dstp.is_memory() && dstp == src1p) - emit_sar_m32_p32(dst, MABS(dstp.memory()), src2p, inst); // sar [dstp],src2p + shift_op_param(a, Inst::kIdSar, MABS(dstp.memory(), 4), src2p, // sar [dstp],src2p + [inst](Assembler &a, Operand const &dst, be_parameter const &src) + { + // optimize zero case + return (!inst.flags() && !src.immediate()); + }); // general case else { - emit_mov_r32_p32(dst, dstreg, src1p); // mov dstreg,src1p - emit_sar_r32_p32(dst, dstreg, src2p, inst); // sar dstreg,src2p - emit_mov_p32_r32(dst, dstp, dstreg); // mov dstp,dstreg + emit_mov_r32_p32(a, dstreg, src1p); // mov dstreg,src1p + shift_op_param(a, Inst::kIdSar, dstreg, src2p, // sar dstreg,src2p + [inst](Assembler &a, Operand const &dst, be_parameter const &src) + { + // optimize zero case + return (!inst.flags() && !src.immediate()); + }); + emit_mov_p32_r32(a, dstp, dstreg); // mov dstp,dstreg } } @@ -5716,9 +5082,9 @@ void drcbe_x86::op_sar(x86code *&dst, const instruction &inst) else if (inst.size() == 8) { // general case - emit_mov_r64_p64(dst, dstreg, REG_EDX, src1p); // mov dstreg:dstp,[src1p] - emit_sar_r64_p64(dst, dstreg, REG_EDX, src2p, inst); // sar dstreg:dstp,src2p - emit_mov_p64_r64(dst, dstp, dstreg, REG_EDX); // mov dstp,dstreg:eax + emit_mov_r64_p64(a, dstreg, edx, src1p); // mov edx:dstreg,[src1p] + emit_sar_r64_p64(a, dstreg, edx, src2p, inst); // sar edx:dstreg,src2p + emit_mov_p64_r64(a, dstp, dstreg, edx); // mov dstp,edx:dstreg } } @@ -5727,7 +5093,7 @@ void drcbe_x86::op_sar(x86code *&dst, const instruction &inst) // op_rol - process a rol opcode //------------------------------------------------- -void drcbe_x86::op_rol(x86code *&dst, const instruction &inst) +void drcbe_x86::op_rol(Assembler &a, const instruction &inst) { // validate instruction 
assert(inst.size() == 4 || inst.size() == 8); @@ -5740,21 +5106,31 @@ void drcbe_x86::op_rol(x86code *&dst, const instruction &inst) be_parameter src2p(*this, inst.param(2), PTYPE_MRI); // pick a target register for the general case - int dstreg = dstp.select_register(REG_EAX, src2p); + Gp const dstreg = dstp.select_register(eax, src2p); // 32-bit form if (inst.size() == 4) { // dstp == src1p in memory if (dstp.is_memory() && dstp == src1p) - emit_rol_m32_p32(dst, MABS(dstp.memory()), src2p, inst); // rol [dstp],src2p + shift_op_param(a, Inst::kIdRol, MABS(dstp.memory(), 4), src2p, // rol [dstp],src2p + [inst](Assembler &a, Operand const &dst, be_parameter const &src) + { + // optimize zero case + return (!inst.flags() && !src.immediate()); + }); // general case else { - emit_mov_r32_p32(dst, dstreg, src1p); // mov dstreg,src1p - emit_rol_r32_p32(dst, dstreg, src2p, inst); // rol dstreg,src2p - emit_mov_p32_r32(dst, dstp, dstreg); // mov dstp,dstreg + emit_mov_r32_p32(a, dstreg, src1p); // mov dstreg,src1p + shift_op_param(a, Inst::kIdRol, dstreg, src2p, // rol dstreg,src2p + [inst](Assembler &a, Operand const &dst, be_parameter const &src) + { + // optimize zero case + return (!inst.flags() && !src.immediate()); + }); + emit_mov_p32_r32(a, dstp, dstreg); // mov dstp,dstreg } } @@ -5762,9 +5138,9 @@ void drcbe_x86::op_rol(x86code *&dst, const instruction &inst) else if (inst.size() == 8) { // general case - emit_mov_r64_p64(dst, dstreg, REG_EDX, src1p); // mov dstreg:dstp,[src1p] - emit_rol_r64_p64(dst, dstreg, REG_EDX, src2p, inst); // rol dstreg:dstp,src2p - emit_mov_p64_r64(dst, dstp, dstreg, REG_EDX); // mov dstp,dstreg:eax + emit_mov_r64_p64(a, dstreg, edx, src1p); // mov edx:dstreg,[src1p] + emit_rol_r64_p64(a, dstreg, edx, src2p, inst); // rol edx:dstreg,src2p + emit_mov_p64_r64(a, dstp, dstreg, edx); // mov dstp,edx:dstreg } } @@ -5773,7 +5149,7 @@ void drcbe_x86::op_rol(x86code *&dst, const instruction &inst) // op_ror - process a ROR opcode //------------------------------------------------- -void drcbe_x86::op_ror(x86code *&dst, const instruction &inst) +void drcbe_x86::op_ror(Assembler &a, const instruction &inst) { // validate instruction assert(inst.size() == 4 || inst.size() == 8); @@ -5786,21 +5162,31 @@ void drcbe_x86::op_ror(x86code *&dst, const instruction &inst) be_parameter src2p(*this, inst.param(2), PTYPE_MRI); // pick a target register for the general case - int dstreg = dstp.select_register(REG_EAX, src2p); + Gp const dstreg = dstp.select_register(eax, src2p); // 32-bit form if (inst.size() == 4) { // dstp == src1p in memory if (dstp.is_memory() && dstp == src1p) - emit_ror_m32_p32(dst, MABS(dstp.memory()), src2p, inst); // ror [dstp],src2p + shift_op_param(a, Inst::kIdRor, MABS(dstp.memory(), 4), src2p, // ror [dstp],src2p + [inst](Assembler &a, Operand const &dst, be_parameter const &src) + { + // optimize zero case + return (!inst.flags() && !src.immediate()); + }); // general case else { - emit_mov_r32_p32(dst, dstreg, src1p); // mov dstreg,src1p - emit_ror_r32_p32(dst, dstreg, src2p, inst); // ror dstreg,src2p - emit_mov_p32_r32(dst, dstp, dstreg); // mov dstp,dstreg + emit_mov_r32_p32(a, dstreg, src1p); // mov dstreg,src1p + shift_op_param(a, Inst::kIdRor, dstreg, src2p, // ror dstreg,src2p + [inst](Assembler &a, Operand const &dst, be_parameter const &src) + { + // optimize zero case + return (!inst.flags() && !src.immediate()); + }); + emit_mov_p32_r32(a, dstp, dstreg); // mov dstp,dstreg } } @@ -5808,9 +5194,9 @@ void drcbe_x86::op_ror(x86code *&dst, const 
instruction &inst) else if (inst.size() == 8) { // general case - emit_mov_r64_p64(dst, dstreg, REG_EDX, src1p); // mov dstreg:dstp,[src1p] - emit_ror_r64_p64(dst, dstreg, REG_EDX, src2p, inst); // ror dstreg:dstp,src2p - emit_mov_p64_r64(dst, dstp, dstreg, REG_EDX); // mov dstp,dstreg:eax + emit_mov_r64_p64(a, dstreg, edx, src1p); // mov edx:dstreg,[src1p] + emit_ror_r64_p64(a, dstreg, edx, src2p, inst); // ror edx:dstreg,src2p + emit_mov_p64_r64(a, dstp, dstreg, edx); // mov dstp,edx:dstreg } } @@ -5819,7 +5205,7 @@ void drcbe_x86::op_ror(x86code *&dst, const instruction &inst) // op_rolc - process a ROLC opcode //------------------------------------------------- -void drcbe_x86::op_rolc(x86code *&dst, const instruction &inst) +void drcbe_x86::op_rolc(Assembler &a, const instruction &inst) { // validate instruction assert(inst.size() == 4 || inst.size() == 8); @@ -5832,21 +5218,31 @@ void drcbe_x86::op_rolc(x86code *&dst, const instruction &inst) be_parameter src2p(*this, inst.param(2), PTYPE_MRI); // pick a target register for the general case - int dstreg = dstp.select_register(REG_EAX, src2p); + Gp const dstreg = dstp.select_register(eax, src2p); // 32-bit form if (inst.size() == 4) { // dstp == src1p in memory if (dstp.is_memory() && dstp == src1p) - emit_rcl_m32_p32(dst, MABS(dstp.memory()), src2p, inst); // rcl [dstp],src2p + shift_op_param(a, Inst::kIdRcl, MABS(dstp.memory(), 4), src2p, // rcl [dstp],src2p + [inst](Assembler &a, Operand const &dst, be_parameter const &src) + { + // optimize zero case + return (!inst.flags() && !src.immediate()); + }); // general case else { - emit_mov_r32_p32_keepflags(dst, dstreg, src1p); // mov dstreg,src1p - emit_rcl_r32_p32(dst, dstreg, src2p, inst); // rcl dstreg,src2p - emit_mov_p32_r32(dst, dstp, dstreg); // mov dstp,dstreg + emit_mov_r32_p32_keepflags(a, dstreg, src1p); // mov dstreg,src1p + shift_op_param(a, Inst::kIdRcl, dstreg, src2p, // rcl dstreg,src2p + [inst](Assembler &a, Operand const &dst, be_parameter const &src) + { + // optimize zero case + return (!inst.flags() && !src.immediate()); + }); + emit_mov_p32_r32(a, dstp, dstreg); // mov dstp,dstreg } } @@ -5854,9 +5250,9 @@ void drcbe_x86::op_rolc(x86code *&dst, const instruction &inst) else if (inst.size() == 8) { // general case - emit_mov_r64_p64_keepflags(dst, dstreg, REG_EDX, src1p); // mov dstreg:dstp,[src1p] - emit_rcl_r64_p64(dst, dstreg, REG_EDX, src2p, inst); // rcl dstreg:dstp,src2p - emit_mov_p64_r64(dst, dstp, dstreg, REG_EDX); // mov dstp,dstreg:eax + emit_mov_r64_p64_keepflags(a, dstreg, edx, src1p); // mov edx:dstreg,[src1p] + emit_rcl_r64_p64(a, dstreg, edx, src2p, inst); // rcl edx:dstreg,src2p + emit_mov_p64_r64(a, dstp, dstreg, edx); // mov dstp,edx:dstreg } } @@ -5865,7 +5261,7 @@ void drcbe_x86::op_rolc(x86code *&dst, const instruction &inst) // op_rorc - process a RORC opcode //------------------------------------------------- -void drcbe_x86::op_rorc(x86code *&dst, const instruction &inst) +void drcbe_x86::op_rorc(Assembler &a, const instruction &inst) { // validate instruction assert(inst.size() == 4 || inst.size() == 8); @@ -5878,21 +5274,31 @@ void drcbe_x86::op_rorc(x86code *&dst, const instruction &inst) be_parameter src2p(*this, inst.param(2), PTYPE_MRI); // pick a target register for the general case - int dstreg = dstp.select_register(REG_EAX, src2p); + Gp const dstreg = dstp.select_register(eax, src2p); // 32-bit form if (inst.size() == 4) { // dstp == src1p in memory if (dstp.is_memory() && dstp == src1p) - emit_rcr_m32_p32(dst, 
MABS(dstp.memory()), src2p, inst); // rcr [dstp],src2p + shift_op_param(a, Inst::kIdRcr, MABS(dstp.memory(), 4), src2p, // rcr [dstp],src2p + [inst](Assembler &a, Operand const &dst, be_parameter const &src) + { + // optimize zero case + return (!inst.flags() && !src.immediate()); + }); // general case else { - emit_mov_r32_p32_keepflags(dst, dstreg, src1p); // mov dstreg,src1p - emit_rcr_r32_p32(dst, dstreg, src2p, inst); // rcr dstreg,src2p - emit_mov_p32_r32(dst, dstp, dstreg); // mov dstp,dstreg + emit_mov_r32_p32_keepflags(a, dstreg, src1p); // mov dstreg,src1p + shift_op_param(a, Inst::kIdRcr, dstreg, src2p, // rcr dstreg,src2p + [inst](Assembler &a, Operand const &dst, be_parameter const &src) + { + // optimize zero case + return (!inst.flags() && !src.immediate()); + }); + emit_mov_p32_r32(a, dstp, dstreg); // mov dstp,dstreg } } @@ -5900,9 +5306,9 @@ void drcbe_x86::op_rorc(x86code *&dst, const instruction &inst) else if (inst.size() == 8) { // general case - emit_mov_r64_p64_keepflags(dst, dstreg, REG_EDX, src1p); // mov dstreg:dstp,[src1p] - emit_rcr_r64_p64(dst, dstreg, REG_EDX, src2p, inst); // rcr dstreg:dstp,src2p - emit_mov_p64_r64(dst, dstp, dstreg, REG_EDX); // mov dstp,dstreg:eax + emit_mov_r64_p64_keepflags(a, dstreg, edx, src1p); // mov edx:dstreg,[src1p] + emit_rcr_r64_p64(a, dstreg, edx, src2p, inst); // rcr edx:dstreg,src2p + emit_mov_p64_r64(a, dstp, dstreg, edx); // mov dstp,edx:dstreg } } @@ -5916,7 +5322,7 @@ void drcbe_x86::op_rorc(x86code *&dst, const instruction &inst) // op_fload - process a FLOAD opcode //------------------------------------------------- -void drcbe_x86::op_fload(x86code *&dst, const instruction &inst) +void drcbe_x86::op_fload(Assembler &a, const instruction &inst) { // validate instruction assert(inst.size() == 4 || inst.size() == 8); @@ -5931,25 +5337,25 @@ void drcbe_x86::op_fload(x86code *&dst, const instruction &inst) // immediate index if (indp.is_immediate()) { - emit_mov_r32_m32(dst, REG_EAX, MABS(basep.memory(4*indp.immediate()))); // mov eax,[basep + 4*indp] + a.mov(eax, MABS(basep.memory(4*indp.immediate()))); // mov eax,[basep + 4*indp] if (inst.size() == 8) - emit_mov_r32_m32(dst, REG_EDX, MABS(basep.memory(4 + 4*indp.immediate()))); // mov edx,[basep + 4*indp + 4] + a.mov(edx, MABS(basep.memory(4 + 4*indp.immediate()))); // mov edx,[basep + 4*indp + 4] } // other index else { - int indreg = indp.select_register(REG_ECX); - emit_mov_r32_p32(dst, indreg, indp); - emit_mov_r32_m32(dst, REG_EAX, MABSI(basep.memory(), indreg, 4)); // mov eax,[basep + 4*indp] + Gp const indreg = indp.select_register(ecx); + emit_mov_r32_p32(a, indreg, indp); + a.mov(eax, ptr(u64(basep.memory(0)), indreg, 2)); // mov eax,[basep + 4*indp] if (inst.size() == 8) - emit_mov_r32_m32(dst, REG_EDX, MABSI(basep.memory(4), indreg, 4)); // mov edx,[basep + 4*indp + 4] + a.mov(edx, ptr(u64(basep.memory(4)), indreg, 2)); // mov edx,[basep + 4*indp + 4] } // general case - emit_mov_m32_r32(dst, MABS(dstp.memory()), REG_EAX); // mov [dstp],eax + a.mov(MABS(dstp.memory(0)), eax); // mov [dstp],eax if (inst.size() == 8) - emit_mov_m32_r32(dst, MABS(dstp.memory(4)), REG_EDX); // mov [dstp + 4],edx + a.mov(MABS(dstp.memory(4)), edx); // mov [dstp + 4],edx } @@ -5957,7 +5363,7 @@ void drcbe_x86::op_fload(x86code *&dst, const instruction &inst) // op_fstore - process a FSTORE opcode //------------------------------------------------- -void drcbe_x86::op_fstore(x86code *&dst, const instruction &inst) +void drcbe_x86::op_fstore(Assembler &a, const instruction &inst) { // 
validate instruction assert(inst.size() == 4 || inst.size() == 8); @@ -5970,26 +5376,26 @@ void drcbe_x86::op_fstore(x86code *&dst, const instruction &inst) be_parameter srcp(*this, inst.param(2), PTYPE_MF); // general case - emit_mov_r32_m32(dst, REG_EAX, MABS(srcp.memory())); // mov eax,[srcp] + a.mov(eax, MABS(srcp.memory(0))); // mov eax,[srcp] if (inst.size() == 8) - emit_mov_r32_m32(dst, REG_EDX, MABS(srcp.memory(4))); // mov edx,[srcp + 4] + a.mov(edx, MABS(srcp.memory(4))); // mov edx,[srcp + 4] // immediate index if (indp.is_immediate()) { - emit_mov_m32_r32(dst, MABS(basep.memory(4*indp.immediate())), REG_EAX); // mov [basep + 4*indp],eax + a.mov(MABS(basep.memory(4*indp.immediate())), eax); // mov [basep + 4*indp],eax if (inst.size() == 8) - emit_mov_m32_r32(dst, MABS(basep.memory(4 + 4*indp.immediate())), REG_EDX); // mov [basep + 4*indp + 4],edx + a.mov(MABS(basep.memory(4 + 4*indp.immediate())), edx); // mov [basep + 4*indp + 4],edx } // other index else { - int indreg = indp.select_register(REG_ECX); - emit_mov_r32_p32(dst, indreg, indp); - emit_mov_m32_r32(dst, MABSI(basep.memory(), indreg, 4), REG_EAX); // mov [basep + 4*indp],eax + Gp const indreg = indp.select_register(ecx); + emit_mov_r32_p32(a, indreg, indp); + a.mov(ptr(u64(basep.memory(0)), indreg, 2), eax); // mov [basep + 4*indp],eax if (inst.size() == 8) - emit_mov_m32_r32(dst, MABSI(basep.memory(4), indreg, 4), REG_EDX); // mov [basep + 4*indp + 4],edx + a.mov(ptr(u64(basep.memory(4)), indreg, 2), edx); // mov [basep + 4*indp + 4],edx } } @@ -5998,7 +5404,7 @@ void drcbe_x86::op_fstore(x86code *&dst, const instruction &inst) // op_fread - process a FREAD opcode //------------------------------------------------- -void drcbe_x86::op_fread(x86code *&dst, const instruction &inst) +void drcbe_x86::op_fread(Assembler &a, const instruction &inst) { // validate instruction assert(inst.size() == 4 || inst.size() == 8); @@ -6008,23 +5414,23 @@ void drcbe_x86::op_fread(x86code *&dst, const instruction &inst) // normalize parameters be_parameter dstp(*this, inst.param(0), PTYPE_MF); be_parameter addrp(*this, inst.param(1), PTYPE_MRI); - const parameter &spacep = inst.param(2); + parameter const &spacep = inst.param(2); assert(spacep.is_size_space()); assert((1 << spacep.size()) == inst.size()); // set up a call to the read dword/qword handler - emit_mov_m32_p32(dst, MBD(REG_ESP, 4), addrp); // mov [esp+4],addrp - emit_mov_m32_imm(dst, MBD(REG_ESP, 0), (uintptr_t)m_space[spacep.space()]); // mov [esp],space + emit_mov_m32_p32(a, dword_ptr(esp, 4), addrp); // mov [esp+4],addrp + a.mov(dword_ptr(esp, 0), imm(m_space[spacep.space()])); // mov [esp],space if (inst.size() == 4) - emit_call(dst, (x86code *)m_accessors[spacep.space()].read_dword); // call read_dword + a.call(imm(m_accessors[spacep.space()].read_dword)); // call read_dword else if (inst.size() == 8) - emit_call(dst, (x86code *)m_accessors[spacep.space()].read_qword); // call read_qword + a.call(imm(m_accessors[spacep.space()].read_qword)); // call read_qword // store result if (inst.size() == 4) - emit_mov_p32_r32(dst, dstp, REG_EAX); // mov dstp,eax + emit_mov_p32_r32(a, dstp, eax); // mov dstp,eax else if (inst.size() == 8) - emit_mov_p64_r64(dst, dstp, REG_EAX, REG_EDX); // mov dstp,edx:eax + emit_mov_p64_r64(a, dstp, eax, edx); // mov dstp,edx:eax } @@ -6032,7 +5438,7 @@ void drcbe_x86::op_fread(x86code *&dst, const instruction &inst) // op_fwrite - process a FWRITE opcode //------------------------------------------------- -void drcbe_x86::op_fwrite(x86code 
*&dst, const instruction &inst) +void drcbe_x86::op_fwrite(Assembler &a, const instruction &inst) { // validate instruction assert(inst.size() == 4 || inst.size() == 8); @@ -6042,21 +5448,21 @@ void drcbe_x86::op_fwrite(x86code *&dst, const instruction &inst) // normalize parameters be_parameter addrp(*this, inst.param(0), PTYPE_MRI); be_parameter srcp(*this, inst.param(1), PTYPE_MF); - const parameter &spacep = inst.param(2); + parameter const &spacep = inst.param(2); assert(spacep.is_size_space()); assert((1 << spacep.size()) == inst.size()); // set up a call to the write dword/qword handler if (inst.size() == 4) - emit_mov_m32_p32(dst, MBD(REG_ESP, 8), srcp); // mov [esp+8],srcp + emit_mov_m32_p32(a, dword_ptr(esp, 8), srcp); // mov [esp+8],srcp else if (inst.size() == 8) - emit_mov_m64_p64(dst, MBD(REG_ESP, 8), srcp); // mov [esp+8],srcp - emit_mov_m32_p32(dst, MBD(REG_ESP, 4), addrp); // mov [esp+4],addrp - emit_mov_m32_imm(dst, MBD(REG_ESP, 0), (uintptr_t)m_space[spacep.space()]); // mov [esp],space + emit_mov_m64_p64(a, qword_ptr(esp, 8), srcp); // mov [esp+8],srcp + emit_mov_m32_p32(a, dword_ptr(esp, 4), addrp); // mov [esp+4],addrp + a.mov(dword_ptr(esp, 0), imm(m_space[spacep.space()])); // mov [esp],space if (inst.size() == 4) - emit_call(dst, (x86code *)m_accessors[spacep.space()].write_dword); // call write_dword + a.call(imm(m_accessors[spacep.space()].write_dword)); // call write_dword else if (inst.size() == 8) - emit_call(dst, (x86code *)m_accessors[spacep.space()].write_qword); // call write_qword + a.call(imm(m_accessors[spacep.space()].write_qword)); // call write_qword } @@ -6064,7 +5470,7 @@ void drcbe_x86::op_fwrite(x86code *&dst, const instruction &inst) // op_fmov - process a FMOV opcode //------------------------------------------------- -void drcbe_x86::op_fmov(x86code *&dst, const instruction &inst) +void drcbe_x86::op_fmov(Assembler &a, const instruction &inst) { // validate instruction assert(inst.size() == 4 || inst.size() == 8); @@ -6076,21 +5482,24 @@ void drcbe_x86::op_fmov(x86code *&dst, const instruction &inst) be_parameter srcp(*this, inst.param(1), PTYPE_MF); // always start with a jmp - emit_link skip = { nullptr }; + Label skip = a.newLabel(); if (inst.condition() != uml::COND_ALWAYS) - emit_jcc_short_link(dst, X86_NOT_CONDITION(inst.condition()), skip); // jcc skip + a.j(X86_NOT_CONDITION(inst.condition()), skip); // jcc skip // general case - emit_mov_r32_m32(dst, REG_EAX, MABS(srcp.memory())); // mov eax,[srcp] + a.mov(eax, MABS(srcp.memory(0))); // mov eax,[srcp] if (inst.size() == 8) - emit_mov_r32_m32(dst, REG_EDX, MABS(srcp.memory(4))); // mov edx,[srcp + 4] - emit_mov_m32_r32(dst, MABS(dstp.memory()), REG_EAX); // mov [dstp],eax + a.mov(edx, MABS(srcp.memory(4))); // mov edx,[srcp + 4] + a.mov(MABS(dstp.memory(0)), eax); // mov [dstp],eax if (inst.size() == 8) - emit_mov_m32_r32(dst, MABS(dstp.memory(4)), REG_EDX); // mov [dstp + 4],edx + a.mov(MABS(dstp.memory(4)), edx); // mov [dstp + 4],edx // resolve the jump - if (skip.target != nullptr) - track_resolve_link(dst, skip); // skip: + if (inst.condition() != uml::COND_ALWAYS) + { + a.bind(skip); // skip: + reset_last_upper_lower_reg(); + } } @@ -6098,7 +5507,7 @@ void drcbe_x86::op_fmov(x86code *&dst, const instruction &inst) // op_ftoint - process a FTOINT opcode //------------------------------------------------- -void drcbe_x86::op_ftoint(x86code *&dst, const instruction &inst) +void drcbe_x86::op_ftoint(Assembler &a, const instruction &inst) { // validate instruction assert(inst.size() 
== 4 || inst.size() == 8); @@ -6108,20 +5517,20 @@ void drcbe_x86::op_ftoint(x86code *&dst, const instruction &inst) // normalize parameters be_parameter dstp(*this, inst.param(0), PTYPE_MR); be_parameter srcp(*this, inst.param(1), PTYPE_MF); - const parameter &sizep = inst.param(2); + parameter const &sizep = inst.param(2); assert(sizep.is_size()); - const parameter &roundp = inst.param(3); + parameter const &roundp = inst.param(3); assert(roundp.is_rounding()); // set rounding mode if necessary if (roundp.rounding() != ROUND_DEFAULT && (!m_sse3 || roundp.rounding() != ROUND_TRUNC)) { - emit_fstcw_m16(dst, MABS(&m_fmodesave)); // fstcw [fmodesave] - emit_fldcw_m16(dst, MABS(&fp_control[roundp.rounding()])); // fldcw fpcontrol[roundp] + a.fstcw(MABS(&m_fmodesave)); // fstcw [fmodesave] + a.fldcw(MABS(&fp_control[roundp.rounding()])); // fldcw fpcontrol[roundp] } // general case - emit_fld_p(dst, inst.size(), srcp); // fld srcp + emit_fld_p(a, inst.size(), srcp); // fld srcp // 4-byte integer case if (sizep.size() == SIZE_DWORD) @@ -6129,17 +5538,17 @@ void drcbe_x86::op_ftoint(x86code *&dst, const instruction &inst) if (dstp.is_memory()) { if (!m_sse3 || roundp.rounding() != ROUND_TRUNC) - emit_fistp_m32(dst, MABS(dstp.memory())); // fistp [dstp] + a.fistp(MABS(dstp.memory(), 4)); // fistp [dstp] else - emit_fisttp_m32(dst, MABS(dstp.memory())); // fisttp [dstp] + a.fisttp(MABS(dstp.memory(), 4)); // fisttp [dstp] } else if (dstp.is_int_register()) { if (!m_sse3 || roundp.rounding() != ROUND_TRUNC) - emit_fistp_m32(dst, MABS(m_reglo[dstp.ireg()])); // fistp reglo[dstp] + a.fistp(MABS(m_reglo[dstp.ireg()], 4)); // fistp reglo[dstp] else - emit_fisttp_m32(dst, MABS(m_reglo[dstp.ireg()])); // fisttp reglo[dstp] - emit_mov_r32_m32(dst, dstp.ireg(), MABS(m_reglo[dstp.ireg()])); // mov dstp,reglo[dstp] + a.fisttp(MABS(m_reglo[dstp.ireg()], 4)); // fisttp reglo[dstp] + a.mov(Gpd(dstp.ireg()), MABS(m_reglo[dstp.ireg()])); // mov dstp,reglo[dstp] } } @@ -6149,23 +5558,23 @@ void drcbe_x86::op_ftoint(x86code *&dst, const instruction &inst) if (dstp.is_memory()) { if (!m_sse3 || roundp.rounding() != ROUND_TRUNC) - emit_fistp_m64(dst, MABS(dstp.memory())); // fistp [dstp] + a.fistp(MABS(dstp.memory(), 8)); // fistp [dstp] else - emit_fisttp_m64(dst, MABS(dstp.memory())); // fisttp [dstp] + a.fisttp(MABS(dstp.memory(), 8)); // fisttp [dstp] } else if (dstp.is_int_register()) { if (!m_sse3 || roundp.rounding() != ROUND_TRUNC) - emit_fistp_m64(dst, MABS(m_reglo[dstp.ireg()])); // fistp reglo[dstp] + a.fistp(MABS(m_reglo[dstp.ireg()], 8)); // fistp reglo[dstp] else - emit_fisttp_m64(dst, MABS(m_reglo[dstp.ireg()])); // fisttp reglo[dstp] - emit_mov_r32_m32(dst, dstp.ireg(), MABS(m_reglo[dstp.ireg()])); // mov dstp,reglo[dstp] + a.fisttp(MABS(m_reglo[dstp.ireg()], 8)); // fisttp reglo[dstp] + a.mov(Gpd(dstp.ireg()), MABS(m_reglo[dstp.ireg()])); // mov dstp,reglo[dstp] } } // restore control word and proceed if (roundp.rounding() != ROUND_DEFAULT && (!m_sse3 || roundp.rounding() != ROUND_TRUNC)) - emit_fldcw_m16(dst, MABS(&m_fmodesave)); // fldcw [fmodesave] + a.fldcw(MABS(&m_fmodesave)); // fldcw [fmodesave] } @@ -6173,7 +5582,7 @@ void drcbe_x86::op_ftoint(x86code *&dst, const instruction &inst) // op_ffrint - process a FFRINT opcode //------------------------------------------------- -void drcbe_x86::op_ffrint(x86code *&dst, const instruction &inst) +void drcbe_x86::op_ffrint(Assembler &a, const instruction &inst) { // validate instruction assert(inst.size() == 4 || inst.size() == 8); @@ -6183,7 +5592,7 
@@ void drcbe_x86::op_ffrint(x86code *&dst, const instruction &inst) // normalize parameters be_parameter dstp(*this, inst.param(0), PTYPE_MF); be_parameter srcp(*this, inst.param(1), PTYPE_MRI); - const parameter &sizep = inst.param(2); + parameter const &sizep = inst.param(2); assert(sizep.is_size()); // 4-byte integer case @@ -6191,15 +5600,15 @@ void drcbe_x86::op_ffrint(x86code *&dst, const instruction &inst) { if (srcp.is_immediate()) { - emit_mov_m32_imm(dst, MABS(&m_fptemp), srcp.immediate()); // mov [fptemp],srcp - emit_fild_m32(dst, MABS(&m_fptemp)); // fild [fptemp] + a.mov(MABS(&m_fptemp, 4), srcp.immediate()); // mov [fptemp],srcp + a.fild(MABS(&m_fptemp, 4)); // fild [fptemp] } else if (srcp.is_memory()) - emit_fild_m32(dst, MABS(srcp.memory())); // fild [srcp] + a.fild(MABS(srcp.memory(), 4)); // fild [srcp] else if (srcp.is_int_register()) { - emit_mov_m32_r32(dst, MABS(m_reglo[srcp.ireg()]), srcp.ireg()); // mov reglo[srcp],srcp - emit_fild_m32(dst, MABS(m_reglo[srcp.ireg()])); // fild reglo[srcp] + a.mov(MABS(m_reglo[srcp.ireg()]), Gpd(srcp.ireg())); // mov reglo[srcp],srcp + a.fild(MABS(m_reglo[srcp.ireg()], 4)); // fild reglo[srcp] } } @@ -6208,21 +5617,21 @@ void drcbe_x86::op_ffrint(x86code *&dst, const instruction &inst) { if (srcp.is_immediate()) { - emit_mov_m32_imm(dst, MABS(&m_fptemp), srcp.immediate()); // mov [fptemp],srcp - emit_mov_m32_imm(dst, MABS((uint8_t *)&m_fptemp + 4), srcp.immediate()); // mov [fptemp+4],srcp - emit_fild_m64(dst, MABS(&m_fptemp)); // fild [fptemp] + a.mov(MABS(&m_fptemp, 4), srcp.immediate()); // mov [fptemp],srcp + a.mov(MABS((uint8_t *)&m_fptemp + 4, 4), srcp.immediate()); // mov [fptemp+4],srcp + a.fild(MABS(&m_fptemp, 8)); // fild [fptemp] } else if (srcp.is_memory()) - emit_fild_m64(dst, MABS(srcp.memory())); // fild [srcp] + a.fild(MABS(srcp.memory(), 8)); // fild [srcp] else if (srcp.is_int_register()) { - emit_mov_m32_r32(dst, MABS(m_reglo[srcp.ireg()]), srcp.ireg()); // mov reglo[srcp],srcp - emit_fild_m64(dst, MABS(m_reglo[srcp.ireg()])); // fild reglo[srcp] + a.mov(MABS(m_reglo[srcp.ireg()]), Gpd(srcp.ireg())); // mov reglo[srcp],srcp + a.fild(MABS(m_reglo[srcp.ireg()], 8)); // fild reglo[srcp] } } // store the result and be done - emit_fstp_p(dst, inst.size(), dstp); // fstp [dstp] + emit_fstp_p(a, inst.size(), dstp); // fstp [dstp] } @@ -6230,7 +5639,7 @@ void drcbe_x86::op_ffrint(x86code *&dst, const instruction &inst) // op_ffrflt - process a FFRFLT opcode //------------------------------------------------- -void drcbe_x86::op_ffrflt(x86code *&dst, const instruction &inst) +void drcbe_x86::op_ffrflt(Assembler &a, const instruction &inst) { // validate instruction assert(inst.size() == 4 || inst.size() == 8); @@ -6240,15 +5649,15 @@ void drcbe_x86::op_ffrflt(x86code *&dst, const instruction &inst) // normalize parameters be_parameter dstp(*this, inst.param(0), PTYPE_MF); be_parameter srcp(*this, inst.param(1), PTYPE_MF); - const parameter &sizep = inst.param(2); + parameter const &sizep = inst.param(2); assert(sizep.is_size()); // general case if (sizep.size() == SIZE_DWORD) - emit_fld_m32(dst, MABS(srcp.memory())); // fld [srcp] + a.fld(MABS(srcp.memory(), 4)); // fld [srcp] else if (sizep.size() == SIZE_QWORD) - emit_fld_m64(dst, MABS(srcp.memory())); // fld [srcp] - emit_fstp_p(dst, inst.size(), dstp); // fstp dstp + a.fld(MABS(srcp.memory(), 8)); // fld [srcp] + emit_fstp_p(a, inst.size(), dstp); // fstp dstp } @@ -6256,7 +5665,7 @@ void drcbe_x86::op_ffrflt(x86code *&dst, const instruction &inst) // op_frnds - 
process a FRNDS opcode //------------------------------------------------- -void drcbe_x86::op_frnds(x86code *&dst, const instruction &inst) +void drcbe_x86::op_frnds(Assembler &a, const instruction &inst) { // validate instruction assert(inst.size() == 8); @@ -6268,10 +5677,10 @@ void drcbe_x86::op_frnds(x86code *&dst, const instruction &inst) be_parameter srcp(*this, inst.param(1), PTYPE_MF); // general case - emit_fld_p(dst, inst.size(), srcp); // fld srcp - emit_fstp_m32(dst, MABS(&m_fptemp)); // fstp [fptemp] - emit_fld_m32(dst, MABS(&m_fptemp)); // fld [fptemp] - emit_fstp_p(dst, inst.size(), dstp); // fstp [dstp] + emit_fld_p(a, inst.size(), srcp); // fld srcp + a.fstp(MABS(&m_fptemp, 4)); // fstp [fptemp] + a.fld(MABS(&m_fptemp, 4)); // fld [fptemp] + emit_fstp_p(a, inst.size(), dstp); // fstp [dstp] } @@ -6279,7 +5688,7 @@ void drcbe_x86::op_frnds(x86code *&dst, const instruction &inst) // op_fadd - process a FADD opcode //------------------------------------------------- -void drcbe_x86::op_fadd(x86code *&dst, const instruction &inst) +void drcbe_x86::op_fadd(Assembler &a, const instruction &inst) { // validate instruction assert(inst.size() == 4 || inst.size() == 8); @@ -6293,10 +5702,10 @@ void drcbe_x86::op_fadd(x86code *&dst, const instruction &inst) normalize_commutative(src1p, src2p); // general case - emit_fld_p(dst, inst.size(), src1p); // fld src1p - emit_fld_p(dst, inst.size(), src2p); // fld src2p - emit_faddp(dst); // faddp - emit_fstp_p(dst, inst.size(), dstp); // fstp dstp + emit_fld_p(a, inst.size(), src1p); // fld src1p + emit_fld_p(a, inst.size(), src2p); // fld src2p + a.faddp(); // faddp + emit_fstp_p(a, inst.size(), dstp); // fstp dstp } @@ -6304,7 +5713,7 @@ void drcbe_x86::op_fadd(x86code *&dst, const instruction &inst) // op_fsub - process a FSUB opcode //------------------------------------------------- -void drcbe_x86::op_fsub(x86code *&dst, const instruction &inst) +void drcbe_x86::op_fsub(Assembler &a, const instruction &inst) { // validate instruction assert(inst.size() == 4 || inst.size() == 8); @@ -6317,10 +5726,10 @@ void drcbe_x86::op_fsub(x86code *&dst, const instruction &inst) be_parameter src2p(*this, inst.param(2), PTYPE_MF); // general case - emit_fld_p(dst, inst.size(), src1p); // fld src1p - emit_fld_p(dst, inst.size(), src2p); // fld src2p - emit_fsubp(dst); // fsubp - emit_fstp_p(dst, inst.size(), dstp); // fstp dstp + emit_fld_p(a, inst.size(), src1p); // fld src1p + emit_fld_p(a, inst.size(), src2p); // fld src2p + a.fsubp(); // fsubp + emit_fstp_p(a, inst.size(), dstp); // fstp dstp } @@ -6328,7 +5737,7 @@ void drcbe_x86::op_fsub(x86code *&dst, const instruction &inst) // op_fcmp - process a FCMP opcode //------------------------------------------------- -void drcbe_x86::op_fcmp(x86code *&dst, const instruction &inst) +void drcbe_x86::op_fcmp(Assembler &a, const instruction &inst) { // validate instruction assert(inst.size() == 4 || inst.size() == 8); @@ -6340,11 +5749,11 @@ void drcbe_x86::op_fcmp(x86code *&dst, const instruction &inst) be_parameter src2p(*this, inst.param(1), PTYPE_MF); // general case - emit_fld_p(dst, inst.size(), src2p); // fld src2p - emit_fld_p(dst, inst.size(), src1p); // fld src1p - emit_fcompp(dst); // fcompp - emit_fstsw_ax(dst); // fnstsw ax - emit_sahf(dst); // sahf + emit_fld_p(a, inst.size(), src2p); // fld src2p + emit_fld_p(a, inst.size(), src1p); // fld src1p + a.fcompp(); // fcompp + a.fnstsw(ax); // fnstsw ax + a.sahf(); // sahf } @@ -6352,7 +5761,7 @@ void drcbe_x86::op_fcmp(x86code *&dst, const 
instruction &inst) // op_fmul - process a FMUL opcode //------------------------------------------------- -void drcbe_x86::op_fmul(x86code *&dst, const instruction &inst) +void drcbe_x86::op_fmul(Assembler &a, const instruction &inst) { // validate instruction assert(inst.size() == 4 || inst.size() == 8); @@ -6366,10 +5775,10 @@ void drcbe_x86::op_fmul(x86code *&dst, const instruction &inst) normalize_commutative(src1p, src2p); // general case - emit_fld_p(dst, inst.size(), src1p); // fld src1p - emit_fld_p(dst, inst.size(), src2p); // fld src2p - emit_fmulp(dst); // fmulp - emit_fstp_p(dst, inst.size(), dstp); // fstp dstp + emit_fld_p(a, inst.size(), src1p); // fld src1p + emit_fld_p(a, inst.size(), src2p); // fld src2p + a.fmulp(); // fmulp + emit_fstp_p(a, inst.size(), dstp); // fstp dstp } @@ -6377,7 +5786,7 @@ void drcbe_x86::op_fmul(x86code *&dst, const instruction &inst) // op_fdiv - process a FDIV opcode //------------------------------------------------- -void drcbe_x86::op_fdiv(x86code *&dst, const instruction &inst) +void drcbe_x86::op_fdiv(Assembler &a, const instruction &inst) { // validate instruction assert(inst.size() == 4 || inst.size() == 8); @@ -6390,10 +5799,10 @@ void drcbe_x86::op_fdiv(x86code *&dst, const instruction &inst) be_parameter src2p(*this, inst.param(2), PTYPE_MF); // general case - emit_fld_p(dst, inst.size(), src1p); // fld src1p - emit_fld_p(dst, inst.size(), src2p); // fld src2p - emit_fdivp(dst); // fdivp - emit_fstp_p(dst, inst.size(), dstp); // fstp dstp + emit_fld_p(a, inst.size(), src1p); // fld src1p + emit_fld_p(a, inst.size(), src2p); // fld src2p + a.fdivp(); // fdivp + emit_fstp_p(a, inst.size(), dstp); // fstp dstp } @@ -6401,7 +5810,7 @@ void drcbe_x86::op_fdiv(x86code *&dst, const instruction &inst) // op_fneg - process a FNEG opcode //------------------------------------------------- -void drcbe_x86::op_fneg(x86code *&dst, const instruction &inst) +void drcbe_x86::op_fneg(Assembler &a, const instruction &inst) { // validate instruction assert(inst.size() == 4 || inst.size() == 8); @@ -6413,9 +5822,9 @@ void drcbe_x86::op_fneg(x86code *&dst, const instruction &inst) be_parameter srcp(*this, inst.param(1), PTYPE_MF); // general case - emit_fld_p(dst, inst.size(), srcp); // fld srcp - emit_fchs(dst); // fchs - emit_fstp_p(dst, inst.size(), dstp); // fstp dstp + emit_fld_p(a, inst.size(), srcp); // fld srcp + a.fchs(); // fchs + emit_fstp_p(a, inst.size(), dstp); // fstp dstp } @@ -6423,7 +5832,7 @@ void drcbe_x86::op_fneg(x86code *&dst, const instruction &inst) // op_fabs - process a FABS opcode //------------------------------------------------- -void drcbe_x86::op_fabs(x86code *&dst, const instruction &inst) +void drcbe_x86::op_fabs(Assembler &a, const instruction &inst) { // validate instruction assert(inst.size() == 4 || inst.size() == 8); @@ -6435,9 +5844,9 @@ void drcbe_x86::op_fabs(x86code *&dst, const instruction &inst) be_parameter srcp(*this, inst.param(1), PTYPE_MF); // general case - emit_fld_p(dst, inst.size(), srcp); // fld srcp - emit_fabs(dst); // fabs - emit_fstp_p(dst, inst.size(), dstp); // fstp dstp + emit_fld_p(a, inst.size(), srcp); // fld srcp + a.fabs(); // fabs + emit_fstp_p(a, inst.size(), dstp); // fstp dstp } @@ -6445,7 +5854,7 @@ void drcbe_x86::op_fabs(x86code *&dst, const instruction &inst) // op_fsqrt - process a FSQRT opcode //------------------------------------------------- -void drcbe_x86::op_fsqrt(x86code *&dst, const instruction &inst) +void drcbe_x86::op_fsqrt(Assembler &a, const instruction &inst) { // 
validate instruction assert(inst.size() == 4 || inst.size() == 8); @@ -6457,9 +5866,9 @@ void drcbe_x86::op_fsqrt(x86code *&dst, const instruction &inst) be_parameter srcp(*this, inst.param(1), PTYPE_MF); // general case - emit_fld_p(dst, inst.size(), srcp); // fld srcp - emit_fsqrt(dst); // fsqrt - emit_fstp_p(dst, inst.size(), dstp); // fstp dstp + emit_fld_p(a, inst.size(), srcp); // fld srcp + a.fsqrt(); // fsqrt + emit_fstp_p(a, inst.size(), dstp); // fstp dstp } @@ -6467,7 +5876,7 @@ void drcbe_x86::op_fsqrt(x86code *&dst, const instruction &inst) // op_frecip - process a FRECIP opcode //------------------------------------------------- -void drcbe_x86::op_frecip(x86code *&dst, const instruction &inst) +void drcbe_x86::op_frecip(Assembler &a, const instruction &inst) { // validate instruction assert(inst.size() == 4 || inst.size() == 8); @@ -6479,10 +5888,10 @@ void drcbe_x86::op_frecip(x86code *&dst, const instruction &inst) be_parameter srcp(*this, inst.param(1), PTYPE_MF); // general case - emit_fld1(dst); // fld1 - emit_fld_p(dst, inst.size(), srcp); // fld srcp - emit_fdivp(dst); // fdivp - emit_fstp_p(dst, inst.size(), dstp); // fstp dstp + a.fld1(); // fld1 + emit_fld_p(a, inst.size(), srcp); // fld srcp + a.fdivp(); // fdivp + emit_fstp_p(a, inst.size(), dstp); // fstp dstp } @@ -6490,7 +5899,7 @@ void drcbe_x86::op_frecip(x86code *&dst, const instruction &inst) // op_frsqrt - process a FRSQRT opcode //------------------------------------------------- -void drcbe_x86::op_frsqrt(x86code *&dst, const instruction &inst) +void drcbe_x86::op_frsqrt(Assembler &a, const instruction &inst) { // validate instruction assert(inst.size() == 4 || inst.size() == 8); @@ -6502,11 +5911,11 @@ void drcbe_x86::op_frsqrt(x86code *&dst, const instruction &inst) be_parameter srcp(*this, inst.param(1), PTYPE_MF); // general case - emit_fld1(dst); // fld1 - emit_fld_p(dst, inst.size(), srcp); // fld srcp - emit_fsqrt(dst); // fsqrt - emit_fdivp(dst); // fdivp - emit_fstp_p(dst, inst.size(), dstp); // fstp dstp + a.fld1(); // fld1 + emit_fld_p(a, inst.size(), srcp); // fld srcp + a.fsqrt(); // fsqrt + a.fdivp(); // fdivp + emit_fstp_p(a, inst.size(), dstp); // fstp dstp } @@ -6514,7 +5923,7 @@ void drcbe_x86::op_frsqrt(x86code *&dst, const instruction &inst) // op_fcopyi - process a FCOPYI opcode //------------------------------------------------- -void drcbe_x86::op_fcopyi(x86code *&dst, const instruction &inst) +void drcbe_x86::op_fcopyi(Assembler &a, const instruction &inst) { // validate instruction assert(inst.size() == 4 || inst.size() == 8); @@ -6530,12 +5939,12 @@ void drcbe_x86::op_fcopyi(x86code *&dst, const instruction &inst) { if (srcp.is_memory()) { - emit_mov_r32_m32(dst, REG_EAX, MABS(srcp.memory())); // mov eax,[srcp] - emit_mov_m32_r32(dst, MABS(dstp.memory()), REG_EAX); // mov [dstp],eax + a.mov(eax, MABS(srcp.memory())); // mov eax,[srcp] + a.mov(MABS(dstp.memory()), eax); // mov [dstp],eax } else if (srcp.is_int_register()) { - emit_mov_m32_r32(dst, MABS(dstp.memory()), srcp.ireg()); // mov [dstp],srcp + a.mov(MABS(dstp.memory()), Gpd(srcp.ireg())); // mov [dstp],srcp } } @@ -6544,17 +5953,17 @@ void drcbe_x86::op_fcopyi(x86code *&dst, const instruction &inst) { if (srcp.is_memory()) { - emit_mov_r32_m32(dst, REG_EAX, MABS(srcp.memory())); // mov eax,[srcp] - emit_mov_r32_m32(dst, REG_EDX, MABS(srcp.memory(4))); // mov edx,[srcp+4] + a.mov(eax, MABS(srcp.memory(0))); // mov eax,[srcp] + a.mov(edx, MABS(srcp.memory(4))); // mov edx,[srcp+4] } else if (srcp.is_int_register()) { - 
emit_mov_r32_m32(dst, REG_EDX, MABS(m_reghi[srcp.ireg()])); // mov edx,[reghi[srcp]] - emit_mov_r32_r32(dst, REG_EAX, srcp.ireg()); // mov eax,srcp + a.mov(edx, MABS(m_reghi[srcp.ireg()])); // mov edx,[reghi[srcp]] + a.mov(eax, Gpd(srcp.ireg())); // mov eax,srcp } - emit_mov_m32_r32(dst, MABS(dstp.memory()), REG_EAX); // mov [dstp],eax - emit_mov_m32_r32(dst, MABS(dstp.memory(4)), REG_EDX); // mov [dstp+4],edx + a.mov(MABS(dstp.memory(0)), eax); // mov [dstp],eax + a.mov(MABS(dstp.memory(4)), edx); // mov [dstp+4],edx } } @@ -6563,7 +5972,7 @@ void drcbe_x86::op_fcopyi(x86code *&dst, const instruction &inst) // op_icopyf - process a ICOPYF opcode //------------------------------------------------- -void drcbe_x86::op_icopyf(x86code *&dst, const instruction &inst) +void drcbe_x86::op_icopyf(Assembler &a, const instruction &inst) { // validate instruction assert(inst.size() == 4 || inst.size() == 8); @@ -6577,33 +5986,33 @@ void drcbe_x86::op_icopyf(x86code *&dst, const instruction &inst) // 32-bit case if (inst.size() == 4) { - emit_mov_r32_m32(dst, REG_EAX, MABS(srcp.memory())); // mov eax,[srcp] + a.mov(eax, MABS(srcp.memory())); // mov eax,[srcp] if (dstp.is_memory()) { - emit_mov_m32_r32(dst, MABS(dstp.memory()), REG_EAX); // mov [dstp],eax + a.mov(MABS(dstp.memory()), eax); // mov [dstp],eax } else if (dstp.is_int_register()) { - emit_mov_r32_r32(dst, dstp.ireg(), REG_EAX); // mov dstp,eax + a.mov(Gpd(dstp.ireg()), eax); // mov dstp,eax } } // 64-bit case else if (inst.size() == 8) { - emit_mov_r32_m32(dst, REG_EAX, MABS(srcp.memory())); // mov eax,[srcp] - emit_mov_r32_m32(dst, REG_EDX, MABS(srcp.memory(4))); // mov edx,[srcp+4] + a.mov(eax, MABS(srcp.memory(0))); // mov eax,[srcp] + a.mov(edx, MABS(srcp.memory(4))); // mov edx,[srcp+4] if (dstp.is_memory()) { - emit_mov_m32_r32(dst, MABS(dstp.memory()), REG_EAX); // mov [dstp],eax - emit_mov_m32_r32(dst, MABS(dstp.memory(4)), REG_EDX); // mov [dstp+4],edx + a.mov(MABS(dstp.memory(0)), eax); // mov [dstp],eax + a.mov(MABS(dstp.memory(4)), edx); // mov [dstp+4],edx } else { - emit_mov_m32_r32(dst, MABS(m_reghi[dstp.ireg()]), REG_EDX); // mov [reghi[dstp]],edx - emit_mov_r32_r32(dst, dstp.ireg(), REG_EAX); // mov dstp,eax + a.mov(MABS(m_reghi[dstp.ireg()]), edx); // mov [reghi[dstp]],edx + a.mov(Gpd(dstp.ireg()), eax); // mov dstp,eax } } } |
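The recurring pattern in this conversion is that the old fixed-form helpers (emit_and_r32_p32, emit_shl_r32_p32 and friends) are replaced by generic alu_op_param/shift_op_param calls that take an asmjit instruction id, a destination operand, a UML parameter and an optional optimizer callback; the callback may emit a cheaper sequence (or nothing at all) for trivial immediates and returns true to suppress the generic instruction. Below is a minimal standalone sketch of that callback contract; the Assembler and be_parameter types here are simplified stand-ins introduced only for illustration, not the real asmjit::x86::Assembler or drcbe_x86 classes.

// Minimal sketch of the optimizer-callback idiom used by alu_op_param above.
// Assembler and be_parameter are simplified stand-ins (assumptions), not the
// real asmjit or drcbe_x86 types; the stand-in emitter just prints mnemonics.
#include <cstdint>
#include <cstdio>

struct Assembler
{
    void emit(const char *op, const char *dst, uint32_t imm) { std::printf("%s %s,0x%x\n", op, dst, imm); }
};

struct be_parameter
{
    uint64_t value;
    uint64_t immediate() const { return value; }
};

// Generic ALU emitter: the callback gets first refusal and returns true once
// it has emitted a cheaper form (or decided nothing needs to be emitted).
template <typename Optimizer>
void alu_op_param(Assembler &a, const char *op, const char *dst, const be_parameter &src, Optimizer &&optimize)
{
    if (optimize(a, dst, src))
        return;                                        // short-circuited by the callback
    a.emit(op, dst, uint32_t(src.immediate()));        // otherwise emit the plain instruction
}

int main()
{
    Assembler a;

    // mirrors the flags-free shortcuts in op_and: x & 0 becomes a mov of 0,
    // x & 0xffffffff emits nothing at all
    auto and_optimizer = [](Assembler &a, const char *dst, const be_parameter &src)
    {
        if (!src.immediate()) { a.emit("mov", dst, 0); return true; }
        if (uint32_t(src.immediate()) == 0xffffffffU) return true;
        return false;
    };

    alu_op_param(a, "and", "eax", be_parameter{ 0x0000ffff }, and_optimizer);  // general case
    alu_op_param(a, "and", "eax", be_parameter{ 0 }, and_optimizer);           // all-zero shortcut
    alu_op_param(a, "and", "eax", be_parameter{ 0xffffffff }, and_optimizer);  // all-ones shortcut
    return 0;
}

The real helpers additionally check that the source parameter is actually an immediate before consulting the callback, and the lambdas in the diff bail out whenever the instruction requests flags; both checks are omitted from this sketch for brevity.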