Diffstat (limited to 'src/devices/cpu/drcbex86.cpp')
-rw-r--r--  src/devices/cpu/drcbex86.cpp  6579
1 file changed, 3539 insertions, 3040 deletions
diff --git a/src/devices/cpu/drcbex86.cpp b/src/devices/cpu/drcbex86.cpp
index f1a39068ee1..e06f92350b2 100644
--- a/src/devices/cpu/drcbex86.cpp
+++ b/src/devices/cpu/drcbex86.cpp
@@ -82,26 +82,33 @@
**************************************************************************/
-#include <stddef.h>
#include "emu.h"
-#include "debugger.h"
-#include "emuopts.h"
-#include "drcuml.h"
#include "drcbex86.h"
-// This is a trick to make it build on Android where the x86 SDK declares ::REG_Exx
+#include "drcbeut.h"
+#include "x86log.h"
+
+#include "debug/debugcpu.h"
+#include "emuopts.h"
+
+#include "mfpresolve.h"
+
+#include "asmjit/src/asmjit/asmjit.h"
+
+#include <cstddef>
+#include <cstdio>
+#include <cstdlib>
+
+
namespace drc {
+
+namespace {
+
using namespace uml;
-using namespace x86emit;
-using x86emit::REG_EAX;
-using x86emit::REG_ECX;
-using x86emit::REG_EDX;
-using x86emit::REG_EBX;
-using x86emit::REG_ESP;
-using x86emit::REG_EBP;
-using x86emit::REG_ESI;
-using x86emit::REG_EDI;
+using namespace asmjit;
+using namespace asmjit::x86;
+
//**************************************************************************
// DEBUGGING
@@ -115,6 +122,12 @@ using x86emit::REG_EDI;
// CONSTANTS
//**************************************************************************
+#ifdef _WIN32
+constexpr bool USE_THISCALL = true;
+#else
+constexpr bool USE_THISCALL = false;
+#endif
+
const uint32_t PTYPE_M = 1 << parameter::PTYPE_MEMORY;
const uint32_t PTYPE_I = 1 << parameter::PTYPE_IMMEDIATE;
const uint32_t PTYPE_R = 1 << parameter::PTYPE_INT_REGISTER;
@@ -126,13 +139,57 @@ const uint32_t PTYPE_MRI = PTYPE_M | PTYPE_R | PTYPE_I;
const uint32_t PTYPE_MF = PTYPE_M | PTYPE_F;
+// size-to-mask table
+//const uint64_t size_to_mask[] = { 0, 0xff, 0xffff, 0, 0xffffffff, 0, 0, 0, 0xffffffffffffffffU };
+
+// register mapping tables
+const Gp::Id int_register_map[REG_I_COUNT] =
+{
+ Gp::kIdBx, Gp::kIdSi, Gp::kIdDi, Gp::kIdBp
+};
+
+// flags mapping tables
+uint8_t flags_map[0x1000];
+uint32_t flags_unmap[0x20];
+
+// condition mapping table
+const CondCode condition_map[uml::COND_MAX - uml::COND_Z] =
+{
+ CondCode::kZ, // COND_Z = 0x80, requires Z
+ CondCode::kNZ, // COND_NZ, requires Z
+ CondCode::kS, // COND_S, requires S
+ CondCode::kNS, // COND_NS, requires S
+ CondCode::kC, // COND_C, requires C
+ CondCode::kNC, // COND_NC, requires C
+ CondCode::kO, // COND_V, requires V
+ CondCode::kNO, // COND_NV, requires V
+ CondCode::kP, // COND_U, requires U
+ CondCode::kNP, // COND_NU, requires U
+ CondCode::kA, // COND_A, requires CZ
+ CondCode::kBE, // COND_BE, requires CZ
+ CondCode::kG, // COND_G, requires SVZ
+ CondCode::kLE, // COND_LE, requires SVZ
+ CondCode::kL, // COND_L, requires SV
+ CondCode::kGE, // COND_GE, requires SV
+};
+
+// FPU control register mapping
+const uint16_t fp_control[4] =
+{
+ 0x0e3f, // ROUND_TRUNC
+ 0x023f, // ROUND_ROUND
+ 0x0a3f, // ROUND_CEIL
+ 0x063f // ROUND_FLOOR
+};
+
+
//**************************************************************************
// MACROS
//**************************************************************************
#define X86_CONDITION(condition) (condition_map[condition - uml::COND_Z])
-#define X86_NOT_CONDITION(condition) (condition_map[condition - uml::COND_Z] ^ 1)
+#define X86_NOT_CONDITION(condition) negateCond(condition_map[condition - uml::COND_Z])
#define assert_no_condition(inst) assert((inst).condition() == uml::COND_ALWAYS)
#define assert_any_condition(inst) assert((inst).condition() == uml::COND_ALWAYS || ((inst).condition() >= uml::COND_Z && (inst).condition() < uml::COND_MAX))
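A minimal usage sketch (editorial, not part of the patch) of how these macros feed asmjit's generic `j(CondCode, Label)` conditional-jump emitter; `target` is a hypothetical label created by the caller:

    // emit a branch for a UML condition; COND_ALWAYS falls back to an
    // unconditional jmp, anything else goes through the condition_map table
    void emit_branch_sketch(asmjit::x86::Assembler &a, uml::condition_t cond, asmjit::Label const &target)
    {
        if (cond == uml::COND_ALWAYS)
            a.jmp(target);
        else
            a.j(X86_CONDITION(cond), target);
    }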
@@ -142,52 +199,477 @@ const uint32_t PTYPE_MF = PTYPE_M | PTYPE_F;
//**************************************************************************
-// GLOBAL VARIABLES
+// MISCELLANEOUS FUNCTIONS
//**************************************************************************
-drcbe_x86::opcode_generate_func drcbe_x86::s_opcode_table[OP_MAX];
+void calculate_status_flags(Assembler &a, Operand const &dst, u8 flags)
+{
+ // calculate status flags in a way that does not modify any other status flags
+ uint32_t flagmask = 0;
-// size-to-mask table
-//static const uint64_t size_to_mask[] = { 0, 0xff, 0xffff, 0, 0xffffffff, 0, 0, 0, 0xffffffffffffffffU };
+ if (flags & FLAG_C) flagmask |= 0x001;
+ if (flags & FLAG_V) flagmask |= 0x800;
+ if (flags & FLAG_Z) flagmask |= 0x040;
+ if (flags & FLAG_S) flagmask |= 0x080;
+ if (flags & FLAG_U) flagmask |= 0x004;
-// register mapping tables
-static const uint8_t int_register_map[REG_I_COUNT] =
+ if ((flags & (FLAG_Z | FLAG_S)) == flags)
+ {
+ Gp tempreg = dst.isMem() ? eax : dst.as<Gpd>().id() == ebx.id() ? eax : ebx;
+ Gp tempreg2 = dst.isMem() ? edx : dst.as<Gpd>().id() == ecx.id() ? edx : ecx;
+
+ if (dst.isMem())
+ {
+ a.push(tempreg2);
+ a.mov(tempreg2, dst.as<Mem>());
+ }
+
+ a.push(tempreg);
+
+ a.pushfd();
+ a.pop(tempreg);
+ a.and_(tempreg, ~flagmask);
+
+ a.add(dst.isMem() ? tempreg2.as<Gpd>() : dst.as<Gpd>(), 0);
+
+ a.pushfd();
+ a.and_(dword_ptr(esp), flagmask);
+ a.or_(dword_ptr(esp), tempreg);
+ a.popfd();
+
+ a.pop(tempreg);
+
+ if (dst.isMem())
+ a.pop(tempreg2);
+ }
+ else
+ {
+ fatalerror("drcbe_x86::calculate_status_flags: unknown flag combination requested: %02x\n", flags);
+ }
+}
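In plain C, the merge this routine performs on the stacked EFLAGS values amounts to the following (editorial sketch; names are illustrative):

    // keep every EFLAGS bit outside flagmask from the previous value and take
    // the bits inside flagmask from the freshly computed value
    uint32_t merge_eflags_sketch(uint32_t old_eflags, uint32_t new_eflags, uint32_t flagmask)
    {
        return (old_eflags & ~flagmask) | (new_eflags & flagmask);
    }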
+
+
+//-------------------------------------------------
+// dmulu - perform a double-wide unsigned multiply
+//-------------------------------------------------
+
+template <bool HalfmulFlags>
+int dmulu(uint64_t &dstlo, uint64_t &dsthi, uint64_t src1, uint64_t src2, bool flags)
{
- REG_EBX, REG_ESI, REG_EDI, REG_EBP
-};
+ // shortcut if we don't care about the high bits or the flags
+ if (&dstlo == &dsthi && !flags)
+ {
+ dstlo = src1 * src2;
+ return 0;
+ }
-// flags mapping tables
-static uint8_t flags_map[0x1000];
-static uint32_t flags_unmap[0x20];
+ if (!src1 || !src2)
+ {
+ dsthi = dstlo = 0;
+ return FLAG_Z;
+ }
-// condition mapping table
-static const uint8_t condition_map[uml::COND_MAX - uml::COND_Z] =
-{
- x86emit::COND_Z, // COND_Z = 0x80, requires Z
- x86emit::COND_NZ, // COND_NZ, requires Z
- x86emit::COND_S, // COND_S, requires S
- x86emit::COND_NS, // COND_NS, requires S
- x86emit::COND_C, // COND_C, requires C
- x86emit::COND_NC, // COND_NC, requires C
- x86emit::COND_O, // COND_V, requires V
- x86emit::COND_NO, // COND_NV, requires V
- x86emit::COND_P, // COND_U, requires U
- x86emit::COND_NP, // COND_NU, requires U
- x86emit::COND_A, // COND_A, requires CZ
- x86emit::COND_BE, // COND_BE, requires CZ
- x86emit::COND_G, // COND_G, requires SVZ
- x86emit::COND_LE, // COND_LE, requires SVZ
- x86emit::COND_L, // COND_L, requires SV
- x86emit::COND_GE, // COND_GE, requires SV
+ // compute high and low parts first
+ uint64_t lo = uint64_t(uint32_t(src1 >> 0)) * uint64_t(uint32_t(src2 >> 0));
+ uint64_t hi = uint64_t(uint32_t(src1 >> 32)) * uint64_t(uint32_t(src2 >> 32));
+
+ // compute middle parts
+ uint64_t prevlo = lo;
+ uint64_t temp = uint64_t(uint32_t(src1 >> 32)) * uint64_t(uint32_t(src2 >> 0));
+ lo += temp << 32;
+ hi += (temp >> 32) + (lo < prevlo);
+
+ prevlo = lo;
+ temp = uint64_t(uint32_t(src1 >> 0)) * uint64_t(uint32_t(src2 >> 32));
+ lo += temp << 32;
+ hi += (temp >> 32) + (lo < prevlo);
+
+ // store the results
+ dsthi = hi;
+ dstlo = lo;
+
+ if (HalfmulFlags)
+ return ((lo >> 60) & FLAG_S) | (hi ? FLAG_V : 0) | (!lo ? FLAG_Z : 0);
+ else
+ return ((hi >> 60) & FLAG_S) | (hi ? FLAG_V : 0) | ((!hi && !lo) ? FLAG_Z : 0);
+}
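The routine uses the schoolbook decomposition of a 64x64 multiply into four 32x32 partial products. A standalone check of the same arithmetic (editorial sketch; assumes the GCC/Clang unsigned __int128 extension, which the back-end itself cannot rely on):

    #include <cassert>
    #include <cstdint>

    void check_dmulu_decomposition(uint64_t x, uint64_t y)
    {
        uint64_t lo = uint64_t(uint32_t(x)) * uint64_t(uint32_t(y));
        uint64_t hi = uint64_t(uint32_t(x >> 32)) * uint64_t(uint32_t(y >> 32));

        uint64_t prev = lo;
        uint64_t mid = uint64_t(uint32_t(x >> 32)) * uint64_t(uint32_t(y));
        lo += mid << 32;
        hi += (mid >> 32) + (lo < prev);          // carry out of the low word

        prev = lo;
        mid = uint64_t(uint32_t(x)) * uint64_t(uint32_t(y >> 32));
        lo += mid << 32;
        hi += (mid >> 32) + (lo < prev);

        unsigned __int128 const full = (unsigned __int128)x * y;
        assert(lo == uint64_t(full) && hi == uint64_t(full >> 64));
    }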
+
+
+//-------------------------------------------------
+// dmuls - perform a double-wide signed multiply
+//-------------------------------------------------
+
+template <bool HalfmulFlags>
+int dmuls(uint64_t &dstlo, uint64_t &dsthi, int64_t src1, int64_t src2, bool flags)
+{
+ uint64_t lo, hi, prevlo;
+ uint64_t a, b, temp;
+
+ // shortcut if we don't care about the high bits or the flags
+ if (&dstlo == &dsthi && !flags)
+ {
+ dstlo = src1 * src2;
+ return 0;
+ }
+
+ if (!src1 || !src2)
+ {
+ dsthi = dstlo = 0;
+ return FLAG_Z;
+ }
+
+ // fetch absolute source values
+ a = src1; if (int64_t(a) < 0) a = -a;
+ b = src2; if (int64_t(b) < 0) b = -b;
+
+ // compute high and low parts first
+ lo = uint64_t(uint32_t(a >> 0)) * uint64_t(uint32_t(b >> 0));
+ hi = uint64_t(uint32_t(a >> 32)) * uint64_t(uint32_t(b >> 32));
+
+ // compute middle parts
+ prevlo = lo;
+ temp = uint64_t(uint32_t(a >> 32)) * uint64_t(uint32_t(b >> 0));
+ lo += temp << 32;
+ hi += (temp >> 32) + (lo < prevlo);
+
+ prevlo = lo;
+ temp = uint64_t(uint32_t(a >> 0)) * uint64_t(uint32_t(b >> 32));
+ lo += temp << 32;
+ hi += (temp >> 32) + (lo < prevlo);
+
+ // adjust for signage
+ if (int64_t(src1 ^ src2) < 0)
+ {
+ hi = ~hi + (lo == 0);
+ lo = ~lo + 1;
+ }
+
+ // store the results
+ dsthi = hi;
+ dstlo = lo;
+
+ if (HalfmulFlags)
+ return ((lo >> 60) & FLAG_S) | ((hi != (int64_t(lo) >> 63)) ? FLAG_V : 0) | (!lo ? FLAG_Z : 0);
+ else
+ return ((hi >> 60) & FLAG_S) | ((hi != (int64_t(lo) >> 63)) ? FLAG_V : 0) | ((!hi && !lo) ? FLAG_Z : 0);
+}
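The sign adjustment above is a 128-bit two's-complement negation spread across two 64-bit words; as a standalone helper it would read (editorial sketch):

    // negate a 128-bit value held as lo/hi: invert both words, add one to the
    // low word, and carry into the high word only when lo wraps back to zero
    void negate128_sketch(uint64_t &lo, uint64_t &hi)
    {
        hi = ~hi + (lo == 0);
        lo = ~lo + 1;
    }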
+
+
+//-------------------------------------------------
+// ddivu - perform a double-wide unsigned divide
+//-------------------------------------------------
+
+int ddivu(uint64_t &dstlo, uint64_t &dsthi, uint64_t src1, uint64_t src2)
+{
+ // do nothing if src2 == 0
+ if (src2 == 0)
+ return FLAG_V;
+
+ dstlo = src1 / src2;
+ if (&dstlo != &dsthi)
+ dsthi = src1 % src2;
+ return ((dstlo == 0) << 2) | ((dstlo >> 60) & FLAG_S);
+}
+
+
+//-------------------------------------------------
+// ddivs - perform a double-wide signed divide
+//-------------------------------------------------
+
+int ddivs(uint64_t &dstlo, uint64_t &dsthi, int64_t src1, int64_t src2)
+{
+ // do nothing if src2 == 0
+ if (src2 == 0)
+ return FLAG_V;
+
+ dstlo = src1 / src2;
+ if (&dstlo != &dsthi)
+ dsthi = src1 % src2;
+ return ((dstlo == 0) << 2) | ((dstlo >> 60) & FLAG_S);
+}
+
+
+
+//**************************************************************************
+// TYPE DEFINITIONS
+//**************************************************************************
+
+class ThrowableErrorHandler : public ErrorHandler
+{
+public:
+ void handleError(Error err, const char *message, BaseEmitter *origin) override
+ {
+ throw emu_fatalerror("asmjit error %d: %s", err, message);
+ }
};
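How such a handler gets hooked up (editorial sketch; asmjit's CodeHolder::setErrorHandler() is the intended attachment point, though the actual wiring is not shown in this hunk):

    void attach_error_handler_sketch(asmjit::CodeHolder &ch)
    {
        static ThrowableErrorHandler handler;
        ch.setErrorHandler(&handler);   // emit errors now surface as emu_fatalerror
    }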
-// FPU control register mapping
-static const uint16_t fp_control[4] =
+
+class drcbe_x86 : public drcbe_interface
{
- 0x0e3f, // ROUND_TRUNC
- 0x023f, // ROUND_ROUND
- 0x0a3f, // ROUND_CEIL
- 0x063f // ROUND_FLOOR
+ using x86_entry_point_func = uint32_t (*)(x86code *entry);
+
+public:
+ // construction/destruction
+ drcbe_x86(drcuml_state &drcuml, device_t &device, drc_cache &cache, uint32_t flags, int modes, int addrbits, int ignorebits);
+ virtual ~drcbe_x86();
+
+ // required overrides
+ virtual void reset() override;
+ virtual int execute(uml::code_handle &entry) override;
+ virtual void generate(drcuml_block &block, const uml::instruction *instlist, uint32_t numinst) override;
+ virtual bool hash_exists(uint32_t mode, uint32_t pc) const noexcept override;
+ virtual void get_info(drcbe_info &info) const noexcept override;
+ virtual bool logging() const noexcept override { return m_log != nullptr; }
+
+private:
+ // HACK: leftover from x86emit
+ static inline constexpr int REG_MAX = 16;
+
+ // a be_parameter is similar to a uml::parameter but maps to native registers/memory
+ class be_parameter
+ {
+ public:
+ // parameter types
+ enum be_parameter_type
+ {
+ PTYPE_NONE = 0, // invalid
+ PTYPE_IMMEDIATE, // immediate; value = sign-extended to 64 bits
+ PTYPE_INT_REGISTER, // integer register; value = 0-REG_MAX
+ PTYPE_FLOAT_REGISTER, // floating point register; value = 0-REG_MAX
+ PTYPE_MEMORY, // memory; value = pointer to memory
+ PTYPE_MAX
+ };
+
+ // represents the value of a parameter
+ typedef uint64_t be_parameter_value;
+
+ // construction
+ be_parameter() : m_type(PTYPE_NONE), m_value(0) { }
+ be_parameter(uint64_t val) : m_type(PTYPE_IMMEDIATE), m_value(val) { }
+ be_parameter(drcbe_x86 &drcbe, const uml::parameter &param, uint32_t allowed);
+ be_parameter(const be_parameter &param) = default;
+
+ // creators for types that don't safely default
+ static be_parameter make_ireg(int regnum) { assert(regnum >= 0 && regnum < REG_MAX); return be_parameter(PTYPE_INT_REGISTER, regnum); }
+ static be_parameter make_freg(int regnum) { assert(regnum >= 0 && regnum < REG_MAX); return be_parameter(PTYPE_FLOAT_REGISTER, regnum); }
+ static be_parameter make_memory(void *base) { return be_parameter(PTYPE_MEMORY, reinterpret_cast<be_parameter_value>(base)); }
+ static be_parameter make_memory(const void *base) { return be_parameter(PTYPE_MEMORY, reinterpret_cast<be_parameter_value>(const_cast<void *>(base))); }
+
+ // operators
+ bool operator==(be_parameter const &rhs) const { return (m_type == rhs.m_type && m_value == rhs.m_value); }
+ bool operator!=(be_parameter const &rhs) const { return (m_type != rhs.m_type || m_value != rhs.m_value); }
+
+ // getters
+ be_parameter_type type() const { return m_type; }
+ uint64_t immediate() const { assert(m_type == PTYPE_IMMEDIATE); return m_value; }
+ uint32_t ireg() const { assert(m_type == PTYPE_INT_REGISTER); assert(m_value < REG_MAX); return m_value; }
+ uint32_t freg() const { assert(m_type == PTYPE_FLOAT_REGISTER); assert(m_value < REG_MAX); return m_value; }
+ void *memory(uint32_t offset = 0) const { assert(m_type == PTYPE_MEMORY); return reinterpret_cast<void *>(m_value + offset); }
+
+ // type queries
+ bool is_immediate() const { return (m_type == PTYPE_IMMEDIATE); }
+ bool is_int_register() const { return (m_type == PTYPE_INT_REGISTER); }
+ bool is_float_register() const { return (m_type == PTYPE_FLOAT_REGISTER); }
+ bool is_memory() const { return (m_type == PTYPE_MEMORY); }
+
+ // other queries
+ bool is_immediate_value(uint64_t value) const { return (m_type == PTYPE_IMMEDIATE && m_value == value); }
+
+ // helpers
+ Gpd select_register(Gpd const &defreg) const;
+ Xmm select_register(Xmm defreg) const;
+ template <typename T> T select_register(T defreg, be_parameter const &checkparam) const;
+ template <typename T> T select_register(T defreg, be_parameter const &checkparam, be_parameter const &checkparam2) const;
+
+ private:
+ // private constructor
+ be_parameter(be_parameter_type type, be_parameter_value value) : m_type(type), m_value(value) { }
+
+ // internals
+ be_parameter_type m_type; // parameter type
+ be_parameter_value m_value; // parameter value
+ };
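Typical call pattern for the select_register helpers (editorial sketch; dstp and srcp are hypothetical be_parameter values inside an opcode generator): the destination keeps its own register when it is already register-backed, otherwise it falls back to the supplied scratch register, and the checkparam overloads skip the parameter's own register when it would alias another operand.

    // Gp const dstreg = dstp.select_register(eax, srcp);  // eax unless dstp is a register distinct from srcp
    // Xmm const fpreg = srcp.select_register(xmm0);       // xmm0 unless srcp is a float register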
+
+ // helpers
+ Mem MABS(void const *base, u32 const size = 0) const { return Mem(uintptr_t(base), size); }
+ void normalize_commutative(be_parameter &inner, be_parameter &outer);
+ void emit_combine_z_flags(Assembler &a);
+ void emit_combine_zs_flags(Assembler &a);
+ void emit_combine_z_shl_flags(Assembler &a);
+ void reset_last_upper_lower_reg();
+ void set_last_lower_reg(Assembler &a, be_parameter const &param, Gp const &reglo);
+ void set_last_upper_reg(Assembler &a, be_parameter const &param, Gp const &reghi);
+ bool can_skip_lower_load(Assembler &a, uint32_t *memref, Gp const &reglo);
+ bool can_skip_upper_load(Assembler &a, uint32_t *memref, Gp const &reghi);
+
+ [[noreturn]] void end_of_block() const;
+ static void debug_log_hashjmp(int mode, offs_t pc);
+
+ void generate_one(Assembler &a, const uml::instruction &inst);
+
+ // code generators
+ void op_handle(Assembler &a, const uml::instruction &inst);
+ void op_hash(Assembler &a, const uml::instruction &inst);
+ void op_label(Assembler &a, const uml::instruction &inst);
+ void op_comment(Assembler &a, const uml::instruction &inst);
+ void op_mapvar(Assembler &a, const uml::instruction &inst);
+
+ void op_nop(Assembler &a, const uml::instruction &inst);
+ void op_break(Assembler &a, const uml::instruction &inst);
+ void op_debug(Assembler &a, const uml::instruction &inst);
+ void op_exit(Assembler &a, const uml::instruction &inst);
+ void op_hashjmp(Assembler &a, const uml::instruction &inst);
+ void op_jmp(Assembler &a, const uml::instruction &inst);
+ void op_exh(Assembler &a, const uml::instruction &inst);
+ void op_callh(Assembler &a, const uml::instruction &inst);
+ void op_ret(Assembler &a, const uml::instruction &inst);
+ void op_callc(Assembler &a, const uml::instruction &inst);
+ void op_recover(Assembler &a, const uml::instruction &inst);
+
+ void op_setfmod(Assembler &a, const uml::instruction &inst);
+ void op_getfmod(Assembler &a, const uml::instruction &inst);
+ void op_getexp(Assembler &a, const uml::instruction &inst);
+ void op_getflgs(Assembler &a, const uml::instruction &inst);
+ void op_setflgs(Assembler &a, const uml::instruction &inst);
+ void op_save(Assembler &a, const uml::instruction &inst);
+ void op_restore(Assembler &a, const uml::instruction &inst);
+
+ void op_load(Assembler &a, const uml::instruction &inst);
+ void op_loads(Assembler &a, const uml::instruction &inst);
+ void op_store(Assembler &a, const uml::instruction &inst);
+ void op_read(Assembler &a, const uml::instruction &inst);
+ void op_readm(Assembler &a, const uml::instruction &inst);
+ void op_write(Assembler &a, const uml::instruction &inst);
+ void op_writem(Assembler &a, const uml::instruction &inst);
+ void op_carry(Assembler &a, const uml::instruction &inst);
+ void op_set(Assembler &a, const uml::instruction &inst);
+ void op_mov(Assembler &a, const uml::instruction &inst);
+ void op_sext(Assembler &a, const uml::instruction &inst);
+ void op_roland(Assembler &a, const uml::instruction &inst);
+ void op_rolins(Assembler &a, const uml::instruction &inst);
+ void op_add(Assembler &a, const uml::instruction &inst);
+ void op_addc(Assembler &a, const uml::instruction &inst);
+ void op_sub(Assembler &a, const uml::instruction &inst);
+ void op_subc(Assembler &a, const uml::instruction &inst);
+ void op_cmp(Assembler &a, const uml::instruction &inst);
+ void op_mulu(Assembler &a, const uml::instruction &inst);
+ void op_mululw(Assembler &a, const uml::instruction &inst);
+ void op_muls(Assembler &a, const uml::instruction &inst);
+ void op_mulslw(Assembler &a, const uml::instruction &inst);
+ void op_divu(Assembler &a, const uml::instruction &inst);
+ void op_divs(Assembler &a, const uml::instruction &inst);
+ void op_and(Assembler &a, const uml::instruction &inst);
+ void op_test(Assembler &a, const uml::instruction &inst);
+ void op_or(Assembler &a, const uml::instruction &inst);
+ void op_xor(Assembler &a, const uml::instruction &inst);
+ void op_lzcnt(Assembler &a, const uml::instruction &inst);
+ void op_tzcnt(Assembler &a, const uml::instruction &inst);
+ void op_bswap(Assembler &a, const uml::instruction &inst);
+ void op_shl(Assembler &a, const uml::instruction &inst);
+ void op_shr(Assembler &a, const uml::instruction &inst);
+ void op_sar(Assembler &a, const uml::instruction &inst);
+ void op_ror(Assembler &a, const uml::instruction &inst);
+ void op_rol(Assembler &a, const uml::instruction &inst);
+ void op_rorc(Assembler &a, const uml::instruction &inst);
+ void op_rolc(Assembler &a, const uml::instruction &inst);
+
+ void op_fload(Assembler &a, const uml::instruction &inst);
+ void op_fstore(Assembler &a, const uml::instruction &inst);
+ void op_fread(Assembler &a, const uml::instruction &inst);
+ void op_fwrite(Assembler &a, const uml::instruction &inst);
+ void op_fmov(Assembler &a, const uml::instruction &inst);
+ void op_ftoint(Assembler &a, const uml::instruction &inst);
+ void op_ffrint(Assembler &a, const uml::instruction &inst);
+ void op_ffrflt(Assembler &a, const uml::instruction &inst);
+ void op_frnds(Assembler &a, const uml::instruction &inst);
+ void op_fadd(Assembler &a, const uml::instruction &inst);
+ void op_fsub(Assembler &a, const uml::instruction &inst);
+ void op_fcmp(Assembler &a, const uml::instruction &inst);
+ void op_fmul(Assembler &a, const uml::instruction &inst);
+ void op_fdiv(Assembler &a, const uml::instruction &inst);
+ void op_fneg(Assembler &a, const uml::instruction &inst);
+ void op_fabs(Assembler &a, const uml::instruction &inst);
+ void op_fsqrt(Assembler &a, const uml::instruction &inst);
+ void op_frecip(Assembler &a, const uml::instruction &inst);
+ void op_frsqrt(Assembler &a, const uml::instruction &inst);
+ void op_fcopyi(Assembler &a, const uml::instruction &inst);
+ void op_icopyf(Assembler &a, const uml::instruction &inst);
+
+ // 32-bit code emission helpers
+ void emit_mov_r32_p32(Assembler &a, Gp const &reg, be_parameter const &param);
+ void emit_mov_r32_p32_keepflags(Assembler &a, Gp const &reg, be_parameter const &param);
+ void emit_mov_m32_p32(Assembler &a, Mem memref, be_parameter const &param);
+ void emit_mov_p32_r32(Assembler &a, be_parameter const &param, Gp const &reg);
+
+ template <typename T> void alu_op_param(Assembler &a, Inst::Id const opcode, asmjit::Operand const &dst, be_parameter const &param, T &&optimize);
+ void alu_op_param(Assembler &a, Inst::Id const opcode, asmjit::Operand const &dst, be_parameter const &param) { alu_op_param(a, opcode, dst, param, [](Assembler &a, asmjit::Operand dst, be_parameter const &src) { return false; }); }
+ template <typename T> void shift_op_param(Assembler &a, Inst::Id const opcode, size_t opsize, asmjit::Operand const &dst, be_parameter const &param, T &&optimize, bool update_flags);
+
+ // 64-bit code emission helpers
+ void emit_mov_r64_p64(Assembler &a, Gp const &reglo, Gp const &reghi, be_parameter const &param);
+ void emit_mov_r64_p64_keepflags(Assembler &a, Gp const &reglo, Gp const &reghi, be_parameter const &param);
+ void emit_mov_m64_p64(Assembler &a, Mem const &memref, be_parameter const &param);
+ void emit_mov_p64_r64(Assembler &a, be_parameter const &param, Gp const &reglo, Gp const &reghi);
+ void emit_and_r64_p64(Assembler &a, Gp const &reglo, Gp const &reghi, be_parameter const &param, const uml::instruction &inst);
+ void emit_and_m64_p64(Assembler &a, Mem const &memref_lo, Mem const &memref_hi, be_parameter const &param, const uml::instruction &inst);
+ void emit_or_r64_p64(Assembler &a, Gp const &reglo, Gp const &reghi, be_parameter const &param, const uml::instruction &inst);
+ void emit_or_m64_p64(Assembler &a, Mem const &memref_lo, Mem const &memref_hi, be_parameter const &param, const uml::instruction &inst);
+ void emit_xor_r64_p64(Assembler &a, Gp const &reglo, Gp const &reghi, be_parameter const &param, const uml::instruction &inst);
+ void emit_xor_m64_p64(Assembler &a, Mem const &memref_lo, Mem const &memref_hi, be_parameter const &param, const uml::instruction &inst);
+ void emit_shl_r64_p64(Assembler &a, Gp const &reglo, Gp const &reghi, be_parameter const &param, const uml::instruction &inst);
+ void emit_shr_r64_p64(Assembler &a, Gp const &reglo, Gp const &reghi, be_parameter const &param, const uml::instruction &inst);
+ void emit_sar_r64_p64(Assembler &a, Gp const &reglo, Gp const &reghi, be_parameter const &param, const uml::instruction &inst);
+ void emit_rol_r64_p64(Assembler &a, Gp const &reglo, Gp const &reghi, be_parameter const &param, const uml::instruction &inst);
+ void emit_ror_r64_p64(Assembler &a, Gp const &reglo, Gp const &reghi, be_parameter const &param, const uml::instruction &inst);
+ void emit_rcl_r64_p64(Assembler &a, Gp const &reglo, Gp const &reghi, be_parameter const &param, const uml::instruction &inst);
+ void emit_rcr_r64_p64(Assembler &a, Gp const &reglo, Gp const &reghi, be_parameter const &param, const uml::instruction &inst);
+
+ void alu_op_param(Assembler &a, Inst::Id const opcode_lo, Inst::Id const opcode_hi, Gp const &lo, Gp const &hi, be_parameter const &param, bool const saveflags);
+ void alu_op_param(Assembler &a, Inst::Id const opcode_lo, Inst::Id const opcode_hi, Mem const &lo, Mem const &hi, be_parameter const &param, bool const saveflags);
+
+ // floating-point code emission helpers
+ void emit_fld_p(Assembler &a, int size, be_parameter const &param);
+ void emit_fstp_p(Assembler &a, int size, be_parameter const &param);
+
+ size_t emit(asmjit::CodeHolder &ch);
+
+ // internal state
+ drc_hash_table m_hash; // hash table state
+ drc_map_variables m_map; // code map
+ x86log_context * m_log; // logging
+ FILE * m_log_asmjit;
+ bool m_logged_common; // logged common code already?
+ bool const m_sse3; // do we have SSE3 support?
+
+ x86_entry_point_func m_entry; // entry point
+ x86code * m_exit; // exit point
+ x86code * m_nocode; // nocode handler
+ x86code * m_endofblock; // end of block handler
+ x86code * m_save; // save handler
+ x86code * m_restore; // restore handler
+
+ uint32_t * m_reglo[REG_MAX]; // pointer to low part of data for each register
+ uint32_t * m_reghi[REG_MAX]; // pointer to high part of data for each register
+ Gp m_last_lower_reg; // last register we stored a lower from
+ x86code * m_last_lower_pc; // PC after instruction where we last stored a lower register
+ uint32_t * m_last_lower_addr; // address where we last stored an lower register
+ Gp m_last_upper_reg; // last register we stored an upper from
+ x86code * m_last_upper_pc; // PC after instruction where we last stored an upper register
+ uint32_t * m_last_upper_addr; // address where we last stored an upper register
+ double m_fptemp; // temporary storage for floating point
+
+ uint16_t m_fpumode; // saved FPU mode
+ uint16_t m_fmodesave; // temporary location for saving
+
+ void * m_stacksave; // saved stack pointer
+ void * m_hashstacksave; // saved stack pointer for hashjmp
+ uint64_t m_reslo; // extended low result
+ uint64_t m_reshi; // extended high result
+
+ // resolved memory handler functions
+ resolved_member_function m_debug_cpu_instruction_hook;
+ resolved_member_function m_drcmap_get_value;
+ resolved_memory_accessors_vector m_memory_accessors;
};
@@ -196,99 +678,107 @@ static const uint16_t fp_control[4] =
// TABLES
//**************************************************************************
-const drcbe_x86::opcode_table_entry drcbe_x86::s_opcode_table_source[] =
+inline void drcbe_x86::generate_one(Assembler &a, const uml::instruction &inst)
{
+ switch (inst.opcode())
+ {
// Compile-time opcodes
- { uml::OP_HANDLE, &drcbe_x86::op_handle }, // HANDLE handle
- { uml::OP_HASH, &drcbe_x86::op_hash }, // HASH mode,pc
- { uml::OP_LABEL, &drcbe_x86::op_label }, // LABEL imm
- { uml::OP_COMMENT, &drcbe_x86::op_comment }, // COMMENT string
- { uml::OP_MAPVAR, &drcbe_x86::op_mapvar }, // MAPVAR mapvar,value
+ case uml::OP_HANDLE: op_handle(a, inst); break; // HANDLE handle
+ case uml::OP_HASH: op_hash(a, inst); break; // HASH mode,pc
+ case uml::OP_LABEL: op_label(a, inst); break; // LABEL imm
+ case uml::OP_COMMENT: op_comment(a, inst); break; // COMMENT string
+ case uml::OP_MAPVAR: op_mapvar(a, inst); break; // MAPVAR mapvar,value
// Control Flow Operations
- { uml::OP_NOP, &drcbe_x86::op_nop }, // NOP
- { uml::OP_DEBUG, &drcbe_x86::op_debug }, // DEBUG pc
- { uml::OP_EXIT, &drcbe_x86::op_exit }, // EXIT src1[,c]
- { uml::OP_HASHJMP, &drcbe_x86::op_hashjmp }, // HASHJMP mode,pc,handle
- { uml::OP_JMP, &drcbe_x86::op_jmp }, // JMP imm[,c]
- { uml::OP_EXH, &drcbe_x86::op_exh }, // EXH handle,param[,c]
- { uml::OP_CALLH, &drcbe_x86::op_callh }, // CALLH handle[,c]
- { uml::OP_RET, &drcbe_x86::op_ret }, // RET [c]
- { uml::OP_CALLC, &drcbe_x86::op_callc }, // CALLC func,ptr[,c]
- { uml::OP_RECOVER, &drcbe_x86::op_recover }, // RECOVER dst,mapvar
+ case uml::OP_NOP: op_nop(a, inst); break; // NOP
+ case uml::OP_BREAK: op_break(a, inst); break; // BREAK
+ case uml::OP_DEBUG: op_debug(a, inst); break; // DEBUG pc
+ case uml::OP_EXIT: op_exit(a, inst); break; // EXIT src1[,c]
+ case uml::OP_HASHJMP: op_hashjmp(a, inst); break; // HASHJMP mode,pc,handle
+ case uml::OP_JMP: op_jmp(a, inst); break; // JMP imm[,c]
+ case uml::OP_EXH: op_exh(a, inst); break; // EXH handle,param[,c]
+ case uml::OP_CALLH: op_callh(a, inst); break; // CALLH handle[,c]
+ case uml::OP_RET: op_ret(a, inst); break; // RET [c]
+ case uml::OP_CALLC: op_callc(a, inst); break; // CALLC func,ptr[,c]
+ case uml::OP_RECOVER: op_recover(a, inst); break; // RECOVER dst,mapvar
// Internal Register Operations
- { uml::OP_SETFMOD, &drcbe_x86::op_setfmod }, // SETFMOD src
- { uml::OP_GETFMOD, &drcbe_x86::op_getfmod }, // GETFMOD dst
- { uml::OP_GETEXP, &drcbe_x86::op_getexp }, // GETEXP dst
- { uml::OP_GETFLGS, &drcbe_x86::op_getflgs }, // GETFLGS dst[,f]
- { uml::OP_SAVE, &drcbe_x86::op_save }, // SAVE dst
- { uml::OP_RESTORE, &drcbe_x86::op_restore }, // RESTORE dst
+ case uml::OP_SETFMOD: op_setfmod(a, inst); break; // SETFMOD src
+ case uml::OP_GETFMOD: op_getfmod(a, inst); break; // GETFMOD dst
+ case uml::OP_GETEXP: op_getexp(a, inst); break; // GETEXP dst
+ case uml::OP_GETFLGS: op_getflgs(a, inst); break; // GETFLGS dst[,f]
+ case uml::OP_SETFLGS: op_setflgs(a, inst); break; // SETFLGS src
+ case uml::OP_SAVE: op_save(a, inst); break; // SAVE dst
+ case uml::OP_RESTORE: op_restore(a, inst); break; // RESTORE dst
// Integer Operations
- { uml::OP_LOAD, &drcbe_x86::op_load }, // LOAD dst,base,index,size
- { uml::OP_LOADS, &drcbe_x86::op_loads }, // LOADS dst,base,index,size
- { uml::OP_STORE, &drcbe_x86::op_store }, // STORE base,index,src,size
- { uml::OP_READ, &drcbe_x86::op_read }, // READ dst,src1,spacesize
- { uml::OP_READM, &drcbe_x86::op_readm }, // READM dst,src1,mask,spacesize
- { uml::OP_WRITE, &drcbe_x86::op_write }, // WRITE dst,src1,spacesize
- { uml::OP_WRITEM, &drcbe_x86::op_writem }, // WRITEM dst,src1,spacesize
- { uml::OP_CARRY, &drcbe_x86::op_carry }, // CARRY src,bitnum
- { uml::OP_SET, &drcbe_x86::op_set }, // SET dst,c
- { uml::OP_MOV, &drcbe_x86::op_mov }, // MOV dst,src[,c]
- { uml::OP_SEXT, &drcbe_x86::op_sext }, // SEXT dst,src
- { uml::OP_ROLAND, &drcbe_x86::op_roland }, // ROLAND dst,src1,src2,src3
- { uml::OP_ROLINS, &drcbe_x86::op_rolins }, // ROLINS dst,src1,src2,src3
- { uml::OP_ADD, &drcbe_x86::op_add }, // ADD dst,src1,src2[,f]
- { uml::OP_ADDC, &drcbe_x86::op_addc }, // ADDC dst,src1,src2[,f]
- { uml::OP_SUB, &drcbe_x86::op_sub }, // SUB dst,src1,src2[,f]
- { uml::OP_SUBB, &drcbe_x86::op_subc }, // SUBB dst,src1,src2[,f]
- { uml::OP_CMP, &drcbe_x86::op_cmp }, // CMP src1,src2[,f]
- { uml::OP_MULU, &drcbe_x86::op_mulu }, // MULU dst,edst,src1,src2[,f]
- { uml::OP_MULS, &drcbe_x86::op_muls }, // MULS dst,edst,src1,src2[,f]
- { uml::OP_DIVU, &drcbe_x86::op_divu }, // DIVU dst,edst,src1,src2[,f]
- { uml::OP_DIVS, &drcbe_x86::op_divs }, // DIVS dst,edst,src1,src2[,f]
- { uml::OP_AND, &drcbe_x86::op_and }, // AND dst,src1,src2[,f]
- { uml::OP_TEST, &drcbe_x86::op_test }, // TEST src1,src2[,f]
- { uml::OP_OR, &drcbe_x86::op_or }, // OR dst,src1,src2[,f]
- { uml::OP_XOR, &drcbe_x86::op_xor }, // XOR dst,src1,src2[,f]
- { uml::OP_LZCNT, &drcbe_x86::op_lzcnt }, // LZCNT dst,src[,f]
- { uml::OP_TZCNT, &drcbe_x86::op_tzcnt }, // TZCNT dst,src[,f]
- { uml::OP_BSWAP, &drcbe_x86::op_bswap }, // BSWAP dst,src
- { uml::OP_SHL, &drcbe_x86::op_shl }, // SHL dst,src,count[,f]
- { uml::OP_SHR, &drcbe_x86::op_shr }, // SHR dst,src,count[,f]
- { uml::OP_SAR, &drcbe_x86::op_sar }, // SAR dst,src,count[,f]
- { uml::OP_ROL, &drcbe_x86::op_rol }, // ROL dst,src,count[,f]
- { uml::OP_ROLC, &drcbe_x86::op_rolc }, // ROLC dst,src,count[,f]
- { uml::OP_ROR, &drcbe_x86::op_ror }, // ROR dst,src,count[,f]
- { uml::OP_RORC, &drcbe_x86::op_rorc }, // RORC dst,src,count[,f]
+ case uml::OP_LOAD: op_load(a, inst); break; // LOAD dst,base,index,size
+ case uml::OP_LOADS: op_loads(a, inst); break; // LOADS dst,base,index,size
+ case uml::OP_STORE: op_store(a, inst); break; // STORE base,index,src,size
+ case uml::OP_READ: op_read(a, inst); break; // READ dst,src1,spacesize
+ case uml::OP_READM: op_readm(a, inst); break; // READM dst,src1,mask,spacesize
+ case uml::OP_WRITE: op_write(a, inst); break; // WRITE dst,src1,spacesize
+ case uml::OP_WRITEM: op_writem(a, inst); break; // WRITEM dst,src1,spacesize
+ case uml::OP_CARRY: op_carry(a, inst); break; // CARRY src,bitnum
+ case uml::OP_SET: op_set(a, inst); break; // SET dst,c
+ case uml::OP_MOV: op_mov(a, inst); break; // MOV dst,src[,c]
+ case uml::OP_SEXT: op_sext(a, inst); break; // SEXT dst,src
+ case uml::OP_ROLAND: op_roland(a, inst); break; // ROLAND dst,src1,src2,src3
+ case uml::OP_ROLINS: op_rolins(a, inst); break; // ROLINS dst,src1,src2,src3
+ case uml::OP_ADD: op_add(a, inst); break; // ADD dst,src1,src2[,f]
+ case uml::OP_ADDC: op_addc(a, inst); break; // ADDC dst,src1,src2[,f]
+ case uml::OP_SUB: op_sub(a, inst); break; // SUB dst,src1,src2[,f]
+ case uml::OP_SUBB: op_subc(a, inst); break; // SUBB dst,src1,src2[,f]
+ case uml::OP_CMP: op_cmp(a, inst); break; // CMP src1,src2[,f]
+ case uml::OP_MULU: op_mulu(a, inst); break; // MULU dst,edst,src1,src2[,f]
+ case uml::OP_MULULW: op_mululw(a, inst); break; // MULULW dst,src1,src2[,f]
+ case uml::OP_MULS: op_muls(a, inst); break; // MULS dst,edst,src1,src2[,f]
+ case uml::OP_MULSLW: op_mulslw(a, inst); break; // MULSLW dst,src1,src2[,f]
+ case uml::OP_DIVU: op_divu(a, inst); break; // DIVU dst,edst,src1,src2[,f]
+ case uml::OP_DIVS: op_divs(a, inst); break; // DIVS dst,edst,src1,src2[,f]
+ case uml::OP_AND: op_and(a, inst); break; // AND dst,src1,src2[,f]
+ case uml::OP_TEST: op_test(a, inst); break; // TEST src1,src2[,f]
+ case uml::OP_OR: op_or(a, inst); break; // OR dst,src1,src2[,f]
+ case uml::OP_XOR: op_xor(a, inst); break; // XOR dst,src1,src2[,f]
+ case uml::OP_LZCNT: op_lzcnt(a, inst); break; // LZCNT dst,src[,f]
+ case uml::OP_TZCNT: op_tzcnt(a, inst); break; // TZCNT dst,src[,f]
+ case uml::OP_BSWAP: op_bswap(a, inst); break; // BSWAP dst,src
+ case uml::OP_SHL: op_shl(a, inst); break; // SHL dst,src,count[,f]
+ case uml::OP_SHR: op_shr(a, inst); break; // SHR dst,src,count[,f]
+ case uml::OP_SAR: op_sar(a, inst); break; // SAR dst,src,count[,f]
+ case uml::OP_ROL: op_rol(a, inst); break; // ROL dst,src,count[,f]
+ case uml::OP_ROLC: op_rolc(a, inst); break; // ROLC dst,src,count[,f]
+ case uml::OP_ROR: op_ror(a, inst); break; // ROR dst,src,count[,f]
+ case uml::OP_RORC: op_rorc(a, inst); break; // RORC dst,src,count[,f]
// Floating Point Operations
- { uml::OP_FLOAD, &drcbe_x86::op_fload }, // FLOAD dst,base,index
- { uml::OP_FSTORE, &drcbe_x86::op_fstore }, // FSTORE base,index,src
- { uml::OP_FREAD, &drcbe_x86::op_fread }, // FREAD dst,space,src1
- { uml::OP_FWRITE, &drcbe_x86::op_fwrite }, // FWRITE space,dst,src1
- { uml::OP_FMOV, &drcbe_x86::op_fmov }, // FMOV dst,src1[,c]
- { uml::OP_FTOINT, &drcbe_x86::op_ftoint }, // FTOINT dst,src1,size,round
- { uml::OP_FFRINT, &drcbe_x86::op_ffrint }, // FFRINT dst,src1,size
- { uml::OP_FFRFLT, &drcbe_x86::op_ffrflt }, // FFRFLT dst,src1,size
- { uml::OP_FRNDS, &drcbe_x86::op_frnds }, // FRNDS dst,src1
- { uml::OP_FADD, &drcbe_x86::op_fadd }, // FADD dst,src1,src2
- { uml::OP_FSUB, &drcbe_x86::op_fsub }, // FSUB dst,src1,src2
- { uml::OP_FCMP, &drcbe_x86::op_fcmp }, // FCMP src1,src2
- { uml::OP_FMUL, &drcbe_x86::op_fmul }, // FMUL dst,src1,src2
- { uml::OP_FDIV, &drcbe_x86::op_fdiv }, // FDIV dst,src1,src2
- { uml::OP_FNEG, &drcbe_x86::op_fneg }, // FNEG dst,src1
- { uml::OP_FABS, &drcbe_x86::op_fabs }, // FABS dst,src1
- { uml::OP_FSQRT, &drcbe_x86::op_fsqrt }, // FSQRT dst,src1
- { uml::OP_FRECIP, &drcbe_x86::op_frecip }, // FRECIP dst,src1
- { uml::OP_FRSQRT, &drcbe_x86::op_frsqrt }, // FRSQRT dst,src1
- { uml::OP_FCOPYI, &drcbe_x86::op_fcopyi }, // FCOPYI dst,src
- { uml::OP_ICOPYF, &drcbe_x86::op_icopyf }, // ICOPYF dst,src
+ case uml::OP_FLOAD: op_fload(a, inst); break; // FLOAD dst,base,index
+ case uml::OP_FSTORE: op_fstore(a, inst); break; // FSTORE base,index,src
+ case uml::OP_FREAD: op_fread(a, inst); break; // FREAD dst,space,src1
+ case uml::OP_FWRITE: op_fwrite(a, inst); break; // FWRITE space,dst,src1
+ case uml::OP_FMOV: op_fmov(a, inst); break; // FMOV dst,src1[,c]
+ case uml::OP_FTOINT: op_ftoint(a, inst); break; // FTOINT dst,src1,size,round
+ case uml::OP_FFRINT: op_ffrint(a, inst); break; // FFRINT dst,src1,size
+ case uml::OP_FFRFLT: op_ffrflt(a, inst); break; // FFRFLT dst,src1,size
+ case uml::OP_FRNDS: op_frnds(a, inst); break; // FRNDS dst,src1
+ case uml::OP_FADD: op_fadd(a, inst); break; // FADD dst,src1,src2
+ case uml::OP_FSUB: op_fsub(a, inst); break; // FSUB dst,src1,src2
+ case uml::OP_FCMP: op_fcmp(a, inst); break; // FCMP src1,src2
+ case uml::OP_FMUL: op_fmul(a, inst); break; // FMUL dst,src1,src2
+ case uml::OP_FDIV: op_fdiv(a, inst); break; // FDIV dst,src1,src2
+ case uml::OP_FNEG: op_fneg(a, inst); break; // FNEG dst,src1
+ case uml::OP_FABS: op_fabs(a, inst); break; // FABS dst,src1
+ case uml::OP_FSQRT: op_fsqrt(a, inst); break; // FSQRT dst,src1
+ case uml::OP_FRECIP: op_frecip(a, inst); break; // FRECIP dst,src1
+ case uml::OP_FRSQRT: op_frsqrt(a, inst); break; // FRSQRT dst,src1
+ case uml::OP_FCOPYI: op_fcopyi(a, inst); break; // FCOPYI dst,src
+ case uml::OP_ICOPYF: op_icopyf(a, inst); break; // ICOPYF dst,src
+
+ default: throw emu_fatalerror("drcbe_x86(%s): unhandled opcode %u\n", m_device.tag(), inst.opcode());
+ }
};
-
//**************************************************************************
// INLINE FUNCTIONS
//**************************************************************************
@@ -298,7 +788,7 @@ const drcbe_x86::opcode_table_entry drcbe_x86::s_opcode_table_source[] =
// into a reduced set
//-------------------------------------------------
-drcbe_x86::be_parameter::be_parameter(drcbe_x86 &drcbe, const parameter &param, uint32_t allowed)
+drcbe_x86::be_parameter::be_parameter(drcbe_x86 &drcbe, parameter const &param, uint32_t allowed)
{
int regnum;
@@ -347,21 +837,28 @@ drcbe_x86::be_parameter::be_parameter(drcbe_x86 &drcbe, const parameter &param,
// checkparam
//-------------------------------------------------
-inline int drcbe_x86::be_parameter::select_register(int defreg) const
+inline Gpd drcbe_x86::be_parameter::select_register(Gpd const &defreg) const
+{
+ if (m_type == PTYPE_INT_REGISTER)
+ return Gpd(m_value);
+ return defreg;
+}
+
+inline Xmm drcbe_x86::be_parameter::select_register(Xmm defreg) const
{
- if (m_type == PTYPE_INT_REGISTER || m_type == PTYPE_FLOAT_REGISTER || m_type == PTYPE_VECTOR_REGISTER)
- return m_value;
+ if (m_type == PTYPE_FLOAT_REGISTER)
+ return Xmm(m_value);
return defreg;
}
-inline int drcbe_x86::be_parameter::select_register(int defreg, const be_parameter &checkparam) const
+template <typename T> T drcbe_x86::be_parameter::select_register(T defreg, be_parameter const &checkparam) const
{
if (*this == checkparam)
return defreg;
return select_register(defreg);
}
-inline int drcbe_x86::be_parameter::select_register(int defreg, const be_parameter &checkparam, const be_parameter &checkparam2) const
+template <typename T> T drcbe_x86::be_parameter::select_register(T defreg, be_parameter const &checkparam, be_parameter const &checkparam2) const
{
if (*this == checkparam || *this == checkparam2)
return defreg;
@@ -400,16 +897,34 @@ inline void drcbe_x86::normalize_commutative(be_parameter &inner, be_parameter &
// two 32-bit operations
//-------------------------------------------------
-inline void drcbe_x86::emit_combine_z_flags(x86code *&dst)
+inline void drcbe_x86::emit_combine_z_flags(Assembler &a)
{
// this assumes that the flags from the low 32-bit op are on the stack
// and the flags from the high 32-bit op are live
- emit_pushf(dst); // pushf
- emit_mov_r32_m32(dst, REG_ECX, MBD(REG_ESP, 4)); // mov ecx,[esp+4]
- emit_or_r32_imm(dst, REG_ECX, ~0x40); // or ecx,~0x40
- emit_and_m32_r32(dst, MBD(REG_ESP, 0), REG_ECX); // and [esp],ecx
- emit_popf(dst); // popf
- emit_lea_r32_m32(dst, REG_ESP, MBD(REG_ESP, 4)); // lea esp,[esp+4]
+ a.pushfd();
+
+ a.mov(ecx, dword_ptr(esp, 4)); // zero flag
+ a.or_(ecx, ~0x40);
+ a.and_(dword_ptr(esp, 0), ecx);
+
+ a.popfd();
+ a.lea(esp, ptr(esp, 4));
+}
+
+inline void drcbe_x86::emit_combine_zs_flags(Assembler &a)
+{
+ // this assumes that the flags from the low 32-bit op are on the stack
+ // and the flags from the high 32-bit op are live
+ a.pushfd();
+
+ a.mov(ecx, dword_ptr(esp, 4)); // zero flag
+ a.or_(ecx, ~(0x40 | 0x80));
+ a.and_(dword_ptr(esp, 0), ecx);
+ a.and_(ecx, 0x80); // sign flag
+ a.or_(dword_ptr(esp, 0), ecx);
+
+ a.popfd();
+ a.lea(esp, ptr(esp, 4));
}
@@ -418,15 +933,15 @@ inline void drcbe_x86::emit_combine_z_flags(x86code *&dst)
// flags from two 32-bit shift left operations
//-------------------------------------------------
-inline void drcbe_x86::emit_combine_z_shl_flags(x86code *&dst)
+inline void drcbe_x86::emit_combine_z_shl_flags(Assembler &a)
{
// this assumes that the flags from the high 32-bit op are on the stack
// and the flags from the low 32-bit op are live
- emit_pushf(dst); // pushf
- emit_pop_r32(dst, REG_ECX); // pop ecx
- emit_or_r32_imm(dst, REG_ECX, ~0x40); // or ecx,~0x40
- emit_and_m32_r32(dst, MBD(REG_ESP, 0), REG_ECX); // and [esp],ecx
- emit_popf(dst); // popf
+ a.pushfd();
+ a.pop(ecx);
+ a.or_(ecx, ~0x40);
+ a.and_(ptr(esp, 0), ecx);
+ a.popfd();
}
@@ -437,8 +952,8 @@ inline void drcbe_x86::emit_combine_z_shl_flags(x86code *&dst)
inline void drcbe_x86::reset_last_upper_lower_reg()
{
- m_last_lower_reg = REG_NONE;
- m_last_upper_reg = REG_NONE;
+ m_last_lower_reg = Gp();
+ m_last_upper_reg = Gp();
}
@@ -447,13 +962,13 @@ inline void drcbe_x86::reset_last_upper_lower_reg()
// loaded a lower register
//-------------------------------------------------
-inline void drcbe_x86::set_last_lower_reg(x86code *&dst, const be_parameter &param, uint8_t reglo)
+inline void drcbe_x86::set_last_lower_reg(Assembler &a, be_parameter const &param, Gp const &reglo)
{
if (param.is_memory())
{
m_last_lower_reg = reglo;
m_last_lower_addr = (uint32_t *)((uintptr_t)param.memory());
- m_last_lower_pc = dst;
+ m_last_lower_pc = (x86code *)(a.code()->baseAddress() + a.offset());
}
}
@@ -463,11 +978,11 @@ inline void drcbe_x86::set_last_lower_reg(x86code *&dst, const be_parameter &par
// loaded an upper register
//-------------------------------------------------
-inline void drcbe_x86::set_last_upper_reg(x86code *&dst, const be_parameter &param, uint8_t reghi)
+inline void drcbe_x86::set_last_upper_reg(Assembler &a, be_parameter const &param, Gp const &reghi)
{
m_last_upper_reg = reghi;
m_last_upper_addr = (param.is_int_register()) ? m_reghi[param.ireg()] : (uint32_t *)((uintptr_t)param.memory(4));
- m_last_upper_pc = dst;
+ m_last_upper_pc = (x86code *)(a.code()->baseAddress() + a.offset());
}
@@ -476,9 +991,9 @@ inline void drcbe_x86::set_last_upper_reg(x86code *&dst, const be_parameter &par
// skip re-loading a lower half of a register
//-------------------------------------------------
-inline bool drcbe_x86::can_skip_lower_load(x86code *&dst, uint32_t *memref, uint8_t reglo)
+inline bool drcbe_x86::can_skip_lower_load(Assembler &a, uint32_t *memref, Gp const &reglo)
{
- return (dst == m_last_lower_pc && memref == m_last_lower_addr && reglo == m_last_lower_reg);
+ return ((x86code *)(a.code()->baseAddress() + a.offset()) == m_last_lower_pc && memref == m_last_lower_addr && reglo == m_last_lower_reg);
}
@@ -487,66 +1002,46 @@ inline bool drcbe_x86::can_skip_lower_load(x86code *&dst, uint32_t *memref, uint
// skip re-loading an upper half of a register
//-------------------------------------------------
-inline bool drcbe_x86::can_skip_upper_load(x86code *&dst, uint32_t *memref, uint8_t reghi)
+inline bool drcbe_x86::can_skip_upper_load(Assembler &a, uint32_t *memref, Gp const &reghi)
{
- return (dst == m_last_upper_pc && memref == m_last_upper_addr && reghi == m_last_upper_reg);
+ return ((x86code *)(a.code()->baseAddress() + a.offset()) == m_last_upper_pc && memref == m_last_upper_addr && reghi == m_last_upper_reg);
}
//-------------------------------------------------
-// track_resolve_link - wrapper for resolve_link
-// that resets all register tracking info
-//-------------------------------------------------
-
-inline void drcbe_x86::track_resolve_link(x86code *&destptr, const emit_link &linkinfo)
-{
- reset_last_upper_lower_reg();
- resolve_link(destptr, linkinfo);
-}
-
-#define resolve_link INVALID
-
-
-
-//**************************************************************************
-// BACKEND CALLBACKS
-//**************************************************************************
-
-//-------------------------------------------------
// drcbe_x86 - constructor
//-------------------------------------------------
-drcbe_x86::drcbe_x86(drcuml_state &drcuml, device_t &device, drc_cache &cache, uint32_t flags, int modes, int addrbits, int ignorebits)
- : drcbe_interface(drcuml, cache, device),
- m_hash(cache, modes, addrbits, ignorebits),
- m_map(cache, 0),
- m_labels(cache),
- m_log(nullptr),
- m_logged_common(false),
- m_sse3(false),
- m_entry(nullptr),
- m_exit(nullptr),
- m_nocode(nullptr),
- m_save(nullptr),
- m_restore(nullptr),
- m_last_lower_reg(REG_NONE),
- m_last_lower_pc(nullptr),
- m_last_lower_addr(nullptr),
- m_last_upper_reg(REG_NONE),
- m_last_upper_pc(nullptr),
- m_last_upper_addr(nullptr),
- m_fptemp(0),
- m_fpumode(0),
- m_fmodesave(0),
- m_stacksave(nullptr),
- m_hashstacksave(nullptr),
- m_reslo(0),
- m_reshi(0),
- m_fixup_label(&drcbe_x86::fixup_label, this),
- m_fixup_exception(&drcbe_x86::fixup_exception, this)
+drcbe_x86::drcbe_x86(drcuml_state &drcuml, device_t &device, drc_cache &cache, uint32_t flags, int modes, int addrbits, int ignorebits) :
+ drcbe_interface(drcuml, cache, device)
+ , m_hash(cache, modes, addrbits, ignorebits)
+ , m_map(cache, 0)
+ , m_log(nullptr)
+ , m_log_asmjit(nullptr)
+ , m_logged_common(false)
+ , m_sse3(CpuInfo::host().features().x86().hasSSE3())
+ , m_entry(nullptr)
+ , m_exit(nullptr)
+ , m_nocode(nullptr)
+ , m_endofblock(nullptr)
+ , m_save(nullptr)
+ , m_restore(nullptr)
+ , m_last_lower_reg(Gp())
+ , m_last_lower_pc(nullptr)
+ , m_last_lower_addr(nullptr)
+ , m_last_upper_reg(Gp())
+ , m_last_upper_pc(nullptr)
+ , m_last_upper_addr(nullptr)
+ , m_fptemp(0)
+ , m_fpumode(0)
+ , m_fmodesave(0)
+ , m_stacksave(nullptr)
+ , m_hashstacksave(nullptr)
+ , m_reslo(0)
+ , m_reshi(0)
{
// compute hi pointers for each register
- for (int regnum = 0; regnum < ARRAY_LENGTH(int_register_map); regnum++)
+ for (int regnum = 0; regnum < std::size(int_register_map); regnum++)
if (int_register_map[regnum] != 0)
{
m_reglo[int_register_map[regnum]] = &m_state.r[regnum].w.l;
@@ -554,7 +1049,7 @@ drcbe_x86::drcbe_x86(drcuml_state &drcuml, device_t &device, drc_cache &cache, u
}
// build the flags map (static but it doesn't hurt to regenerate it)
- for (int entry = 0; entry < ARRAY_LENGTH(flags_map); entry++)
+ for (int entry = 0; entry < std::size(flags_map); entry++)
{
uint8_t flags = 0;
if (entry & 0x001) flags |= FLAG_C;
@@ -564,7 +1059,7 @@ drcbe_x86::drcbe_x86(drcuml_state &drcuml, device_t &device, drc_cache &cache, u
if (entry & 0x800) flags |= FLAG_V;
flags_map[entry] = flags;
}
- for (int entry = 0; entry < ARRAY_LENGTH(flags_unmap); entry++)
+ for (int entry = 0; entry < std::size(flags_unmap); entry++)
{
uint32_t flags = 0;
if (entry & FLAG_C) flags |= 0x001;
@@ -575,15 +1070,23 @@ drcbe_x86::drcbe_x86(drcuml_state &drcuml, device_t &device, drc_cache &cache, u
flags_unmap[entry] = flags;
}
- // build the opcode table (static but it doesn't hurt to regenerate it)
- for (auto & elem : s_opcode_table_source)
- s_opcode_table[elem.opcode] = elem.func;
+ // resolve the actual addresses of member functions we need to call
+ m_drcmap_get_value.set(m_map, &drc_map_variables::get_value);
+ if (!m_drcmap_get_value)
+ throw emu_fatalerror("Error resolving map variable get value function!\n");
+ m_memory_accessors.resize(m_space.size());
+ for (int space = 0; m_space.size() > space; ++space)
+ {
+ if (m_space[space])
+ m_memory_accessors[space].set(*m_space[space]);
+ }
// create the log
if (device.machine().options().drc_log_native())
{
std::string filename = std::string("drcbex86_").append(device.shortname()).append(".asm");
m_log = x86log_create_context(filename.c_str());
+ m_log_asmjit = fopen(std::string("drcbex86_asmjit_").append(device.shortname()).append(".asm").c_str(), "w");
}
}
@@ -597,8 +1100,50 @@ drcbe_x86::~drcbe_x86()
// free the log context
if (m_log != nullptr)
x86log_free_context(m_log);
+
+ if (m_log_asmjit)
+ fclose(m_log_asmjit);
}
+size_t drcbe_x86::emit(CodeHolder &ch)
+{
+ Error err;
+
+ // the following three calls aren't currently required, but may be if
+ // other asmjit features are used in the future
+ if (false)
+ {
+ err = ch.flatten();
+ if (err)
+ throw emu_fatalerror("asmjit::CodeHolder::flatten() error %d", err);
+
+ err = ch.resolveUnresolvedLinks();
+ if (err)
+ throw emu_fatalerror("asmjit::CodeHolder::resolveUnresolvedLinks() error %d", err);
+
+ err = ch.relocateToBase(ch.baseAddress());
+ if (err)
+ throw emu_fatalerror("asmjit::CodeHolder::relocateToBase() error %d", err);
+ }
+
+ size_t const alignment = ch.baseAddress() - uint64_t(m_cache.top());
+ size_t const code_size = ch.codeSize();
+
+ // test if enough room remains in drc cache
+ drccodeptr *cachetop = m_cache.begin_codegen(alignment + code_size);
+ if (cachetop == nullptr)
+ return 0;
+
+ err = ch.copyFlattenedData(drccodeptr(ch.baseAddress()), code_size, CopySectionFlags::kPadTargetBuffer);
+ if (err)
+ throw emu_fatalerror("asmjit::CodeHolder::copyFlattenedData() error %d", err);
+
+ // update the drc cache and end codegen
+ *cachetop += alignment + code_size;
+ m_cache.end_codegen();
+
+ return code_size;
+}
//-------------------------------------------------
// reset - reset back-end specific state
@@ -611,139 +1156,161 @@ void drcbe_x86::reset()
x86log_printf(m_log, "%s", "\n\n===========\nCACHE RESET\n===========\n\n");
// generate a little bit of glue code to set up the environment
- drccodeptr *cachetop = m_cache.begin_codegen(500);
- if (cachetop == nullptr)
- fatalerror("Out of cache space after a reset!\n");
+ x86code *dst = (x86code *)m_cache.top();
- x86code *dst = (x86code *)*cachetop;
+ CodeHolder ch;
+ ch.init(Environment::host(), uint64_t(dst));
- // generate a simple CPUID stub
- uint32_t (*cpuid_ecx_stub)(void) = (uint32_t (*)(void))dst;
- emit_push_r32(dst, REG_EBX); // push ebx
- emit_mov_r32_imm(dst, REG_EAX, 1); // mov eax,1
- emit_cpuid(dst); // cpuid
- emit_mov_r32_r32(dst, REG_EAX, REG_ECX); // mov eax,ecx
- emit_pop_r32(dst, REG_EBX); // pop ebx
- emit_ret(dst); // ret
+ FileLogger logger(m_log_asmjit);
+ if (logger.file())
+ {
+ logger.setFlags(FormatFlags::kHexOffsets | FormatFlags::kHexImms | FormatFlags::kMachineCode);
+ logger.setIndentation(FormatIndentationGroup::kCode, 4);
+ ch.setLogger(&logger);
+ }
- // call it to determine if we have SSE3 support
- m_sse3 = (((*cpuid_ecx_stub)() & 1) != 0);
+ Assembler a(&ch);
+ if (logger.file())
+ a.addDiagnosticOptions(DiagnosticOptions::kValidateIntermediate);
// generate an entry point
m_entry = (x86_entry_point_func)dst;
- emit_mov_r32_m32(dst, REG_EAX, MBD(REG_ESP, 4)); // mov eax,[esp+4]
- emit_push_r32(dst, REG_EBX); // push ebx
- emit_push_r32(dst, REG_ESI); // push esi
- emit_push_r32(dst, REG_EDI); // push edi
- emit_push_r32(dst, REG_EBP); // push ebp
- emit_sub_r32_imm(dst, REG_ESP, 24); // sub esp,24
- emit_mov_m32_r32(dst, MABS(&m_hashstacksave), REG_ESP); // mov [hashstacksave],esp
- emit_sub_r32_imm(dst, REG_ESP, 4); // sub esp,4
- emit_mov_m32_r32(dst, MABS(&m_stacksave), REG_ESP); // mov [stacksave],esp
- emit_fstcw_m16(dst, MABS(&m_fpumode)); // fstcw [fpumode]
- emit_jmp_r32(dst, REG_EAX); // jmp eax
- if (m_log != nullptr && !m_logged_common)
- x86log_disasm_code_range(m_log, "entry_point", (x86code *)m_entry, dst);
+ a.bind(a.newNamedLabel("entry_point"));
+
+ FuncDetail entry_point;
+ entry_point.init(FuncSignature::build<uint32_t, x86code *>(CallConvId::kHost), Environment::host());
+
+ FuncFrame frame;
+ frame.init(entry_point);
+ frame.addDirtyRegs(ebx, esi, edi, ebp);
+ FuncArgsAssignment args(&entry_point);
+ args.assignAll(eax);
+ args.updateFuncFrame(frame);
+ frame.finalize();
+
+ a.emitProlog(frame);
+ a.emitArgsAssignment(frame, args);
+ a.sub(esp, 24); // sub esp,24
+ a.mov(MABS(&m_hashstacksave), esp); // mov [hashstacksave],esp
+ a.sub(esp, 4); // sub esp,4
+ a.mov(MABS(&m_stacksave), esp); // mov [stacksave],esp
+ a.fnstcw(MABS(&m_fpumode)); // fstcw [fpumode]
+ a.jmp(eax); // jmp eax
// generate an exit point
- m_exit = dst;
- emit_fldcw_m16(dst, MABS(&m_fpumode)); // fldcw [fpumode]
- emit_mov_r32_m32(dst, REG_ESP, MABS(&m_hashstacksave)); // mov esp,[hashstacksave]
- emit_add_r32_imm(dst, REG_ESP, 24); // add esp,24
- emit_pop_r32(dst, REG_EBP); // pop ebp
- emit_pop_r32(dst, REG_EDI); // pop edi
- emit_pop_r32(dst, REG_ESI); // pop esi
- emit_pop_r32(dst, REG_EBX); // pop ebx
- emit_ret(dst); // ret
- if (m_log != nullptr && !m_logged_common)
- x86log_disasm_code_range(m_log, "exit_point", m_exit, dst);
+ m_exit = dst + a.offset();
+ a.bind(a.newNamedLabel("exit_point"));
+ a.fldcw(MABS(&m_fpumode)); // fldcw [fpumode]
+ a.mov(esp, MABS(&m_hashstacksave)); // mov esp,[hashstacksave]
+ a.add(esp, 24); // add esp,24
+ a.emitEpilog(frame);
// generate a no code point
- m_nocode = dst;
- emit_ret(dst); // ret
- if (m_log != nullptr && !m_logged_common)
- x86log_disasm_code_range(m_log, "nocode", m_nocode, dst);
+ m_nocode = dst + a.offset();
+ a.bind(a.newNamedLabel("nocode_point"));
+ a.ret(); // ret
+
+ // generate an end-of-block handler point
+ m_endofblock = dst + a.offset();
+ a.bind(a.newNamedLabel("end_of_block_point"));
+ auto const [entrypoint, adjusted] = util::resolve_member_function(&drcbe_x86::end_of_block, *this);
+ if (USE_THISCALL)
+ a.mov(ecx, imm(adjusted));
+ else
+ a.mov(dword_ptr(esp, 0), imm(adjusted));
+ a.call(imm(entrypoint));
+ if (USE_THISCALL)
+ a.sub(esp, 4);
// generate a save subroutine
- m_save = dst;
- emit_pushf(dst); // pushf
- emit_pop_r32(dst, REG_EAX); // pop eax
- emit_and_r32_imm(dst, REG_EAX, 0x8c5); // and eax,0x8c5
- emit_mov_r8_m8(dst, REG_AL, MABSI(flags_map, REG_EAX)); // mov al,[flags_map]
- emit_mov_m8_r8(dst, MBD(REG_ECX, offsetof(drcuml_machine_state, flags)), REG_AL); // mov state->flags,al
- emit_mov_r8_m8(dst, REG_AL, MABS(&m_state.fmod)); // mov al,[fmod]
- emit_mov_m8_r8(dst, MBD(REG_ECX, offsetof(drcuml_machine_state, fmod)), REG_AL); // mov state->fmod,al
- emit_mov_r32_m32(dst, REG_EAX, MABS(&m_state.exp)); // mov eax,[exp]
- emit_mov_m32_r32(dst, MBD(REG_ECX, offsetof(drcuml_machine_state, exp)), REG_EAX); // mov state->exp,eax
- for (int regnum = 0; regnum < ARRAY_LENGTH(m_state.r); regnum++)
+ m_save = dst + a.offset();
+ a.bind(a.newNamedLabel("save"));
+ a.pushfd(); // pushf
+ a.pop(eax); // pop eax
+ a.and_(eax, 0x8c5); // and eax,0x8c5
+ a.mov(al, ptr(uintptr_t(flags_map), eax)); // mov al,[flags_map]
+ a.mov(ptr(ecx, offsetof(drcuml_machine_state, flags)), al); // mov state->flags,al
+ a.mov(al, MABS(&m_state.fmod)); // mov al,[fmod]
+ a.mov(ptr(ecx, offsetof(drcuml_machine_state, fmod)), al); // mov state->fmod,al
+ a.mov(eax, MABS(&m_state.exp)); // mov eax,[exp]
+ a.mov(ptr(ecx, offsetof(drcuml_machine_state, exp)), eax); // mov state->exp,eax
+ for (int regnum = 0; regnum < std::size(m_state.r); regnum++)
{
uintptr_t regoffsl = (uintptr_t)&((drcuml_machine_state *)nullptr)->r[regnum].w.l;
uintptr_t regoffsh = (uintptr_t)&((drcuml_machine_state *)nullptr)->r[regnum].w.h;
if (int_register_map[regnum] != 0)
- emit_mov_m32_r32(dst, MBD(REG_ECX, regoffsl), int_register_map[regnum]);
+ a.mov(ptr(ecx, regoffsl), Gpd(int_register_map[regnum]));
else
{
- emit_mov_r32_m32(dst, REG_EAX, MABS(&m_state.r[regnum].w.l));
- emit_mov_m32_r32(dst, MBD(REG_ECX, regoffsl), REG_EAX);
+ a.mov(eax, MABS(&m_state.r[regnum].w.l));
+ a.mov(ptr(ecx, regoffsl), eax);
}
- emit_mov_r32_m32(dst, REG_EAX, MABS(&m_state.r[regnum].w.h));
- emit_mov_m32_r32(dst, MBD(REG_ECX, regoffsh), REG_EAX);
+ a.mov(eax, MABS(&m_state.r[regnum].w.h));
+ a.mov(ptr(ecx, regoffsh), eax);
}
- for (int regnum = 0; regnum < ARRAY_LENGTH(m_state.f); regnum++)
+ for (int regnum = 0; regnum < std::size(m_state.f); regnum++)
{
uintptr_t regoffsl = (uintptr_t)&((drcuml_machine_state *)nullptr)->f[regnum].s.l;
uintptr_t regoffsh = (uintptr_t)&((drcuml_machine_state *)nullptr)->f[regnum].s.h;
- emit_mov_r32_m32(dst, REG_EAX, MABS(&m_state.f[regnum].s.l));
- emit_mov_m32_r32(dst, MBD(REG_ECX, regoffsl), REG_EAX);
- emit_mov_r32_m32(dst, REG_EAX, MABS(&m_state.f[regnum].s.h));
- emit_mov_m32_r32(dst, MBD(REG_ECX, regoffsh), REG_EAX);
+ a.mov(eax, MABS(&m_state.f[regnum].s.l));
+ a.mov(ptr(ecx, regoffsl), eax);
+ a.mov(eax, MABS(&m_state.f[regnum].s.h));
+ a.mov(ptr(ecx, regoffsh), eax);
}
- emit_ret(dst); // ret
- if (m_log != nullptr && !m_logged_common)
- x86log_disasm_code_range(m_log, "save", m_save, dst);
+ a.ret(); // ret
// generate a restore subroutine
- m_restore = dst;
- for (int regnum = 0; regnum < ARRAY_LENGTH(m_state.r); regnum++)
+ m_restore = dst + a.offset();
+ a.bind(a.newNamedLabel("restore"));
+ for (int regnum = 0; regnum < std::size(m_state.r); regnum++)
{
uintptr_t regoffsl = (uintptr_t)&((drcuml_machine_state *)nullptr)->r[regnum].w.l;
uintptr_t regoffsh = (uintptr_t)&((drcuml_machine_state *)nullptr)->r[regnum].w.h;
if (int_register_map[regnum] != 0)
- emit_mov_r32_m32(dst, int_register_map[regnum], MBD(REG_ECX, regoffsl));
+ a.mov(Gpd(int_register_map[regnum]), ptr(ecx, regoffsl));
else
{
- emit_mov_r32_m32(dst, REG_EAX, MBD(REG_ECX, regoffsl));
- emit_mov_m32_r32(dst, MABS(&m_state.r[regnum].w.l), REG_EAX);
+ a.mov(eax, ptr(ecx, regoffsl));
+ a.mov(MABS(&m_state.r[regnum].w.l), eax);
}
- emit_mov_r32_m32(dst, REG_EAX, MBD(REG_ECX, regoffsh));
- emit_mov_m32_r32(dst, MABS(&m_state.r[regnum].w.h), REG_EAX);
+ a.mov(eax, ptr(ecx, regoffsh));
+ a.mov(MABS(&m_state.r[regnum].w.h), eax);
}
- for (int regnum = 0; regnum < ARRAY_LENGTH(m_state.f); regnum++)
+ for (int regnum = 0; regnum < std::size(m_state.f); regnum++)
{
uintptr_t regoffsl = (uintptr_t)&((drcuml_machine_state *)nullptr)->f[regnum].s.l;
uintptr_t regoffsh = (uintptr_t)&((drcuml_machine_state *)nullptr)->f[regnum].s.h;
- emit_mov_r32_m32(dst, REG_EAX, MBD(REG_ECX, regoffsl));
- emit_mov_m32_r32(dst, MABS(&m_state.f[regnum].s.l), REG_EAX);
- emit_mov_r32_m32(dst, REG_EAX, MBD(REG_ECX, regoffsh));
- emit_mov_m32_r32(dst, MABS(&m_state.f[regnum].s.h), REG_EAX);
- }
- emit_movzx_r32_m8(dst, REG_EAX, MBD(REG_ECX, offsetof(drcuml_machine_state, fmod)));// movzx eax,state->fmod
- emit_and_r32_imm(dst, REG_EAX, 3); // and eax,3
- emit_mov_m8_r8(dst, MABS(&m_state.fmod), REG_AL); // mov [fmod],al
- emit_fldcw_m16(dst, MABSI(&fp_control[0], REG_EAX, 2)); // fldcw fp_control[eax]
- emit_mov_r32_m32(dst, REG_EAX, MBD(REG_ECX, offsetof(drcuml_machine_state, exp))); // mov eax,state->exp
- emit_mov_m32_r32(dst, MABS(&m_state.exp), REG_EAX); // mov [exp],eax
- emit_movzx_r32_m8(dst, REG_EAX, MBD(REG_ECX, offsetof(drcuml_machine_state, flags)));// movzx eax,state->flags
- emit_push_m32(dst, MABSI(flags_unmap, REG_EAX, 4)); // push flags_unmap[eax*4]
- emit_popf(dst); // popf
- emit_ret(dst); // ret
+ a.mov(eax, ptr(ecx, regoffsl));
+ a.mov(MABS(&m_state.f[regnum].s.l), eax);
+ a.mov(eax, ptr(ecx, regoffsh));
+ a.mov(MABS(&m_state.f[regnum].s.h), eax);
+ }
+ a.movzx(eax, byte_ptr(ecx, offsetof(drcuml_machine_state, fmod))); // movzx eax,state->fmod
+ a.and_(eax, 3); // and eax,3
+ a.mov(MABS(&m_state.fmod), al); // mov [fmod],al
+ a.fldcw(word_ptr(uintptr_t(&fp_control[0]), eax, 1)); // fldcw fp_control[eax*2]
+ a.mov(eax, ptr(ecx, offsetof(drcuml_machine_state, exp))); // mov eax,state->exp
+ a.mov(MABS(&m_state.exp), eax); // mov [exp],eax
+ a.movzx(eax, byte_ptr(ecx, offsetof(drcuml_machine_state, flags))); // movzx eax,state->flags
+ a.push(dword_ptr(uintptr_t(flags_unmap), eax, 2)); // push flags_unmap[eax*4]
+ a.popfd(); // popf
+ a.ret(); // ret
+
+
+ // emit the generated code
+ size_t bytes = emit(ch);
+
if (m_log != nullptr && !m_logged_common)
- x86log_disasm_code_range(m_log, "restore", m_restore, dst);
+ {
+ x86log_disasm_code_range(m_log, "entry_point", dst, m_exit);
+ x86log_disasm_code_range(m_log, "exit_point", m_exit, m_nocode);
+ x86log_disasm_code_range(m_log, "nocode_point", m_nocode, m_endofblock);
+ x86log_disasm_code_range(m_log, "end_of_block", m_endofblock, m_save);
+ x86log_disasm_code_range(m_log, "save", m_save, m_restore);
+ x86log_disasm_code_range(m_log, "restore", m_restore, dst + bytes);
- // finish up codegen
- *cachetop = dst;
- m_cache.end_codegen();
- m_logged_common = true;
+ m_logged_common = true;
+ }
// reset our hash tables
m_hash.reset();
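The restore subroutine above rebuilds the host flag state by using the saved UML flag byte as an index into flags_unmap[] and loading the result wholesale with push/popfd, mirroring the 0x8c5 mask (CF, PF, ZF, SF, OF) kept by the save subroutine. The table itself is filled in elsewhere in the back-end; the sketch below shows one construction consistent with that mask, so the exact bit assignments are an assumption.

    #include <cstdint>

    // Hypothetical rebuild of a flags_unmap-style table: place each packed UML
    // flag bit at the EFLAGS position implied by the 0x8c5 mask used above.
    uint32_t unmap[0x20];

    void build_flags_unmap()
    {
        for (uint32_t entry = 0; entry < 0x20; entry++)
        {
            uint32_t eflags = 0;
            if (entry & 0x01) eflags |= 0x001;   // carry      -> CF (bit 0)
            if (entry & 0x02) eflags |= 0x800;   // overflow   -> OF (bit 11)
            if (entry & 0x04) eflags |= 0x040;   // zero       -> ZF (bit 6)
            if (entry & 0x08) eflags |= 0x080;   // sign       -> SF (bit 7)
            if (entry & 0x10) eflags |= 0x004;   // unordered  -> PF (bit 2)
            unmap[entry] = eflags;
        }
    }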
@@ -759,6 +1326,7 @@ void drcbe_x86::reset()
int drcbe_x86::execute(code_handle &entry)
{
// call our entry point which will jump to the destination
+ m_cache.codegen_complete();
return (*m_entry)((x86code *)entry.codeptr());
}
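For context, m_entry here is simply a function pointer into the code cache; a hedged sketch of the call shape, assuming (this is an assumption, not the back-end's declared API) a signature along the lines of int(*)(void *):

    // Sketch only: names and signature are assumptions for illustration.
    using entry_func = int (*)(void *translated_code);

    int run_block(void *entry_stub, void *translated_code)
    {
        // the stub saves callee-saved registers, sets up the UML state and
        // then jumps to the translated block passed as its argument
        return reinterpret_cast<entry_func>(entry_stub)(translated_code);
    }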
@@ -769,58 +1337,100 @@ int drcbe_x86::execute(code_handle &entry)
void drcbe_x86::generate(drcuml_block &block, const instruction *instlist, uint32_t numinst)
{
+ // do this here because device.debug() isn't initialised at construction time
+ if (!m_debug_cpu_instruction_hook && (m_device.machine().debug_flags & DEBUG_FLAG_ENABLED))
+ {
+ m_debug_cpu_instruction_hook.set(*m_device.debug(), &device_debug::instruction_hook);
+ if (!m_debug_cpu_instruction_hook)
+ throw emu_fatalerror("Error resolving debugger instruction hook member function!\n");
+ }
+
// tell all of our utility objects that a block is beginning
m_hash.block_begin(block, instlist, numinst);
- m_labels.block_begin(block);
m_map.block_begin(block);
- // begin codegen; fail if we can't
- drccodeptr *cachetop = m_cache.begin_codegen(numinst * 8 * 4);
- if (cachetop == nullptr)
- block.abort();
+ // compute the base by aligning the cache top to a cache line
+ auto [err, linesize] = osd_get_cache_line_size();
+ uintptr_t linemask = 63;
+ if (err)
+ {
+ osd_printf_verbose("Error getting cache line size (%s:%d %s), assuming 64 bytes\n", err.category().name(), err.value(), err.message());
+ }
+ else
+ {
+ assert(linesize);
+ linemask = linesize - 1;
+ for (unsigned shift = 1; linemask & (linemask + 1); ++shift)
+ linemask |= linemask >> shift;
+ }
+ x86code *dst = (x86code *)(uintptr_t(m_cache.top() + linemask) & ~linemask);
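The loop above only matters when the reported cache line size is not a power of two: it smears the set bits of linemask downwards until linemask+1 is a power of two, so the rounding of the cache top always lands on an aligned boundary. A standalone illustration using the same rounding rule:

    #include <cstdint>

    // Round 'top' up to the next multiple of a (possibly non-power-of-two)
    // cache line size, the same way the code above computes 'dst'.
    uintptr_t align_to_line(uintptr_t top, uintptr_t linesize)
    {
        uintptr_t linemask = linesize - 1;
        for (unsigned shift = 1; linemask & (linemask + 1); ++shift)
            linemask |= linemask >> shift;           // e.g. 96-1=95 -> 127
        return (top + linemask) & ~linemask;
    }

    // align_to_line(0x1001, 64) == 0x1040; align_to_line(0x1001, 96) == 0x1080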
- // compute the base by aligning the cache top to a cache line (assumed to be 64 bytes)
- x86code *base = (x86code *)(((uintptr_t)*cachetop + 63) & ~63);
- x86code *dst = base;
+ CodeHolder ch;
+ ch.init(Environment::host(), uint64_t(dst));
+ ThrowableErrorHandler e;
+ ch.setErrorHandler(&e);
+
+ FileLogger logger(m_log_asmjit);
+ if (logger.file())
+ {
+ logger.setFlags(FormatFlags::kHexOffsets | FormatFlags::kHexImms | FormatFlags::kMachineCode);
+ logger.setIndentation(FormatIndentationGroup::kCode, 4);
+ ch.setLogger(&logger);
+ }
+
+ Assembler a(&ch);
+ if (logger.file())
+ a.addDiagnosticOptions(DiagnosticOptions::kValidateIntermediate);
// generate code
- const char *blockname = nullptr;
+ std::string blockname;
for (int inum = 0; inum < numinst; inum++)
{
const instruction &inst = instlist[inum];
- assert(inst.opcode() < ARRAY_LENGTH(s_opcode_table));
+
+ // must remain in scope until output
+ std::string dasm;
// add a comment
- if (m_log != nullptr)
+ if (m_log)
{
- std::string dasm = inst.disasm(&m_drcuml);
- x86log_add_comment(m_log, dst, "%s", dasm.c_str());
+ dasm = inst.disasm(&m_drcuml);
+ x86log_add_comment(m_log, dst + a.offset(), "%s", dasm.c_str());
+ a.setInlineComment(dasm.c_str());
}
// extract a blockname
- if (blockname == nullptr)
+ if (blockname.empty())
{
if (inst.opcode() == OP_HANDLE)
blockname = inst.param(0).handle().string();
else if (inst.opcode() == OP_HASH)
- blockname = string_format("Code: mode=%d PC=%08X", (uint32_t)inst.param(0).immediate(), (offs_t)inst.param(1).immediate()).c_str();
+ blockname = string_format("Code: mode=%d PC=%08X", (uint32_t)inst.param(0).immediate(), (offs_t)inst.param(1).immediate());
}
// generate code
- (this->*s_opcode_table[inst.opcode()])(dst, inst);
+ generate_one(a, inst);
}
- // complete codegen
- *cachetop = (drccodeptr)dst;
- m_cache.end_codegen();
+ // catch falling off the end of a block
+ if (m_log)
+ {
+ x86log_add_comment(m_log, dst + a.offset(), "%s", "end of block");
+ a.setInlineComment("end of block");
+ }
+ a.jmp(imm(m_endofblock));
+
+ // emit the generated code
+ size_t const bytes = emit(ch);
+ if (!bytes)
+ block.abort();
// log it
- if (m_log != nullptr)
- x86log_disasm_code_range(m_log, (blockname == nullptr) ? "Unknown block" : blockname, base, m_cache.top());
+ if (m_log)
+ x86log_disasm_code_range(m_log, (blockname.empty()) ? "Unknown block" : blockname.c_str(), dst, dst + bytes);
// tell all of our utility objects that the block is finished
m_hash.block_end(block);
- m_labels.block_end(block);
m_map.block_end(block);
}
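The emit(ch) helper used above is outside this diff; the following is a minimal, self-contained sketch of the usual asmjit sequence for generating position-dependent code at a known base address and copying the bytes into an already-allocated buffer. The buffer handling (no executable mapping) is a simplification for illustration, not the back-end's actual implementation.

    #include "asmjit/src/asmjit/asmjit.h"
    #include <cstdint>
    #include <vector>

    using namespace asmjit;

    size_t assemble_at(uint64_t base, std::vector<uint8_t> &out)
    {
        CodeHolder code;
        code.init(Environment::host(), base);       // generate for this base address

        x86::Assembler a(&code);
        a.mov(x86::eax, 42);                         // trivial payload
        a.ret();

        code.flatten();                              // lay out all sections
        code.resolveUnresolvedLinks();               // fix up any pending labels

        size_t const size = code.codeSize();
        out.resize(size);
        code.relocateToBase(base);                   // apply base-relative fixups
        code.copyFlattenedData(out.data(), size);    // copy bytes to the buffer
        return size;
    }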
@@ -830,7 +1440,7 @@ void drcbe_x86::generate(drcuml_block &block, const instruction *instlist, uint3
// given mode/pc exists in the hash table
//-------------------------------------------------
-bool drcbe_x86::hash_exists(uint32_t mode, uint32_t pc)
+bool drcbe_x86::hash_exists(uint32_t mode, uint32_t pc) const noexcept
{
return m_hash.code_exists(mode, pc);
}
@@ -841,7 +1451,7 @@ bool drcbe_x86::hash_exists(uint32_t mode, uint32_t pc)
// the back-end implementation
//-------------------------------------------------
-void drcbe_x86::get_info(drcbe_info &info)
+void drcbe_x86::get_info(drcbe_info &info) const noexcept
{
for (info.direct_iregs = 0; info.direct_iregs < REG_I_COUNT; info.direct_iregs++)
if (int_register_map[info.direct_iregs] == 0)
@@ -860,21 +1470,23 @@ void drcbe_x86::get_info(drcbe_info &info)
// into a register
//-------------------------------------------------
-void drcbe_x86::emit_mov_r32_p32(x86code *&dst, uint8_t reg, const be_parameter &param)
+void drcbe_x86::emit_mov_r32_p32(Assembler &a, Gp const &reg, be_parameter const &param)
{
if (param.is_immediate())
{
if (param.immediate() == 0)
- emit_xor_r32_r32(dst, reg, reg); // xor reg,reg
+ a.xor_(reg, reg); // xor reg,reg
else
- emit_mov_r32_imm(dst, reg, param.immediate()); // mov reg,param
+ a.mov(reg, param.immediate()); // mov reg,param
}
else if (param.is_memory())
- emit_mov_r32_m32(dst, reg, MABS(param.memory())); // mov reg,[param]
+ {
+ a.mov(reg, MABS(param.memory())); // mov reg,[param]
+ }
else if (param.is_int_register())
{
- if (reg != param.ireg())
- emit_mov_r32_r32(dst, reg, param.ireg()); // mov reg,param
+ if (reg.id() != param.ireg())
+ a.mov(reg, Gpd(param.ireg())); // mov reg,param
}
}
@@ -885,19 +1497,19 @@ void drcbe_x86::emit_mov_r32_p32(x86code *&dst, uint8_t reg, const be_parameter
// any flags
//-------------------------------------------------
-void drcbe_x86::emit_mov_r32_p32_keepflags(x86code *&dst, uint8_t reg, const be_parameter &param)
+void drcbe_x86::emit_mov_r32_p32_keepflags(Assembler &a, Gp const &reg, be_parameter const &param)
{
if (param.is_immediate())
- emit_mov_r32_imm(dst, reg, param.immediate()); // mov reg,param
+ a.mov(reg, param.immediate()); // mov reg,param
else if (param.is_memory())
{
- if (!can_skip_lower_load(dst, (uint32_t *)((uintptr_t)param.memory()), reg))
- emit_mov_r32_m32(dst, reg, MABS(param.memory())); // mov reg,[param]
+ if (!can_skip_lower_load(a, (uint32_t *)((uintptr_t)param.memory()), reg))
+ a.mov(reg, MABS(param.memory())); // mov reg,[param]
}
else if (param.is_int_register())
{
- if (reg != param.ireg())
- emit_mov_r32_r32(dst, reg, param.ireg()); // mov reg,param
+ if (reg.id() != param.ireg())
+ a.mov(reg, Gpd(param.ireg())); // mov reg,param
}
}
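can_skip_lower_load()/set_last_lower_reg() are not part of this hunk; conceptually they let the back-end elide a reload when a register is already known to hold the requested memory word because the immediately preceding store came from it. A rough sketch of that idea, with invented names and without the invalidation the real tracker needs around labels and calls:

    #include <cstdint>

    // Hypothetical tracker: remembers the last (address, register) pair written
    // so a back-to-back reload of the same word can be skipped.
    struct last_store_tracker
    {
        uint32_t const *addr = nullptr;
        int             reg = -1;

        bool can_skip_load(uint32_t const *a, int r) const { return a && (a == addr) && (r == reg); }
        void record_store(uint32_t const *a, int r)        { addr = a; reg = r; }
        void invalidate()                                   { addr = nullptr; reg = -1; }
    };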
@@ -907,18 +1519,18 @@ void drcbe_x86::emit_mov_r32_p32_keepflags(x86code *&dst, uint8_t reg, const be_
// into a memory location
//-------------------------------------------------
-void drcbe_x86::emit_mov_m32_p32(x86code *&dst, x86_memref memref, const be_parameter &param)
+void drcbe_x86::emit_mov_m32_p32(Assembler &a, Mem memref, be_parameter const &param)
{
if (param.is_immediate())
- emit_mov_m32_imm(dst, memref, param.immediate()); // mov [mem],param
+ a.mov(memref, param.immediate()); // mov [mem],param
else if (param.is_memory())
{
- if (!can_skip_lower_load(dst, (uint32_t *)((uintptr_t)param.memory()), REG_EAX))
- emit_mov_r32_m32(dst, REG_EAX, MABS(param.memory())); // mov eax,[param]
- emit_mov_m32_r32(dst, memref, REG_EAX); // mov [mem],eax
+ if (!can_skip_lower_load(a, (uint32_t *)((uintptr_t)param.memory()), eax))
+ a.mov(eax, MABS(param.memory())); // mov eax,[param]
+ a.mov(memref, eax); // mov [mem],eax
}
else if (param.is_int_register())
- emit_mov_m32_r32(dst, memref, param.ireg()); // mov [mem],param
+ a.mov(memref, Gpd(param.ireg())); // mov [mem],param
}
@@ -927,687 +1539,103 @@ void drcbe_x86::emit_mov_m32_p32(x86code *&dst, x86_memref memref, const be_para
// 32-bit parameter
//-------------------------------------------------
-void drcbe_x86::emit_mov_p32_r32(x86code *&dst, const be_parameter &param, uint8_t reg)
+void drcbe_x86::emit_mov_p32_r32(Assembler &a, be_parameter const &param, Gp const &reg)
{
assert(!param.is_immediate());
if (param.is_memory())
{
- emit_mov_m32_r32(dst, MABS(param.memory()), reg); // mov [param],reg
- set_last_lower_reg(dst, param, reg);
- }
- else if (param.is_int_register())
- {
- if (reg != param.ireg())
- emit_mov_r32_r32(dst, param.ireg(), reg); // mov param,reg
- }
-}
-
-
-//-------------------------------------------------
-// emit_add_r32_p32 - add operation to a 32-bit
-// register from a 32-bit parameter
-//-------------------------------------------------
-
-void drcbe_x86::emit_add_r32_p32(x86code *&dst, uint8_t reg, const be_parameter &param, const instruction &inst)
-{
- if (param.is_immediate())
- {
- if (inst.flags() != 0 || param.immediate() != 0)
- emit_add_r32_imm(dst, reg, param.immediate()); // add reg,param
- }
- else if (param.is_memory())
- emit_add_r32_m32(dst, reg, MABS(param.memory())); // add reg,[param]
- else if (param.is_int_register())
- emit_add_r32_r32(dst, reg, param.ireg()); // add reg,param
-}
-
-
-//-------------------------------------------------
-// emit_add_m32_p32 - add operation to a 32-bit
-// memory location from a 32-bit parameter
-//-------------------------------------------------
-
-void drcbe_x86::emit_add_m32_p32(x86code *&dst, x86_memref memref, const be_parameter &param, const instruction &inst)
-{
- if (param.is_immediate())
- {
- if (inst.flags() != 0 || param.immediate() != 0)
- emit_add_m32_imm(dst, memref, param.immediate()); // add [dest],param
- }
- else
- {
- int reg = param.select_register(REG_EAX);
- emit_mov_r32_p32(dst, reg, param); // mov reg,param
- emit_add_m32_r32(dst, memref, reg); // add [dest],reg
- }
-}
-
-
-//-------------------------------------------------
-// emit_adc_r32_p32 - adc operation to a 32-bit
-// register from a 32-bit parameter
-//-------------------------------------------------
-
-void drcbe_x86::emit_adc_r32_p32(x86code *&dst, uint8_t reg, const be_parameter &param, const instruction &inst)
-{
- if (param.is_immediate())
- emit_adc_r32_imm(dst, reg, param.immediate()); // adc reg,param
- else if (param.is_memory())
- emit_adc_r32_m32(dst, reg, MABS(param.memory())); // adc reg,[param]
- else if (param.is_int_register())
- emit_adc_r32_r32(dst, reg, param.ireg()); // adc reg,param
-}
-
-
-//-------------------------------------------------
-// emit_adc_m32_p32 - adc operation to a 32-bit
-// memory location from a 32-bit parameter
-//-------------------------------------------------
-
-void drcbe_x86::emit_adc_m32_p32(x86code *&dst, x86_memref memref, const be_parameter &param, const instruction &inst)
-{
- if (param.is_immediate())
- emit_adc_m32_imm(dst, memref, param.immediate()); // adc [dest],param
- else
- {
- int reg = param.select_register(REG_EAX);
- emit_mov_r32_p32_keepflags(dst, reg, param); // mov reg,param
- emit_adc_m32_r32(dst, memref, reg); // adc [dest],reg
- }
-}
-
-
-//-------------------------------------------------
-// emit_sub_r32_p32 - sub operation to a 32-bit
-// register from a 32-bit parameter
-//-------------------------------------------------
-
-void drcbe_x86::emit_sub_r32_p32(x86code *&dst, uint8_t reg, const be_parameter &param, const instruction &inst)
-{
- if (param.is_immediate())
- {
- if (inst.flags() != 0 || param.immediate() != 0)
- emit_sub_r32_imm(dst, reg, param.immediate()); // sub reg,param
- }
- else if (param.is_memory())
- emit_sub_r32_m32(dst, reg, MABS(param.memory())); // sub reg,[param]
- else if (param.is_int_register())
- emit_sub_r32_r32(dst, reg, param.ireg()); // sub reg,param
-}
-
-
-//-------------------------------------------------
-// emit_sub_m32_p32 - sub operation to a 32-bit
-// memory location from a 32-bit parameter
-//-------------------------------------------------
-
-void drcbe_x86::emit_sub_m32_p32(x86code *&dst, x86_memref memref, const be_parameter &param, const instruction &inst)
-{
- if (param.is_immediate())
- {
- if (inst.flags() != 0 || param.immediate() != 0)
- emit_sub_m32_imm(dst, memref, param.immediate()); // sub [dest],param
- }
- else
- {
- int reg = param.select_register(REG_EAX);
- emit_mov_r32_p32(dst, reg, param); // mov reg,param
- emit_sub_m32_r32(dst, memref, reg); // sub [dest],reg
- }
-}
-
-
-//-------------------------------------------------
-// emit_sbb_r32_p32 - sbb operation to a 32-bit
-// register from a 32-bit parameter
-//-------------------------------------------------
-
-void drcbe_x86::emit_sbb_r32_p32(x86code *&dst, uint8_t reg, const be_parameter &param, const instruction &inst)
-{
- if (param.is_immediate())
- emit_sbb_r32_imm(dst, reg, param.immediate()); // sbb reg,param
- else if (param.is_memory())
- emit_sbb_r32_m32(dst, reg, MABS(param.memory())); // sbb reg,[param]
- else if (param.is_int_register())
- emit_sbb_r32_r32(dst, reg, param.ireg()); // sbb reg,param
-}
-
-
-//-------------------------------------------------
-// emit_sbb_m32_p32 - sbb operation to a 32-bit
-// memory location from a 32-bit parameter
-//-------------------------------------------------
-
-void drcbe_x86::emit_sbb_m32_p32(x86code *&dst, x86_memref memref, const be_parameter &param, const instruction &inst)
-{
- if (param.is_immediate())
- emit_sbb_m32_imm(dst, memref, param.immediate()); // sbb [dest],param
- else
- {
- int reg = param.select_register(REG_EAX);
- emit_mov_r32_p32_keepflags(dst, reg, param); // mov reg,param
- emit_sbb_m32_r32(dst, memref, reg); // sbb [dest],reg
- }
-}
-
-
-//-------------------------------------------------
-// emit_cmp_r32_p32 - cmp operation to a 32-bit
-// register from a 32-bit parameter
-//-------------------------------------------------
-
-void drcbe_x86::emit_cmp_r32_p32(x86code *&dst, uint8_t reg, const be_parameter &param, const instruction &inst)
-{
- if (param.is_immediate())
- emit_cmp_r32_imm(dst, reg, param.immediate()); // cmp reg,param
- else if (param.is_memory())
- emit_cmp_r32_m32(dst, reg, MABS(param.memory())); // cmp reg,[param]
- else if (param.is_int_register())
- emit_cmp_r32_r32(dst, reg, param.ireg()); // cmp reg,param
-}
-
-
-//-------------------------------------------------
-// emit_cmp_m32_p32 - cmp operation to a 32-bit
-// memory location from a 32-bit parameter
-//-------------------------------------------------
-
-void drcbe_x86::emit_cmp_m32_p32(x86code *&dst, x86_memref memref, const be_parameter &param, const instruction &inst)
-{
- if (param.is_immediate())
- emit_cmp_m32_imm(dst, memref, param.immediate()); // cmp [dest],param
- else
- {
- int reg = param.select_register(REG_EAX);
- emit_mov_r32_p32(dst, reg, param); // mov reg,param
- emit_cmp_m32_r32(dst, memref, reg); // cmp [dest],reg
- }
-}
-
-
-//-------------------------------------------------
-// emit_and_r32_p32 - and operation to a 32-bit
-// register from a 32-bit parameter
-//-------------------------------------------------
-
-void drcbe_x86::emit_and_r32_p32(x86code *&dst, uint8_t reg, const be_parameter &param, const instruction &inst)
-{
- if (param.is_immediate())
- {
- if (inst.flags() == 0 && (uint32_t)param.immediate() == 0xffffffff)
- ;// skip
- else if (inst.flags() == 0 && (uint32_t)param.immediate() == 0)
- emit_xor_r32_r32(dst, reg, reg); // xor reg,reg
- else
- emit_and_r32_imm(dst, reg, param.immediate()); // and reg,param
- }
- else if (param.is_memory())
- emit_and_r32_m32(dst, reg, MABS(param.memory())); // and reg,[param]
- else if (param.is_int_register())
- emit_and_r32_r32(dst, reg, param.ireg()); // and reg,param
-}
-
-
-//-------------------------------------------------
-// emit_and_m32_p32 - and operation to a 32-bit
-// memory location from a 32-bit parameter
-//-------------------------------------------------
-
-void drcbe_x86::emit_and_m32_p32(x86code *&dst, x86_memref memref, const be_parameter &param, const instruction &inst)
-{
- if (param.is_immediate())
- {
- if (inst.flags() == 0 && (uint32_t)param.immediate() == 0xffffffff)
- ;// skip
- else if (inst.flags() == 0 && (uint32_t)param.immediate() == 0)
- emit_mov_m32_imm(dst, memref, 0); // mov [dest],0
- else
- emit_and_m32_imm(dst, memref, param.immediate()); // and [dest],param
+ a.mov(MABS(param.memory()), reg); // mov [param],reg
+ set_last_lower_reg(a, param, reg);
}
- else
- {
- int reg = param.select_register(REG_EAX);
- emit_mov_r32_p32(dst, reg, param); // mov reg,param
- emit_and_m32_r32(dst, memref, reg); // and [dest],reg
- }
-}
-
-
-//-------------------------------------------------
-// emit_test_r32_p32 - test operation to a 32-bit
-// register from a 32-bit parameter
-//-------------------------------------------------
-
-void drcbe_x86::emit_test_r32_p32(x86code *&dst, uint8_t reg, const be_parameter &param, const instruction &inst)
-{
- if (param.is_immediate())
- emit_test_r32_imm(dst, reg, param.immediate()); // test reg,param
- else if (param.is_memory())
- emit_test_m32_r32(dst, MABS(param.memory()), reg); // test [param],reg
else if (param.is_int_register())
- emit_test_r32_r32(dst, reg, param.ireg()); // test reg,param
-}
-
-
-//-------------------------------------------------
-// emit_test_m32_p32 - test operation to a 32-bit
-// memory location from a 32-bit parameter
-//-------------------------------------------------
-
-void drcbe_x86::emit_test_m32_p32(x86code *&dst, x86_memref memref, const be_parameter &param, const instruction &inst)
-{
- if (param.is_immediate())
- emit_test_m32_imm(dst, memref, param.immediate()); // test [dest],param
- else if (param.is_memory())
{
- emit_mov_r32_p32(dst, REG_EAX, param); // mov reg,param
- emit_test_m32_r32(dst, memref, REG_EAX); // test [dest],reg
+ if (reg.id() != param.ireg())
+ a.mov(Gpd(param.ireg()), reg); // mov param,reg
+ a.mov(MABS(m_reghi[param.ireg()], 4), 0);
}
- else if (param.is_int_register())
- emit_test_m32_r32(dst, memref, param.ireg()); // test [dest],param
}
-//-------------------------------------------------
-// emit_or_r32_p32 - or operation to a 32-bit
-// register from a 32-bit parameter
-//-------------------------------------------------
-
-void drcbe_x86::emit_or_r32_p32(x86code *&dst, uint8_t reg, const be_parameter &param, const instruction &inst)
+template <typename T>
+void drcbe_x86::alu_op_param(Assembler &a, Inst::Id const opcode, Operand const &dst, be_parameter const &param, T &&optimize)
{
if (param.is_immediate())
{
- if (inst.flags() == 0 && (uint32_t)param.immediate() == 0)
- ;// skip
- else if (inst.flags() == 0 && (uint32_t)param.immediate() == 0xffffffff)
- emit_mov_r32_imm(dst, reg, ~0); // mov reg,-1
- else
- emit_or_r32_imm(dst, reg, param.immediate()); // or reg,param
+ if (!optimize(a, dst, param))
+ a.emit(opcode, dst, param.immediate()); // op dst,param
}
else if (param.is_memory())
- emit_or_r32_m32(dst, reg, MABS(param.memory())); // or reg,[param]
- else if (param.is_int_register())
- emit_or_r32_r32(dst, reg, param.ireg()); // or reg,param
-}
-
-
-//-------------------------------------------------
-// emit_or_m32_p32 - or operation to a 32-bit
-// memory location from a 32-bit parameter
-//-------------------------------------------------
-
-void drcbe_x86::emit_or_m32_p32(x86code *&dst, x86_memref memref, const be_parameter &param, const instruction &inst)
-{
- if (param.is_immediate())
- {
- if (inst.flags() == 0 && (uint32_t)param.immediate() == 0)
- ;// skip
- else if (inst.flags() == 0 && (uint32_t)param.immediate() == 0xffffffff)
- emit_mov_m32_imm(dst, memref, ~0); // mov [dest],-1
- else
- emit_or_m32_imm(dst, memref, param.immediate()); // or [dest],param
- }
- else
{
- int reg = param.select_register(REG_EAX);
- emit_mov_r32_p32(dst, reg, param); // mov reg,param
- emit_or_m32_r32(dst, memref, reg); // or [dest],reg
- }
-}
-
-
-//-------------------------------------------------
-// emit_xor_r32_p32 - xor operation to a 32-bit
-// register from a 32-bit parameter
-//-------------------------------------------------
+ if (dst.isMem())
+ {
+ // use temporary register for memory,memory
+ Gp const reg = param.select_register(eax);
-void drcbe_x86::emit_xor_r32_p32(x86code *&dst, uint8_t reg, const be_parameter &param, const instruction &inst)
-{
- if (param.is_immediate())
- {
- if (inst.flags() == 0 && (uint32_t)param.immediate() == 0)
- ;// skip
- else if (inst.flags() == 0 && (uint32_t)param.immediate() == 0xffffffff)
- emit_not_r32(dst, reg); // not reg
+ a.mov(reg, MABS(param.memory())); // mov reg,param
+ a.emit(opcode, dst, reg); // op [dst],reg
+ }
+ else if (opcode != Inst::kIdTest)
+ // most instructions are register,memory
+ a.emit(opcode, dst, MABS(param.memory())); // op dst,[param]
else
- emit_xor_r32_imm(dst, reg, param.immediate()); // xor reg,param
+ // test instruction requires memory,register
+ a.emit(opcode, MABS(param.memory()), dst); // op [param],dst
}
- else if (param.is_memory())
- emit_xor_r32_m32(dst, reg, MABS(param.memory())); // xor reg,[param]
else if (param.is_int_register())
- emit_xor_r32_r32(dst, reg, param.ireg()); // xor reg,param
-}
-
-
-//-------------------------------------------------
-// emit_xor_m32_p32 - xor operation to a 32-bit
-// memory location from a 32-bit parameter
-//-------------------------------------------------
-
-void drcbe_x86::emit_xor_m32_p32(x86code *&dst, x86_memref memref, const be_parameter &param, const instruction &inst)
-{
- if (param.is_immediate())
- {
- if (inst.flags() == 0 && (uint32_t)param.immediate() == 0)
- ;// skip
- else if (inst.flags() == 0 && (uint32_t)param.immediate() == 0xffffffff)
- emit_not_m32(dst, memref); // not [dest]
- else
- emit_xor_m32_imm(dst, memref, param.immediate()); // xor [dest],param
- }
- else
- {
- int reg = param.select_register(REG_EAX);
- emit_mov_r32_p32(dst, reg, param); // mov reg,param
- emit_xor_m32_r32(dst, memref, reg); // xor [dest],reg
- }
+ a.emit(opcode, dst, Gpd(param.ireg())); // op dst,param
}
-
-//-------------------------------------------------
-// emit_shl_r32_p32 - shl operation to a 32-bit
-// register from a 32-bit parameter
-//-------------------------------------------------
-
-void drcbe_x86::emit_shl_r32_p32(x86code *&dst, uint8_t reg, const be_parameter &param, const instruction &inst)
+template <typename T>
+void drcbe_x86::shift_op_param(Assembler &a, Inst::Id const opcode, size_t opsize, Operand const &dst, be_parameter const &param, T &&optimize, bool update_flags)
{
if (param.is_immediate())
{
- if (inst.flags() == 0 && (uint32_t)param.immediate() == 0)
- ;// skip
- else
- emit_shl_r32_imm(dst, reg, param.immediate()); // shl reg,param
- }
- else
- {
- emit_mov_r32_p32(dst, REG_ECX, param); // mov ecx,param
- emit_shl_r32_cl(dst, reg); // shl reg,cl
- }
-}
-
+ const uint32_t bitshift = param.immediate() & (opsize * 8 - 1);
-//-------------------------------------------------
-// emit_shl_m32_p32 - shl operation to a 32-bit
-// memory location from a 32-bit parameter
-//-------------------------------------------------
+ if (!optimize(a, dst, param) && bitshift != 0)
+ a.emit(opcode, dst, imm(bitshift));
-void drcbe_x86::emit_shl_m32_p32(x86code *&dst, x86_memref memref, const be_parameter &param, const instruction &inst)
-{
- if (param.is_immediate())
- {
- if (inst.flags() == 0 && (uint32_t)param.immediate() == 0)
- ;// skip
- else
- emit_shl_m32_imm(dst, memref, param.immediate()); // shl [dest],param
- }
- else
- {
- emit_mov_r32_p32(dst, REG_ECX, param); // mov ecx,param
- emit_shl_m32_cl(dst, memref); // shl [dest],cl
- }
-}
-
-
-//-------------------------------------------------
-// emit_shr_r32_p32 - shr operation to a 32-bit
-// register from a 32-bit parameter
-//-------------------------------------------------
-
-void drcbe_x86::emit_shr_r32_p32(x86code *&dst, uint8_t reg, const be_parameter &param, const instruction &inst)
-{
- if (param.is_immediate())
- {
- if (inst.flags() == 0 && (uint32_t)param.immediate() == 0)
- ;// skip
- else
- emit_shr_r32_imm(dst, reg, param.immediate()); // shr reg,param
- }
- else
- {
- emit_mov_r32_p32(dst, REG_ECX, param); // mov ecx,param
- emit_shr_r32_cl(dst, reg); // shr reg,cl
- }
-}
-
-
-//-------------------------------------------------
-// emit_shr_m32_p32 - shr operation to a 32-bit
-// memory location from a 32-bit parameter
-//-------------------------------------------------
-
-void drcbe_x86::emit_shr_m32_p32(x86code *&dst, x86_memref memref, const be_parameter &param, const instruction &inst)
-{
- if (param.is_immediate())
- {
- if (inst.flags() == 0 && (uint32_t)param.immediate() == 0)
- ;// skip
- else
- emit_shr_m32_imm(dst, memref, param.immediate()); // shr [dest],param
- }
- else
- {
- emit_mov_r32_p32(dst, REG_ECX, param); // mov ecx,param
- emit_shr_m32_cl(dst, memref); // shr [dest],cl
- }
-}
-
-
-//-------------------------------------------------
-// emit_sar_r32_p32 - sar operation to a 32-bit
-// register from a 32-bit parameter
-//-------------------------------------------------
-
-void drcbe_x86::emit_sar_r32_p32(x86code *&dst, uint8_t reg, const be_parameter &param, const instruction &inst)
-{
- if (param.is_immediate())
- {
- if (inst.flags() == 0 && (uint32_t)param.immediate() == 0)
- ;// skip
- else
- emit_sar_r32_imm(dst, reg, param.immediate()); // sar reg,param
- }
- else
- {
- emit_mov_r32_p32(dst, REG_ECX, param); // mov ecx,param
- emit_sar_r32_cl(dst, reg); // sar reg,cl
- }
-}
-
-
-//-------------------------------------------------
-// emit_sar_m32_p32 - sar operation to a 32-bit
-// memory location from a 32-bit parameter
-//-------------------------------------------------
-
-void drcbe_x86::emit_sar_m32_p32(x86code *&dst, x86_memref memref, const be_parameter &param, const instruction &inst)
-{
- if (param.is_immediate())
- {
- if (inst.flags() == 0 && (uint32_t)param.immediate() == 0)
- ;// skip
- else
- emit_sar_m32_imm(dst, memref, param.immediate()); // sar [dest],param
- }
- else
- {
- emit_mov_r32_p32(dst, REG_ECX, param); // mov ecx,param
- emit_sar_m32_cl(dst, memref); // sar [dest],cl
- }
-}
-
-
-//-------------------------------------------------
-// emit_rol_r32_p32 - rol operation to a 32-bit
-// register from a 32-bit parameter
-//-------------------------------------------------
-
-void drcbe_x86::emit_rol_r32_p32(x86code *&dst, uint8_t reg, const be_parameter &param, const instruction &inst)
-{
- if (param.is_immediate())
- {
- if (inst.flags() == 0 && (uint32_t)param.immediate() == 0)
- ;// skip
- else
- emit_rol_r32_imm(dst, reg, param.immediate()); // rol reg,param
- }
- else
- {
- emit_mov_r32_p32(dst, REG_ECX, param); // mov ecx,param
- emit_rol_r32_cl(dst, reg); // rol reg,cl
- }
-}
-
-
-//-------------------------------------------------
-// emit_rol_m32_p32 - rol operation to a 32-bit
-// memory location from a 32-bit parameter
-//-------------------------------------------------
-
-void drcbe_x86::emit_rol_m32_p32(x86code *&dst, x86_memref memref, const be_parameter &param, const instruction &inst)
-{
- if (param.is_immediate())
- {
- if (inst.flags() == 0 && (uint32_t)param.immediate() == 0)
- ;// skip
- else
- emit_rol_m32_imm(dst, memref, param.immediate()); // rol [dest],param
- }
- else
- {
- emit_mov_r32_p32(dst, REG_ECX, param); // mov ecx,param
- emit_rol_m32_cl(dst, memref); // rol [dest],cl
- }
-}
-
-
-//-------------------------------------------------
-// emit_ror_r32_p32 - ror operation to a 32-bit
-// register from a 32-bit parameter
-//-------------------------------------------------
+ if (update_flags)
+ {
+ if ((bitshift == 0) && (opcode != Inst::kIdRcl) && (opcode != Inst::kIdRcr))
+ a.clc(); // throw away carry since it'll never be used
-void drcbe_x86::emit_ror_r32_p32(x86code *&dst, uint8_t reg, const be_parameter &param, const instruction &inst)
-{
- if (param.is_immediate())
- {
- if (inst.flags() == 0 && (uint32_t)param.immediate() == 0)
- ;// skip
- else
- emit_ror_r32_imm(dst, reg, param.immediate()); // ror reg,param
+ calculate_status_flags(a, dst, FLAG_S | FLAG_Z); // calculate status flags but preserve carry
+ }
}
else
{
- emit_mov_r32_p32(dst, REG_ECX, param); // mov ecx,param
- emit_ror_r32_cl(dst, reg); // ror reg,cl
- }
-}
+ Label calc = a.newLabel();
+ Label end = a.newLabel();
+ Gp shift = dst.as<Gpd>().id() == ecx.id() ? ebx : ecx;
-//-------------------------------------------------
-// emit_ror_m32_p32 - ror operation to a 32-bit
-// memory location from a 32-bit parameter
-//-------------------------------------------------
+ a.pushfd(); // preserve flags for carry
-void drcbe_x86::emit_ror_m32_p32(x86code *&dst, x86_memref memref, const be_parameter &param, const instruction &inst)
-{
- if (param.is_immediate())
- {
- if (inst.flags() == 0 && (uint32_t)param.immediate() == 0)
- ;// skip
- else
- emit_ror_m32_imm(dst, memref, param.immediate()); // ror [dest],param
- }
- else
- {
- emit_mov_r32_p32(dst, REG_ECX, param); // mov ecx,param
- emit_ror_m32_cl(dst, memref); // ror [dest],cl
- }
-}
+ emit_mov_r32_p32(a, shift, param);
+ a.and_(shift, opsize * 8 - 1);
+ a.test(shift, shift);
-//-------------------------------------------------
-// emit_rcl_r32_p32 - rcl operation to a 32-bit
-// register from a 32-bit parameter
-//-------------------------------------------------
+ a.short_().jnz(calc);
-void drcbe_x86::emit_rcl_r32_p32(x86code *&dst, uint8_t reg, const be_parameter &param, const instruction &inst)
-{
- if (param.is_immediate())
- {
- if (inst.flags() == 0 && (uint32_t)param.immediate() == 0)
- ;// skip
- else
- emit_rcl_r32_imm(dst, reg, param.immediate()); // rcl reg,param
- }
- else
- {
- emit_mov_r32_p32_keepflags(dst, REG_ECX, param); // mov ecx,param
- emit_rcl_r32_cl(dst, reg); // rcl reg,cl
- }
-}
+ a.popfd(); // preserved flags not needed so throw it away
+ if (update_flags && (opcode != Inst::kIdRcl) && (opcode != Inst::kIdRcr))
+ a.clc(); // throw away carry since it'll never be used
-//-------------------------------------------------
-// emit_rcl_m32_p32 - rcl operation to a 32-bit
-// memory location from a 32-bit parameter
-//-------------------------------------------------
+ a.short_().jmp(end);
-void drcbe_x86::emit_rcl_m32_p32(x86code *&dst, x86_memref memref, const be_parameter &param, const instruction &inst)
-{
- if (param.is_immediate())
- {
- if (inst.flags() == 0 && (uint32_t)param.immediate() == 0)
- ;// skip
- else
- emit_rcl_m32_imm(dst, memref, param.immediate()); // rcl [dest],param
- }
- else
- {
- emit_mov_r32_p32_keepflags(dst, REG_ECX, param); // mov ecx,param
- emit_rcl_m32_cl(dst, memref); // rcl [dest],cl
- }
-}
+ a.bind(calc);
+ a.popfd(); // restore flags to keep carry for rolc/rorc
-//-------------------------------------------------
-// emit_rcr_r32_p32 - rcr operation to a 32-bit
-// register from a 32-bit parameter
-//-------------------------------------------------
+ a.emit(opcode, dst, shift);
-void drcbe_x86::emit_rcr_r32_p32(x86code *&dst, uint8_t reg, const be_parameter &param, const instruction &inst)
-{
- if (param.is_immediate())
- {
- if (inst.flags() == 0 && (uint32_t)param.immediate() == 0)
- ;// skip
- else
- emit_rcr_r32_imm(dst, reg, param.immediate()); // rcr reg,param
- }
- else
- {
- emit_mov_r32_p32_keepflags(dst, REG_ECX, param); // mov ecx,param
- emit_rcr_r32_cl(dst, reg); // rcr reg,cl
- }
-}
-
-
-//-------------------------------------------------
-// emit_rcr_m32_p32 - rcr operation to a 32-bit
-// memory location from a 32-bit parameter
-//-------------------------------------------------
+ a.bind(end);
-void drcbe_x86::emit_rcr_m32_p32(x86code *&dst, x86_memref memref, const be_parameter &param, const instruction &inst)
-{
- if (param.is_immediate())
- {
- if (inst.flags() == 0 && (uint32_t)param.immediate() == 0)
- ;// skip
- else
- emit_rcr_m32_imm(dst, memref, param.immediate()); // rcr [dest],param
- }
- else
- {
- emit_mov_r32_p32_keepflags(dst, REG_ECX, param); // mov ecx,param
- emit_rcr_m32_cl(dst, memref); // rcr [dest],cl
+ if (update_flags)
+ calculate_status_flags(a, dst, FLAG_S | FLAG_Z); // calculate status flags but preserve carry
}
}
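The optimize callback gives each opcode a chance to special-case immediates before the generic emit. A caller-side sketch (hypothetical call site, following the template's contract as shown above) for an ADD that can skip add-of-zero when no flags are requested:

    // Hypothetical ADD call site: returning true tells alu_op_param that the
    // immediate case was already handled (here: nothing needs to be emitted).
    alu_op_param(a, Inst::kIdAdd, dstreg, srcp,
            [&inst] (Assembler &a, Operand const &dst, be_parameter const &src)
            {
                return !inst.flags() && !src.immediate();
            });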
@@ -1622,39 +1650,39 @@ void drcbe_x86::emit_rcr_m32_p32(x86code *&dst, x86_memref memref, const be_para
// into a pair of registers
//-------------------------------------------------
-void drcbe_x86::emit_mov_r64_p64(x86code *&dst, uint8_t reglo, uint8_t reghi, const be_parameter &param)
+void drcbe_x86::emit_mov_r64_p64(Assembler &a, Gp const &reglo, Gp const &reghi, be_parameter const &param)
{
if (param.is_immediate())
{
- if (reglo == REG_NONE)
+ if (!reglo.isValid())
;
- else if ((uint32_t)param.immediate() == 0)
- emit_xor_r32_r32(dst, reglo, reglo); // xor reglo,reglo
+ else if (u32(param.immediate()) == 0)
+ a.xor_(reglo, reglo); // xor reglo,reglo
else
- emit_mov_r32_imm(dst, reglo, param.immediate()); // mov reglo,param
- if (reghi == REG_NONE)
+ a.mov(reglo, param.immediate()); // mov reglo,param
+ if (!reghi.isValid())
;
- else if ((uint32_t)(param.immediate() >> 32) == 0)
- emit_xor_r32_r32(dst, reghi, reghi); // xor reghi,reghi
+ else if (u32(param.immediate() >> 32) == 0)
+ a.xor_(reghi, reghi); // xor reghi,reghi
else
- emit_mov_r32_imm(dst, reghi, param.immediate() >> 32); // mov reghi,param >> 32
+ a.mov(reghi, param.immediate() >> 32); // mov reghi,param >> 32
}
else if (param.is_memory())
{
- int skip_lower = can_skip_lower_load(dst, (uint32_t *)((uintptr_t)param.memory()), reglo);
- int skip_upper = can_skip_upper_load(dst, (uint32_t *)((uintptr_t)param.memory(4)), reghi);
- if (reglo != REG_NONE && !skip_lower)
- emit_mov_r32_m32(dst, reglo, MABS(param.memory())); // mov reglo,[param]
- if (reghi != REG_NONE && !skip_upper)
- emit_mov_r32_m32(dst, reghi, MABS(param.memory(4))); // mov reghi,[param+4]
+ int skip_lower = can_skip_lower_load(a, (uint32_t *)((uintptr_t)param.memory(0)), reglo);
+ int skip_upper = can_skip_upper_load(a, (uint32_t *)((uintptr_t)param.memory(4)), reghi);
+ if (reglo.isValid() && !skip_lower)
+ a.mov(reglo, MABS(param.memory(0))); // mov reglo,[param]
+ if (reghi.isValid() && !skip_upper)
+ a.mov(reghi, MABS(param.memory(4))); // mov reghi,[param+4]
}
else if (param.is_int_register())
{
- int skip_upper = can_skip_upper_load(dst, m_reghi[param.ireg()], reghi);
- if (reglo != REG_NONE && reglo != param.ireg())
- emit_mov_r32_r32(dst, reglo, param.ireg()); // mov reglo,param
- if (reghi != REG_NONE && !skip_upper)
- emit_mov_r32_m32(dst, reghi, MABS(m_reghi[param.ireg()])); // mov reghi,reghi[param]
+ int skip_upper = can_skip_upper_load(a, m_reghi[param.ireg()], reghi);
+ if (reglo.isValid() && reglo.id() != param.ireg())
+ a.mov(reglo, Gpd(param.ireg())); // mov reglo,param
+ if (reghi.isValid() && !skip_upper)
+ a.mov(reghi, MABS(m_reghi[param.ireg()])); // mov reghi,reghi[param]
}
}
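Because the host only has 32-bit integer registers, a 64-bit UML register is handled as a low half (mapped register or memory) plus a high half that always lives in a memory slot (m_reghi[...]), which is why these loaders take a reglo/reghi pair. A schematic sketch of that split, with an invented type name:

    #include <cstdint>

    // Schematic only: how a 64-bit guest value is reassembled from the two
    // 32-bit pieces the back-end keeps.
    struct split64
    {
        uint32_t lo;    // candidate for a mapped host register when one is available
        uint32_t hi;    // spilled to a per-register memory slot

        uint64_t whole() const { return (uint64_t(hi) << 32) | lo; }
    };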
@@ -1665,31 +1693,31 @@ void drcbe_x86::emit_mov_r64_p64(x86code *&dst, uint8_t reglo, uint8_t reghi, co
// affecting any flags
//-------------------------------------------------
-void drcbe_x86::emit_mov_r64_p64_keepflags(x86code *&dst, uint8_t reglo, uint8_t reghi, const be_parameter &param)
+void drcbe_x86::emit_mov_r64_p64_keepflags(Assembler &a, Gp const &reglo, Gp const &reghi, be_parameter const &param)
{
if (param.is_immediate())
{
- if (reglo != REG_NONE)
- emit_mov_r32_imm(dst, reglo, param.immediate()); // mov reglo,param
- if (reghi != REG_NONE)
- emit_mov_r32_imm(dst, reghi, param.immediate() >> 32); // mov reghi,param >> 32
+ if (reglo.isValid())
+ a.mov(reglo, param.immediate()); // mov reglo,param
+ if (reghi.isValid())
+ a.mov(reghi, param.immediate() >> 32); // mov reghi,param >> 32
}
else if (param.is_memory())
{
- int skip_lower = can_skip_lower_load(dst, (uint32_t *)((uintptr_t)param.memory()), reglo);
- int skip_upper = can_skip_upper_load(dst, (uint32_t *)((uintptr_t)param.memory(4)), reghi);
- if (reglo != REG_NONE && !skip_lower)
- emit_mov_r32_m32(dst, reglo, MABS(param.memory())); // mov reglo,[param]
- if (reghi != REG_NONE && !skip_upper)
- emit_mov_r32_m32(dst, reghi, MABS(param.memory(4))); // mov reghi,[param+4]
+ int skip_lower = can_skip_lower_load(a, (uint32_t *)((uintptr_t)param.memory(0)), reglo);
+ int skip_upper = can_skip_upper_load(a, (uint32_t *)((uintptr_t)param.memory(4)), reghi);
+ if (reglo.isValid() && !skip_lower)
+ a.mov(reglo, MABS(param.memory(0))); // mov reglo,[param]
+ if (reghi.isValid() && !skip_upper)
+ a.mov(reghi, MABS(param.memory(4))); // mov reghi,[param+4]
}
else if (param.is_int_register())
{
- int skip_upper = can_skip_upper_load(dst, m_reghi[param.ireg()], reghi);
- if (reglo != REG_NONE && reglo != param.ireg())
- emit_mov_r32_r32(dst, reglo, param.ireg()); // mov reglo,param
- if (reghi != REG_NONE && !skip_upper)
- emit_mov_r32_m32(dst, reghi, MABS(m_reghi[param.ireg()])); // mov reghi,reghi[param]
+ int skip_upper = can_skip_upper_load(a, m_reghi[param.ireg()], reghi);
+ if (reglo.isValid() && reglo.id() != param.ireg())
+ a.mov(reglo, Gpd(param.ireg())); // mov reglo,param
+ if (reghi.isValid() && !skip_upper)
+ a.mov(reghi, MABS(m_reghi[param.ireg()])); // mov reghi,reghi[param]
}
}
@@ -1699,27 +1727,30 @@ void drcbe_x86::emit_mov_r64_p64_keepflags(x86code *&dst, uint8_t reglo, uint8_t
// into a memory location
//-------------------------------------------------
-void drcbe_x86::emit_mov_m64_p64(x86code *&dst, x86_memref memref, const be_parameter &param)
+void drcbe_x86::emit_mov_m64_p64(Assembler &a, Mem const &memref, be_parameter const &param)
{
+ Mem memref_lo = memref.cloneAdjusted(0); memref_lo.setSize(4);
+ Mem memref_hi = memref.cloneAdjusted(4); memref_hi.setSize(4);
+
if (param.is_immediate())
{
- emit_mov_m32_imm(dst, memref + 0, param.immediate()); // mov [mem],param
- emit_mov_m32_imm(dst, memref + 4, param.immediate() >> 32); // mov [mem],param >> 32
+ a.mov(memref_lo, param.immediate()); // mov [mem],param
+ a.mov(memref_hi, param.immediate() >> 32); // mov [mem],param >> 32
}
else if (param.is_memory())
{
- int skip_lower = can_skip_lower_load(dst, (uint32_t *)((uintptr_t)param.memory()), REG_EAX);
+ int skip_lower = can_skip_lower_load(a, (uint32_t *)((uintptr_t)param.memory()), eax);
if (!skip_lower)
- emit_mov_r32_m32(dst, REG_EAX, MABS(param.memory())); // mov eax,[param]
- emit_mov_m32_r32(dst, memref + 0, REG_EAX); // mov [mem],eax
- emit_mov_r32_m32(dst, REG_EAX, MABS(param.memory(4))); // mov eax,[param+4]
- emit_mov_m32_r32(dst, memref + 4, REG_EAX); // mov [mem+4],eax
+ a.mov(eax, MABS(param.memory(0))); // mov eax,[param]
+ a.mov(memref_lo, eax); // mov [mem],eax
+ a.mov(eax, MABS(param.memory(4))); // mov eax,[param+4]
+ a.mov(memref_hi, eax); // mov [mem+4],eax
}
else if (param.is_int_register())
{
- emit_mov_m32_r32(dst, memref + 0, param.ireg()); // mov [mem],param
- emit_mov_r32_m32(dst, REG_EAX, MABS(m_reghi[param.ireg()])); // mov eax,[param.hi]
- emit_mov_m32_r32(dst, memref + 4, REG_EAX); // mov [mem+4],eax
+ a.mov(memref_lo, Gpd(param.ireg())); // mov [mem],param
+ a.mov(eax, MABS(m_reghi[param.ireg()])); // mov eax,[param.hi]
+ a.mov(memref_hi, eax); // mov [mem+4],eax
}
}
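cloneAdjusted()/setSize() are the asmjit idiom used above to derive the two dword halves from one 64-bit memory operand. A small standalone example of the same idiom, with made-up operand names:

    // Store a 64-bit constant through two 32-bit halves of one memory operand.
    void store_u64(asmjit::x86::Assembler &a, asmjit::x86::Mem const &dst64, uint64_t value)
    {
        using namespace asmjit::x86;

        Mem lo = dst64.cloneAdjusted(0); lo.setSize(4);   // [dst+0], dword
        Mem hi = dst64.cloneAdjusted(4); hi.setSize(4);   // [dst+4], dword

        a.mov(lo, uint32_t(value));          // low 32 bits
        a.mov(hi, uint32_t(value >> 32));    // high 32 bits
    }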
@@ -1729,287 +1760,22 @@ void drcbe_x86::emit_mov_m64_p64(x86code *&dst, x86_memref memref, const be_para
// into a 64-bit parameter
//-------------------------------------------------
-void drcbe_x86::emit_mov_p64_r64(x86code *&dst, const be_parameter &param, uint8_t reglo, uint8_t reghi)
+void drcbe_x86::emit_mov_p64_r64(Assembler &a, be_parameter const &param, Gp const &reglo, Gp const &reghi)
{
assert(!param.is_immediate());
if (param.is_memory())
{
- emit_mov_m32_r32(dst, MABS(param.memory()), reglo); // mov [param],reglo
- emit_mov_m32_r32(dst, MABS(param.memory(4)), reghi); // mov [param+4],reghi
- }
- else if (param.is_int_register())
- {
- if (reglo != param.ireg())
- emit_mov_r32_r32(dst, param.ireg(), reglo); // mov param,reglo
- emit_mov_m32_r32(dst, MABS(m_reghi[param.ireg()]), reghi); // mov reghi[param],reghi
- }
- set_last_lower_reg(dst, param, reglo);
- set_last_upper_reg(dst, param, reghi);
-}
-
-
-//-------------------------------------------------
-// emit_add_r64_p64 - add operation to a 64-bit
-// pair of registers from a 64-bit parameter
-//-------------------------------------------------
-
-void drcbe_x86::emit_add_r64_p64(x86code *&dst, uint8_t reglo, uint8_t reghi, const be_parameter &param, const instruction &inst)
-{
- int saveflags = ((inst.flags() & FLAG_Z) != 0);
- if (param.is_memory())
- {
- emit_add_r32_m32(dst, reglo, MABS(param.memory())); // add reglo,[param]
- if (saveflags) emit_pushf(dst); // pushf
- emit_adc_r32_m32(dst, reghi, MABS(param.memory(4))); // adc reghi,[param]
- }
- else if (param.is_immediate())
- {
- emit_add_r32_imm(dst, reglo, param.immediate()); // add reglo,param
- if (saveflags) emit_pushf(dst); // pushf
- emit_adc_r32_imm(dst, reghi, param.immediate() >> 32); // adc reghi,param >> 32
- }
- else if (param.is_int_register())
- {
- emit_add_r32_r32(dst, reglo, param.ireg()); // add reglo,param
- if (saveflags) emit_pushf(dst); // pushf
- emit_adc_r32_m32(dst, reghi, MABS(m_reghi[param.ireg()])); // adc reghi,reghi[param]
- }
- if (saveflags)
- emit_combine_z_flags(dst);
-}
-
-
-//-------------------------------------------------
-// emit_add_m64_p64 - add operation to a 64-bit
-// memory location from a 64-bit parameter
-//-------------------------------------------------
-
-void drcbe_x86::emit_add_m64_p64(x86code *&dst, x86_memref memref, const be_parameter &param, const instruction &inst)
-{
- int saveflags = ((inst.flags() & FLAG_Z) != 0);
- if (param.is_immediate())
- {
- emit_add_m32_imm(dst, memref, param.immediate()); // add [dest],param
- if (saveflags) emit_pushf(dst); // pushf
- emit_adc_m32_imm(dst, memref + 4, param.immediate() >> 32); // adc [dest+4],param >> 32
- }
- else
- {
- int reglo = (param.is_int_register()) ? param.ireg() : REG_EAX;
- emit_mov_r64_p64(dst, reglo, REG_EDX, param); // mov edx:reglo,param
- emit_add_m32_r32(dst, memref, reglo); // add [dest],reglo
- if (saveflags) emit_pushf(dst); // pushf
- emit_adc_m32_r32(dst, memref + 4, REG_EDX); // adc [dest+4],edx
- }
- if (saveflags)
- emit_combine_z_flags(dst);
-}
-
-
-//-------------------------------------------------
-// emit_adc_r64_p64 - adc operation to a 64-bit
-// pair of registers from a 64-bit parameter
-//-------------------------------------------------
-
-void drcbe_x86::emit_adc_r64_p64(x86code *&dst, uint8_t reglo, uint8_t reghi, const be_parameter &param, const instruction &inst)
-{
- int saveflags = ((inst.flags() & FLAG_Z) != 0);
- if (param.is_memory())
- {
- emit_adc_r32_m32(dst, reglo, MABS(param.memory())); // adc reglo,[param]
- if (saveflags) emit_pushf(dst); // pushf
- emit_adc_r32_m32(dst, reghi, MABS(param.memory(4))); // adc reghi,[param]
- }
- else if (param.is_immediate())
- {
- emit_adc_r32_imm(dst, reglo, param.immediate()); // adc reglo,param
- if (saveflags) emit_pushf(dst); // pushf
- emit_adc_r32_imm(dst, reghi, param.immediate() >> 32); // adc reghi,param >> 32
+ a.mov(MABS(param.memory(0)), reglo); // mov [param],reglo
+ a.mov(MABS(param.memory(4)), reghi); // mov [param+4],reghi
}
else if (param.is_int_register())
{
- emit_adc_r32_r32(dst, reglo, param.ireg()); // adc reglo,param
- if (saveflags) emit_pushf(dst); // pushf
- emit_adc_r32_m32(dst, reghi, MABS(m_reghi[param.ireg()])); // adc reghi,reghi[param]
+ if (reglo.id() != param.ireg())
+ a.mov(Gpd(param.ireg()), reglo); // mov param,reglo
+ a.mov(MABS(m_reghi[param.ireg()]), reghi); // mov reghi[param],reghi
}
- if (saveflags)
- emit_combine_z_flags(dst);
-}
-
-
-//-------------------------------------------------
-// emit_adc_m64_p64 - adc operation to a 64-bit
-// memory location from a 64-bit parameter
-//-------------------------------------------------
-
-void drcbe_x86::emit_adc_m64_p64(x86code *&dst, x86_memref memref, const be_parameter &param, const instruction &inst)
-{
- int saveflags = ((inst.flags() & FLAG_Z) != 0);
- if (param.is_immediate())
- {
- emit_adc_m32_imm(dst, memref, param.immediate()); // adc [dest],param
- if (saveflags) emit_pushf(dst); // pushf
- emit_adc_m32_imm(dst, memref + 4, param.immediate() >> 32); // adc [dest+4],param >> 32
- }
- else
- {
- int reglo = (param.is_int_register()) ? param.ireg() : REG_EAX;
- emit_mov_r64_p64_keepflags(dst, reglo, REG_EDX, param); // mov edx:reglo,param
- emit_adc_m32_r32(dst, memref, reglo); // adc [dest],reglo
- if (saveflags) emit_pushf(dst); // pushf
- emit_adc_m32_r32(dst, memref + 4, REG_EDX); // adc [dest+4],edx
- }
- if (saveflags)
- emit_combine_z_flags(dst);
-}
-
-
-//-------------------------------------------------
-// emit_sub_r64_p64 - sub operation to a 64-bit
-// pair of registers from a 64-bit parameter
-//-------------------------------------------------
-
-void drcbe_x86::emit_sub_r64_p64(x86code *&dst, uint8_t reglo, uint8_t reghi, const be_parameter &param, const instruction &inst)
-{
- int saveflags = ((inst.flags() & FLAG_Z) != 0);
- if (param.is_memory())
- {
- emit_sub_r32_m32(dst, reglo, MABS(param.memory())); // sub reglo,[param]
- if (saveflags) emit_pushf(dst); // pushf
- emit_sbb_r32_m32(dst, reghi, MABS(param.memory(4))); // sbb reghi,[param]
- }
- else if (param.is_immediate())
- {
- emit_sub_r32_imm(dst, reglo, param.immediate()); // sub reglo,param
- if (saveflags) emit_pushf(dst); // pushf
- emit_sbb_r32_imm(dst, reghi, param.immediate() >> 32); // sbb reghi,param >> 32
- }
- else if (param.is_int_register())
- {
- emit_sub_r32_r32(dst, reglo, param.ireg()); // sub reglo,param
- if (saveflags) emit_pushf(dst); // pushf
- emit_sbb_r32_m32(dst, reghi, MABS(m_reghi[param.ireg()])); // sbb reghi,reghi[param]
- }
- if (saveflags)
- emit_combine_z_flags(dst);
-}
-
-
-//-------------------------------------------------
-// emit_sub_m64_p64 - sub operation to a 64-bit
-// memory location from a 64-bit parameter
-//-------------------------------------------------
-
-void drcbe_x86::emit_sub_m64_p64(x86code *&dst, x86_memref memref, const be_parameter &param, const instruction &inst)
-{
- int saveflags = ((inst.flags() & FLAG_Z) != 0);
- if (param.is_immediate())
- {
- emit_sub_m32_imm(dst, memref, param.immediate()); // sub [dest],param
- if (saveflags) emit_pushf(dst); // pushf
- emit_sbb_m32_imm(dst, memref + 4, param.immediate() >> 32); // sbb [dest+4],param >> 32
- }
- else
- {
- int reglo = (param.is_int_register()) ? param.ireg() : REG_EAX;
- emit_mov_r64_p64(dst, reglo, REG_EDX, param); // mov edx:reglo,param
- emit_sub_m32_r32(dst, memref, reglo); // sub [dest],reglo
- if (saveflags) emit_pushf(dst); // pushf
- emit_sbb_m32_r32(dst, memref + 4, REG_EDX); // sbb [dest+4],edx
- }
- if (saveflags)
- emit_combine_z_flags(dst);
-}
-
-
-//-------------------------------------------------
-// emit_sbb_r64_p64 - sbb operation to a 64-bit
-// pair of registers from a 64-bit parameter
-//-------------------------------------------------
-
-void drcbe_x86::emit_sbb_r64_p64(x86code *&dst, uint8_t reglo, uint8_t reghi, const be_parameter &param, const instruction &inst)
-{
- int saveflags = ((inst.flags() & FLAG_Z) != 0);
- if (param.is_memory())
- {
- emit_sbb_r32_m32(dst, reglo, MABS(param.memory())); // sbb reglo,[param]
- if (saveflags) emit_pushf(dst); // pushf
- emit_sbb_r32_m32(dst, reghi, MABS(param.memory(4))); // sbb reghi,[param]
- }
- else if (param.is_immediate())
- {
- emit_sbb_r32_imm(dst, reglo, param.immediate()); // sbb reglo,param
- if (saveflags) emit_pushf(dst); // pushf
- emit_sbb_r32_imm(dst, reghi, param.immediate() >> 32); // sbb reghi,param >> 32
- }
- else if (param.is_int_register())
- {
- emit_sbb_r32_r32(dst, reglo, param.ireg()); // sbb reglo,param
- if (saveflags) emit_pushf(dst); // pushf
- emit_sbb_r32_m32(dst, reghi, MABS(m_reghi[param.ireg()])); // sbb reghi,reghi[param]
- }
- if (saveflags)
- emit_combine_z_flags(dst);
-}
-
-
-//-------------------------------------------------
-// emit_sbb_m64_p64 - sbb operation to a 64-bit
-// memory location from a 64-bit parameter
-//-------------------------------------------------
-
-void drcbe_x86::emit_sbb_m64_p64(x86code *&dst, x86_memref memref, const be_parameter &param, const instruction &inst)
-{
- int saveflags = ((inst.flags() & FLAG_Z) != 0);
- if (param.is_immediate())
- {
- emit_sbb_m32_imm(dst, memref, param.immediate()); // sbb [dest],param
- if (saveflags) emit_pushf(dst); // pushf
- emit_sbb_m32_imm(dst, memref + 4, param.immediate() >> 32); // sbb [dest+4],param >> 32
- }
- else
- {
- int reglo = (param.is_int_register()) ? param.ireg() : REG_EAX;
- emit_mov_r64_p64_keepflags(dst, reglo, REG_EDX, param); // mov edx:reglo,param
- emit_sbb_m32_r32(dst, memref, reglo); // sbb [dest],reglo
- if (saveflags) emit_pushf(dst); // pushf
- emit_sbb_m32_r32(dst, memref + 4, REG_EDX); // sbb [dest+4],edx
- }
- if (saveflags)
- emit_combine_z_flags(dst);
-}
-
-
-//-------------------------------------------------
-// emit_cmp_r64_p64 - sub operation to a 64-bit
-// pair of registers from a 64-bit parameter
-//-------------------------------------------------
-
-void drcbe_x86::emit_cmp_r64_p64(x86code *&dst, uint8_t reglo, uint8_t reghi, const be_parameter &param, const instruction &inst)
-{
- int saveflags = (inst.flags() != FLAG_Z && (inst.flags() & FLAG_Z) != 0);
- if (param.is_memory())
- {
- emit_sub_r32_m32(dst, reglo, MABS(param.memory())); // sub reglo,[param]
- if (saveflags) emit_pushf(dst); // pushf
- emit_sbb_r32_m32(dst, reghi, MABS(param.memory(4))); // sbb reghi,[param]
- }
- else if (param.is_immediate())
- {
- emit_sub_r32_imm(dst, reglo, param.immediate()); // sub reglo,param
- if (saveflags) emit_pushf(dst); // pushf
- emit_sbb_r32_imm(dst, reghi, param.immediate() >> 32); // sbb reghi,param >> 32
- }
- else if (param.is_int_register())
- {
- emit_sub_r32_r32(dst, reglo, param.ireg()); // sub reglo,param
- if (saveflags) emit_pushf(dst); // pushf
- emit_sbb_r32_m32(dst, reghi, MABS(m_reghi[param.ireg()])); // sbb reghi,reghi[param]
- }
- if (inst.flags() == FLAG_Z)
- emit_or_r32_r32(dst, reghi, reglo); // or reghi,reglo
- else if (saveflags)
- emit_combine_z_flags(dst);
+ set_last_lower_reg(a, param, reglo);
+ set_last_upper_reg(a, param, reghi);
}
@@ -2018,39 +1784,39 @@ void drcbe_x86::emit_cmp_r64_p64(x86code *&dst, uint8_t reglo, uint8_t reghi, co
// pair of registers from a 64-bit parameter
//-------------------------------------------------
-void drcbe_x86::emit_and_r64_p64(x86code *&dst, uint8_t reglo, uint8_t reghi, const be_parameter &param, const instruction &inst)
+void drcbe_x86::emit_and_r64_p64(Assembler &a, Gp const &reglo, Gp const &reghi, be_parameter const &param, const instruction &inst)
{
int saveflags = ((inst.flags() & FLAG_Z) != 0);
if (param.is_memory())
{
- emit_and_r32_m32(dst, reglo, MABS(param.memory())); // and reglo,[param]
- if (saveflags) emit_pushf(dst); // pushf
- emit_and_r32_m32(dst, reghi, MABS(param.memory(4))); // and reghi,[param]
+ a.and_(reglo, MABS(param.memory(0))); // and reglo,[param]
+ if (saveflags) a.pushfd(); // pushf
+ a.and_(reghi, MABS(param.memory(4))); // and reghi,[param]
}
else if (param.is_immediate())
{
- if (inst.flags() == 0 && (uint32_t)param.immediate() == 0xffffffff)
+ if (!inst.flags() && u32(param.immediate()) == 0xffffffffU)
;// skip
- else if (inst.flags() == 0 && (uint32_t)param.immediate() == 0)
- emit_xor_r32_r32(dst, reglo, reglo); // xor reglo,reglo
+ else if (!inst.flags() && u32(param.immediate()) == 0)
+ a.xor_(reglo, reglo); // xor reglo,reglo
else
- emit_and_r32_imm(dst, reglo, param.immediate()); // and reglo,param
- if (saveflags) emit_pushf(dst); // pushf
- if (inst.flags() == 0 && (uint32_t)(param.immediate() >> 32) == 0xffffffff)
+ a.and_(reglo, param.immediate()); // and reglo,param
+ if (saveflags) a.pushfd(); // pushf
+ if (!inst.flags() && u32(param.immediate() >> 32) == 0xffffffffU)
;// skip
- else if (inst.flags() == 0 && (uint32_t)(param.immediate() >> 32) == 0)
- emit_xor_r32_r32(dst, reghi, reghi); // xor reghi,reghi
+ else if (!inst.flags() && u32(param.immediate() >> 32) == 0)
+ a.xor_(reghi, reghi); // xor reghi,reghi
else
- emit_and_r32_imm(dst, reghi, param.immediate() >> 32); // and reghi,param >> 32
+ a.and_(reghi, param.immediate() >> 32); // and reghi,param >> 32
}
else if (param.is_int_register())
{
- emit_and_r32_r32(dst, reglo, param.ireg()); // and reglo,param
- if (saveflags) emit_pushf(dst); // pushf
- emit_and_r32_m32(dst, reghi, MABS(m_reghi[param.ireg()])); // and reghi,reghi[param]
+ a.and_(reglo, Gpd(param.ireg())); // and reglo,param
+ if (saveflags) a.pushfd(); // pushf
+ a.and_(reghi, MABS(m_reghi[param.ireg()])); // and reghi,reghi[param]
}
if (saveflags)
- emit_combine_z_flags(dst);
+ emit_combine_z_flags(a);
}
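The pushfd/emit_combine_z_flags pairing exists because a 64-bit result is zero only when both 32-bit halves are zero, so the low half's Z must be saved before the high-half operation clobbers it and then folded back in. emit_combine_z_flags itself is outside this hunk, so the following is only a behavioural sketch of the flag it has to reconstruct:

    #include <cstdint>

    // Behavioural model of the 64-bit zero-flag handling: most flags come from
    // the high-half operation, but Z must be the AND of both halves' Z bits.
    bool zero_flag_64(uint32_t lo_result, uint32_t hi_result)
    {
        return (lo_result == 0) && (hi_result == 0);
    }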
@@ -2059,93 +1825,35 @@ void drcbe_x86::emit_and_r64_p64(x86code *&dst, uint8_t reglo, uint8_t reghi, co
// memory location from a 64-bit parameter
//-------------------------------------------------
-void drcbe_x86::emit_and_m64_p64(x86code *&dst, x86_memref memref, const be_parameter &param, const instruction &inst)
+void drcbe_x86::emit_and_m64_p64(Assembler &a, Mem const &memref_lo, Mem const &memref_hi, be_parameter const &param, const instruction &inst)
{
int saveflags = ((inst.flags() & FLAG_Z) != 0);
if (param.is_immediate())
{
- if (inst.flags() == 0 && (uint32_t)param.immediate() == 0xffffffff)
+ if (!inst.flags() && u32(param.immediate()) == 0xffffffffU)
;// skip
- else if (inst.flags() == 0 && (uint32_t)param.immediate() == 0)
- emit_mov_m32_imm(dst, memref, 0); // mov [dest],0
+ else if (!inst.flags() && u32(param.immediate()) == 0)
+ a.mov(memref_lo, 0); // mov [dest],0
else
- emit_and_m32_imm(dst, memref, param.immediate()); // and [dest],param
- if (saveflags) emit_pushf(dst); // pushf
- if (inst.flags() == 0 && (uint32_t)(param.immediate() >> 32) == 0xffffffff)
+ a.and_(memref_lo, param.immediate()); // and [dest],param
+ if (saveflags) a.pushfd(); // pushf
+ if (!inst.flags() && u32(param.immediate() >> 32) == 0xffffffffU)
;// skip
- else if (inst.flags() == 0 && (uint32_t)(param.immediate() >> 32) == 0)
- emit_mov_m32_imm(dst, memref + 4, 0); // mov [dest+4],0
+ else if (!inst.flags() && u32(param.immediate() >> 32) == 0)
+ a.mov(memref_hi, 0); // mov [dest+4],0
else
- emit_and_m32_imm(dst, memref + 4, param.immediate() >> 32); // and [dest+4],param >> 32
- }
- else
- {
- int reglo = (param.is_int_register()) ? param.ireg() : REG_EAX;
- emit_mov_r64_p64(dst, reglo, REG_EDX, param); // mov edx:reglo,param
- emit_and_m32_r32(dst, memref, reglo); // and [dest],reglo
- if (saveflags) emit_pushf(dst); // pushf
- emit_and_m32_r32(dst, memref + 4, REG_EDX); // and [dest+4],edx
- }
- if (saveflags)
- emit_combine_z_flags(dst);
-}
-
-
-//-------------------------------------------------
-// emit_test_r64_p64 - test operation to a 64-bit
-// pair of registers from a 64-bit parameter
-//-------------------------------------------------
-
-void drcbe_x86::emit_test_r64_p64(x86code *&dst, uint8_t reglo, uint8_t reghi, const be_parameter &param, const instruction &inst)
-{
- int saveflags = ((inst.flags() & FLAG_Z) != 0);
- if (param.is_memory())
- {
- emit_test_m32_r32(dst, MABS(param.memory()), reglo); // test [param],reglo
- if (saveflags) emit_pushf(dst); // pushf
- emit_test_m32_r32(dst, MABS(param.memory(4)), reghi); // test [param],reghi
- }
- else if (param.is_immediate())
- {
- emit_test_r32_imm(dst, reglo, param.immediate()); // test reglo,param
- if (saveflags) emit_pushf(dst); // pushf
- emit_test_r32_imm(dst, reghi, param.immediate() >> 32); // test reghi,param >> 32
- }
- else if (param.is_int_register())
- {
- emit_test_r32_r32(dst, reglo, param.ireg()); // test reglo,param
- if (saveflags) emit_pushf(dst); // pushf
- emit_test_m32_r32(dst, MABS(m_reghi[param.ireg()]), reghi); // test reghi[param],reghi
- }
- if (saveflags)
- emit_combine_z_flags(dst);
-}
-
-
-//-------------------------------------------------
-// emit_test_m64_p64 - test operation to a 64-bit
-// memory location from a 64-bit parameter
-//-------------------------------------------------
-
-void drcbe_x86::emit_test_m64_p64(x86code *&dst, x86_memref memref, const be_parameter &param, const instruction &inst)
-{
- int saveflags = ((inst.flags() & FLAG_Z) != 0);
- if (param.is_immediate())
- {
- emit_test_m32_imm(dst, memref, param.immediate()); // test [dest],param
- if (saveflags) emit_pushf(dst); // pushf
- emit_test_m32_imm(dst, memref + 4, param.immediate() >> 32); // test [dest+4],param >> 32
+ a.and_(memref_hi, param.immediate() >> 32); // and [dest+4],param >> 32
}
else
{
- int reglo = (param.is_int_register()) ? param.ireg() : REG_EAX;
- emit_mov_r64_p64(dst, reglo, REG_EDX, param); // mov edx:reglo,param
- emit_test_m32_r32(dst, memref, reglo); // test [dest],reglo
- if (saveflags) emit_pushf(dst); // pushf
- emit_test_m32_r32(dst, memref + 4, REG_EDX); // test [dest+4],edx
+ Gp const reglo = (param.is_int_register()) ? Gpd(param.ireg()) : eax;
+ emit_mov_r64_p64(a, reglo, edx, param); // mov edx:reglo,param
+ a.and_(memref_lo, reglo); // and [dest],reglo
+ if (saveflags) a.pushfd(); // pushf
+ a.and_(memref_hi, edx); // and [dest+4],edx
}
if (saveflags)
- emit_combine_z_flags(dst);
+ emit_combine_z_flags(a);
}
@@ -2154,39 +1862,39 @@ void drcbe_x86::emit_test_m64_p64(x86code *&dst, x86_memref memref, const be_par
// pair of registers from a 64-bit parameter
//-------------------------------------------------
-void drcbe_x86::emit_or_r64_p64(x86code *&dst, uint8_t reglo, uint8_t reghi, const be_parameter &param, const instruction &inst)
+void drcbe_x86::emit_or_r64_p64(Assembler &a, Gp const &reglo, Gp const &reghi, be_parameter const &param, const instruction &inst)
{
int saveflags = ((inst.flags() & FLAG_Z) != 0);
if (param.is_memory())
{
- emit_or_r32_m32(dst, reglo, MABS(param.memory())); // or reglo,[param]
- if (saveflags) emit_pushf(dst); // pushf
- emit_or_r32_m32(dst, reghi, MABS(param.memory(4))); // or reghi,[param]
+ a.or_(reglo, MABS(param.memory(0))); // or reglo,[param]
+ if (saveflags) a.pushfd(); // pushf
+ a.or_(reghi, MABS(param.memory(4))); // or reghi,[param]
}
else if (param.is_immediate())
{
- if (inst.flags() == 0 && (uint32_t)param.immediate() == 0)
+ if (!inst.flags() && u32(param.immediate()) == 0)
;// skip
- else if (inst.flags() == 0 && (uint32_t)param.immediate() == 0xffffffff)
- emit_mov_r32_imm(dst, reglo, ~0); // mov reglo,-1
+ else if (!inst.flags() && u32(param.immediate()) == 0xffffffffU)
+ a.mov(reglo, ~0); // mov reglo,-1
else
- emit_or_r32_imm(dst, reglo, param.immediate()); // or reglo,param
- if (saveflags) emit_pushf(dst); // pushf
- if (inst.flags() == 0 && (uint32_t)(param.immediate() >> 32) == 0)
+ a.or_(reglo, param.immediate()); // or reglo,param
+ if (saveflags) a.pushfd(); // pushf
+ if (!inst.flags() && u32(param.immediate() >> 32) == 0)
;// skip
- else if (inst.flags() == 0 && (uint32_t)(param.immediate() >> 32) == 0xffffffff)
- emit_mov_r32_imm(dst, reghi, ~0); // mov reghi,-1
+ else if (!inst.flags() && u32(param.immediate() >> 32) == 0xffffffffU)
+ a.mov(reghi, ~0); // mov reghi,-1
else
- emit_or_r32_imm(dst, reghi, param.immediate() >> 32); // or reghi,param >> 32
+ a.or_(reghi, param.immediate() >> 32); // or reghi,param >> 32
}
else if (param.is_int_register())
{
- emit_or_r32_r32(dst, reglo, param.ireg()); // or reglo,param
- if (saveflags) emit_pushf(dst); // pushf
- emit_or_r32_m32(dst, reghi, MABS(m_reghi[param.ireg()])); // or reghi,reghi[param]
+ a.or_(reglo, Gpd(param.ireg())); // or reglo,param
+ if (saveflags) a.pushfd(); // pushf
+ a.or_(reghi, MABS(m_reghi[param.ireg()])); // or reghi,reghi[param]
}
if (saveflags)
- emit_combine_z_flags(dst);
+ emit_combine_z_flags(a);
}
@@ -2195,35 +1903,35 @@ void drcbe_x86::emit_or_r64_p64(x86code *&dst, uint8_t reglo, uint8_t reghi, con
// memory location from a 64-bit parameter
//-------------------------------------------------
-void drcbe_x86::emit_or_m64_p64(x86code *&dst, x86_memref memref, const be_parameter &param, const instruction &inst)
+void drcbe_x86::emit_or_m64_p64(Assembler &a, Mem const &memref_lo, Mem const &memref_hi, be_parameter const &param, const instruction &inst)
{
int saveflags = ((inst.flags() & FLAG_Z) != 0);
if (param.is_immediate())
{
- if (inst.flags() == 0 && (uint32_t)param.immediate() == 0)
+ if (!inst.flags() && u32(param.immediate()) == 0)
;// skip
- else if (inst.flags() == 0 && (uint32_t)param.immediate() == 0xffffffff)
- emit_mov_m32_imm(dst, memref, ~0); // mov [dest],-1
+ else if (!inst.flags() && u32(param.immediate()) == 0xffffffffU)
+ a.mov(memref_lo, ~0); // mov [dest],-1
else
- emit_or_m32_imm(dst, memref, param.immediate()); // or [dest],param
- if (saveflags) emit_pushf(dst); // pushf
- if (inst.flags() == 0 && (uint32_t)(param.immediate() >> 32) == 0)
+ a.or_(memref_lo, param.immediate()); // or [dest],param
+ if (saveflags) a.pushfd(); // pushf
+ if (!inst.flags() && u32(param.immediate() >> 32) == 0)
;// skip
- else if (inst.flags() == 0 && (uint32_t)(param.immediate() >> 32) == 0xffffffff)
- emit_mov_m32_imm(dst, memref + 4, ~0); // mov [dest+4],-1
+ else if (!inst.flags() && u32(param.immediate() >> 32) == 0xffffffffU)
+ a.mov(memref_hi, ~0); // mov [dest+4],-1
else
- emit_or_m32_imm(dst, memref + 4, param.immediate() >> 32); // or [dest+4],param >> 32
+ a.or_(memref_hi, param.immediate() >> 32); // or [dest+4],param >> 32
}
else
{
- int reglo = (param.is_int_register()) ? param.ireg() : REG_EAX;
- emit_mov_r64_p64(dst, reglo, REG_EDX, param); // mov edx:reglo,param
- emit_or_m32_r32(dst, memref, reglo); // or [dest],reglo
- if (saveflags) emit_pushf(dst); // pushf
- emit_or_m32_r32(dst, memref + 4, REG_EDX); // or [dest+4],edx
+ Gp const reglo = (param.is_int_register()) ? Gpd(param.ireg()) : eax;
+ emit_mov_r64_p64(a, reglo, edx, param); // mov edx:reglo,param
+ a.or_(memref_lo, reglo); // or [dest],reglo
+ if (saveflags) a.pushfd(); // pushf
+ a.or_(memref_hi, edx); // or [dest+4],edx
}
if (saveflags)
- emit_combine_z_flags(dst);
+ emit_combine_z_flags(a);
}
@@ -2232,39 +1940,39 @@ void drcbe_x86::emit_or_m64_p64(x86code *&dst, x86_memref memref, const be_param
// pair of registers from a 64-bit parameter
//-------------------------------------------------
-void drcbe_x86::emit_xor_r64_p64(x86code *&dst, uint8_t reglo, uint8_t reghi, const be_parameter &param, const instruction &inst)
+void drcbe_x86::emit_xor_r64_p64(Assembler &a, Gp const &reglo, Gp const &reghi, be_parameter const &param, const instruction &inst)
{
int saveflags = ((inst.flags() & FLAG_Z) != 0);
if (param.is_memory())
{
- emit_xor_r32_m32(dst, reglo, MABS(param.memory())); // xor reglo,[param]
- if (saveflags) emit_pushf(dst); // pushf
- emit_xor_r32_m32(dst, reghi, MABS(param.memory(4))); // xor reghi,[param]
+ a.xor_(reglo, MABS(param.memory(0))); // xor reglo,[param]
+ if (saveflags) a.pushfd(); // pushf
+ a.xor_(reghi, MABS(param.memory(4))); // xor reghi,[param]
}
else if (param.is_immediate())
{
- if (inst.flags() == 0 && (uint32_t)param.immediate() == 0)
+ if (!inst.flags() && u32(param.immediate()) == 0)
;// skip
- else if (inst.flags() == 0 && (uint32_t)param.immediate() == 0xffffffff)
- emit_not_r32(dst, reglo); // not reglo
+ else if (!inst.flags() && u32(param.immediate()) == 0xffffffffU)
+ a.not_(reglo); // not reglo
else
- emit_xor_r32_imm(dst, reglo, param.immediate()); // xor reglo,param
- if (saveflags) emit_pushf(dst); // pushf
- if (inst.flags() == 0 && (uint32_t)(param.immediate() >> 32) == 0)
+ a.xor_(reglo, param.immediate()); // xor reglo,param
+ if (saveflags) a.pushfd(); // pushf
+ if (!inst.flags() && u32(param.immediate() >> 32) == 0)
;// skip
- else if (inst.flags() == 0 && (uint32_t)(param.immediate() >> 32) == 0xffffffff)
- emit_not_r32(dst, reghi); // not reghi
+ else if (!inst.flags() && u32(param.immediate() >> 32) == 0xffffffffU)
+ a.not_(reghi); // not reghi
else
- emit_xor_r32_imm(dst, reghi, param.immediate() >> 32); // xor reghi,param >> 32
+ a.xor_(reghi, param.immediate() >> 32); // xor reghi,param >> 32
}
else if (param.is_int_register())
{
- emit_xor_r32_r32(dst, reglo, param.ireg()); // xor reglo,param
- if (saveflags) emit_pushf(dst); // pushf
- emit_xor_r32_m32(dst, reghi, MABS(m_reghi[param.ireg()])); // xor reghi,reghi[param]
+ a.xor_(reglo, Gpd(param.ireg())); // xor reglo,param
+ if (saveflags) a.pushfd(); // pushf
+ a.xor_(reghi, MABS(m_reghi[param.ireg()])); // xor reghi,reghi[param]
}
if (saveflags)
- emit_combine_z_flags(dst);
+ emit_combine_z_flags(a);
}
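
When the 64-bit parameter is an immediate and no flags are requested, each 32-bit half is emitted in the cheapest form: nothing for a neutral value, xor reg,reg to clear, not reg for an all-ones XOR, and the generic instruction otherwise. A hedged sketch of that per-half selection in plain C++ (the names here are illustrative, not backend APIs):

    #include <cstdint>

    enum class half_form { skip, invert, generic };

    // Pick how to emit one 32-bit half of XOR reg,imm32 when no UML flags are
    // requested; the flag-producing path always uses the generic form.
    inline half_form pick_xor_half(uint32_t imm, bool flags_requested)
    {
        if (flags_requested)
            return half_form::generic;
        if (imm == 0)
            return half_form::skip;         // XOR with 0 changes nothing
        if (imm == 0xffffffffU)
            return half_form::invert;       // XOR with all ones is NOT
        return half_form::generic;
    }
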
@@ -2273,35 +1981,35 @@ void drcbe_x86::emit_xor_r64_p64(x86code *&dst, uint8_t reglo, uint8_t reghi, co
// memory location from a 64-bit parameter
//-------------------------------------------------
-void drcbe_x86::emit_xor_m64_p64(x86code *&dst, x86_memref memref, const be_parameter &param, const instruction &inst)
+void drcbe_x86::emit_xor_m64_p64(Assembler &a, Mem const &memref_lo, Mem const &memref_hi, be_parameter const &param, const instruction &inst)
{
int saveflags = ((inst.flags() & FLAG_Z) != 0);
if (param.is_immediate())
{
- if (inst.flags() == 0 && (uint32_t)param.immediate() == 0)
+ if (!inst.flags() && u32(param.immediate()) == 0)
;// skip
- else if (inst.flags() == 0 && (uint32_t)param.immediate() == 0xffffffff)
- emit_not_m32(dst, memref); // not [dest]
+ else if (!inst.flags() && u32(param.immediate()) == 0xffffffffU)
+ a.not_(memref_lo); // not [dest]
else
- emit_xor_m32_imm(dst, memref, param.immediate()); // xor [dest],param
- if (saveflags) emit_pushf(dst); // pushf
- if (inst.flags() == 0 && (uint32_t)(param.immediate() >> 32) == 0)
+ a.xor_(memref_lo, param.immediate()); // xor [dest],param
+ if (saveflags) a.pushfd(); // pushf
+ if (!inst.flags() && u32(param.immediate() >> 32) == 0)
;// skip
- else if (inst.flags() == 0 && (uint32_t)(param.immediate() >> 32) == 0xffffffff)
- emit_not_m32(dst, memref + 4); // not [dest+4]
+ else if (!inst.flags() && u32(param.immediate() >> 32) == 0xffffffffU)
+ a.not_(memref_hi); // not [dest+4]
else
- emit_xor_m32_imm(dst, memref + 4, param.immediate() >> 32); // xor [dest+4],param >> 32
+ a.xor_(memref_hi, param.immediate() >> 32); // xor [dest+4],param >> 32
}
else
{
- int reglo = (param.is_int_register()) ? param.ireg() : REG_EAX;
- emit_mov_r64_p64(dst, reglo, REG_EDX, param); // mov edx:reglo,param
- emit_xor_m32_r32(dst, memref, reglo); // xor [dest],reglo
- if (saveflags) emit_pushf(dst); // pushf
- emit_xor_m32_r32(dst, memref + 4, REG_EDX); // xor [dest+4],edx
+ Gp const reglo = (param.is_int_register()) ? Gpd(param.ireg()) : eax;
+ emit_mov_r64_p64(a, reglo, edx, param); // mov edx:reglo,param
+ a.xor_(memref_lo, reglo); // xor [dest],reglo
+ if (saveflags) a.pushfd(); // pushf
+ a.xor_(memref_hi, edx); // xor [dest+4],edx
}
if (saveflags)
- emit_combine_z_flags(dst);
+ emit_combine_z_flags(a);
}
@@ -2310,13 +2018,13 @@ void drcbe_x86::emit_xor_m64_p64(x86code *&dst, x86_memref memref, const be_para
// pair of registers from a 64-bit parameter
//-------------------------------------------------
-void drcbe_x86::emit_shl_r64_p64(x86code *&dst, uint8_t reglo, uint8_t reghi, const be_parameter &param, const instruction &inst)
+void drcbe_x86::emit_shl_r64_p64(Assembler &a, Gp const &reglo, Gp const &reghi, be_parameter const &param, const instruction &inst)
{
- int saveflags = (inst.flags() != 0);
+ int saveflags = inst.flags() != 0;
if (param.is_immediate())
{
int count = param.immediate() & 63;
- if (inst.flags() == 0 && count == 0)
+ if (!inst.flags() && count == 0)
;// skip
else
{
@@ -2324,55 +2032,98 @@ void drcbe_x86::emit_shl_r64_p64(x86code *&dst, uint8_t reglo, uint8_t reghi, co
{
if (inst.flags() != 0)
{
- emit_shld_r32_r32_imm(dst, reghi, reglo, 31); // shld reghi,reglo,31
- emit_shl_r32_imm(dst, reglo, 31); // shl reglo,31
+ a.shld(reghi, reglo, 31); // shld reghi,reglo,31
+ a.shl(reglo, 31); // shl reglo,31
count -= 31;
}
else
{
- emit_mov_r32_r32(dst, reghi, reglo); // mov reghi,reglo
- emit_xor_r32_r32(dst, reglo, reglo); // xor reglo,reglo
+ a.mov(reghi, reglo); // mov reghi,reglo
+ a.xor_(reglo, reglo); // xor reglo,reglo
count -= 32;
}
}
if (inst.flags() != 0 || count > 0)
{
- emit_shld_r32_r32_imm(dst, reghi, reglo, count); // shld reghi,reglo,count
- if (saveflags) emit_pushf(dst); // pushf
- emit_shl_r32_imm(dst, reglo, count); // shl reglo,count
+ a.shld(reghi, reglo, count); // shld reghi,reglo,count
+ if (saveflags && count != 0) a.pushfd(); // pushf
+ a.shl(reglo, count); // shl reglo,count
+ }
+ }
+
+ if (saveflags)
+ {
+ if (count == 0)
+ {
+ a.test(reglo, reglo);
+ a.pushfd();
+ calculate_status_flags(a, reghi, FLAG_S | FLAG_Z);
+ emit_combine_z_flags(a);
+ }
+ else
+ {
+ emit_combine_z_shl_flags(a);
}
}
}
else
{
- emit_link skip1, skip2;
- emit_mov_r32_p32(dst, REG_ECX, param); // mov ecx,param
- emit_test_r32_imm(dst, REG_ECX, 0x20); // test ecx,0x20
- emit_jcc_short_link(dst, x86emit::COND_Z, skip1); // jz skip1
+ Label skipall = a.newLabel();
+ Label end = a.newLabel();
+ Label skip1 = a.newLabel();
+ Label skip2 = a.newLabel();
+
+ emit_mov_r32_p32(a, ecx, param); // mov ecx,param
+
+ a.and_(ecx, 63);
+ a.test(ecx, ecx);
+ a.short_().jz(skipall);
+
+ a.test(ecx, 0x20); // test ecx,0x20
+ a.short_().jz(skip1); // jz skip1
if (inst.flags() != 0)
{
- emit_sub_r32_imm(dst, REG_ECX, 31); // sub ecx,31
- emit_shld_r32_r32_imm(dst, reghi, reglo, 31); // shld reghi,reglo,31
- emit_shl_r32_imm(dst, reglo, 31); // shl reglo,31
- emit_test_r32_imm(dst, REG_ECX, 0x20); // test ecx,0x20
- emit_jcc_short_link(dst, x86emit::COND_Z, skip2); // jz skip2
- emit_sub_r32_imm(dst, REG_ECX, 31); // sub ecx,31
- emit_shld_r32_r32_imm(dst, reghi, reglo, 31); // shld reghi,reglo,31
- emit_shl_r32_imm(dst, reglo, 31); // shl reglo,31
- track_resolve_link(dst, skip2); // skip2:
+ a.sub(ecx, 31); // sub ecx,31
+ a.shld(reghi, reglo, 31); // shld reghi,reglo,31
+ a.shl(reglo, 31); // shl reglo,31
+ a.test(ecx, 0x20); // test ecx,0x20
+ a.short_().jz(skip2); // jz skip2
+ a.sub(ecx, 31); // sub ecx,31
+ a.shld(reghi, reglo, 31); // shld reghi,reglo,31
+ a.shl(reglo, 31); // shl reglo,31
+ a.bind(skip2); // skip2:
+ reset_last_upper_lower_reg();
}
else
{
- emit_mov_r32_r32(dst, reghi, reglo); // mov reghi,reglo
- emit_xor_r32_r32(dst, reglo, reglo); // xor reglo,reglo
+ a.mov(reghi, reglo); // mov reghi,reglo
+ a.xor_(reglo, reglo); // xor reglo,reglo
}
- track_resolve_link(dst, skip1); // skip1:
- emit_shld_r32_r32_cl(dst, reghi, reglo); // shld reghi,reglo,cl
- if (saveflags) emit_pushf(dst); // pushf
- emit_shl_r32_cl(dst, reglo); // shl reglo,cl
+ a.bind(skip1); // skip1:
+ reset_last_upper_lower_reg();
+ a.shld(reghi, reglo, cl); // shld reghi,reglo,cl
+ if (saveflags) a.pushfd(); // pushf
+ a.shl(reglo, cl); // shl reglo,cl
+
+ if (saveflags)
+ {
+ emit_combine_z_shl_flags(a);
+
+ a.short_().jmp(end);
+ }
+
+ a.bind(skipall);
+
+ if (saveflags)
+ {
+ a.test(reglo, reglo);
+ a.pushfd();
+ calculate_status_flags(a, reghi, FLAG_S | FLAG_Z);
+ emit_combine_z_flags(a);
+ }
+
+ a.bind(end);
}
- if (saveflags)
- emit_combine_z_shl_flags(dst);
}
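
For immediate shift counts the emitter models a 64-bit left shift on the two halves itself: counts of 32 and above move the low word into the high word first, then a shld/shl pair handles the remainder (the flag-producing path takes two 31-bit steps instead so the carry stays meaningful). The same computation as a standalone C++ sketch, flags ignored and count assumed already masked to 0-63:

    #include <cstdint>

    struct pair32 { uint32_t lo, hi; };

    inline pair32 shl64(pair32 v, unsigned count)
    {
        if (count >= 32)            // the whole low word moves up
        {
            v.hi = v.lo;
            v.lo = 0;
            count -= 32;
        }
        if (count != 0)
        {
            v.hi = (v.hi << count) | (v.lo >> (32 - count));   // shld reghi,reglo,count
            v.lo <<= count;                                    // shl reglo,count
        }
        return v;
    }
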
@@ -2381,13 +2132,13 @@ void drcbe_x86::emit_shl_r64_p64(x86code *&dst, uint8_t reglo, uint8_t reghi, co
// pair of registers from a 64-bit parameter
//-------------------------------------------------
-void drcbe_x86::emit_shr_r64_p64(x86code *&dst, uint8_t reglo, uint8_t reghi, const be_parameter &param, const instruction &inst)
+void drcbe_x86::emit_shr_r64_p64(Assembler &a, Gp const &reglo, Gp const &reghi, be_parameter const &param, const instruction &inst)
{
- int saveflags = ((inst.flags() & FLAG_Z) != 0);
+ int saveflags = inst.flags() != 0;
if (param.is_immediate())
{
int count = param.immediate() & 63;
- if (inst.flags() == 0 && count == 0)
+ if (!inst.flags() && count == 0)
;// skip
else
{
@@ -2395,55 +2146,114 @@ void drcbe_x86::emit_shr_r64_p64(x86code *&dst, uint8_t reglo, uint8_t reghi, co
{
if (inst.flags() != 0)
{
- emit_shrd_r32_r32_imm(dst, reglo, reghi, 31); // shrd reglo,reghi,31
- emit_shr_r32_imm(dst, reghi, 31); // shr reghi,31
+ a.shrd(reglo, reghi, 31); // shrd reglo,reghi,31
+ a.shr(reghi, 31); // shr reghi,31
count -= 31;
}
else
{
- emit_mov_r32_r32(dst, reglo, reghi); // mov reglo,reghi
- emit_xor_r32_r32(dst, reghi, reghi); // xor reghi,reghi
+ a.mov(reglo, reghi); // mov reglo,reghi
+ a.xor_(reghi, reghi); // xor reghi,reghi
count -= 32;
}
}
if (inst.flags() != 0 || count > 0)
{
- emit_shrd_r32_r32_imm(dst, reglo, reghi, count); // shrd reglo,reghi,count
- if (saveflags) emit_pushf(dst); // pushf
- emit_shr_r32_imm(dst, reghi, count); // shr reghi,count
+ a.shrd(reglo, reghi, count); // shrd reglo,reghi,count
+ if (saveflags && count != 0) a.pushfd(); // pushf
+ a.shr(reghi, count); // shr reghi,count
+ }
+ }
+
+ if (saveflags)
+ {
+ if (count == 0)
+ {
+ a.test(reglo, reglo);
+ a.pushfd();
+ calculate_status_flags(a, reghi, FLAG_S | FLAG_Z);
+ emit_combine_z_flags(a);
+ }
+ else
+ {
+ // take carry from lower register's flags
+ a.pushfd();
+ a.mov(ecx, dword_ptr(esp, 4));
+ a.and_(ecx, 0x01); // carry flag
+ a.and_(dword_ptr(esp, 0), ~0x01);
+ a.or_(dword_ptr(esp, 0), ecx);
+ a.popfd();
+
+ emit_combine_z_flags(a);
}
}
}
else
{
- emit_link skip1, skip2;
- emit_mov_r32_p32(dst, REG_ECX, param); // mov ecx,param
- emit_test_r32_imm(dst, REG_ECX, 0x20); // test ecx,0x20
- emit_jcc_short_link(dst, x86emit::COND_Z, skip1); // jz skip1
+ Label skipall = a.newLabel();
+ Label end = a.newLabel();
+ Label skip1 = a.newLabel();
+ Label skip2 = a.newLabel();
+
+ emit_mov_r32_p32(a, ecx, param); // mov ecx,param
+
+ a.and_(ecx, 63);
+ a.test(ecx, ecx);
+ a.short_().jz(skipall);
+
+ a.test(ecx, 0x20); // test ecx,0x20
+ a.short_().jz(skip1); // jz skip1
if (inst.flags() != 0)
{
- emit_sub_r32_imm(dst, REG_ECX, 31); // sub ecx,31
- emit_shrd_r32_r32_imm(dst, reglo, reghi, 31); // shrd reglo,reghi,31
- emit_shr_r32_imm(dst, reghi, 31); // shr reghi,31
- emit_test_r32_imm(dst, REG_ECX, 0x20); // test ecx,0x20
- emit_jcc_short_link(dst, x86emit::COND_Z, skip2); // jz skip2
- emit_sub_r32_imm(dst, REG_ECX, 31); // sub ecx,31
- emit_shrd_r32_r32_imm(dst, reglo, reghi, 31); // shrd reglo,reghi,31
- emit_shr_r32_imm(dst, reghi, 31); // shr reghi,31
- track_resolve_link(dst, skip2); // skip2:
+ a.sub(ecx, 31); // sub ecx,31
+ a.shrd(reglo, reghi, 31); // shrd reglo,reghi,31
+ a.shr(reghi, 31); // shr reghi,31
+ a.test(ecx, 0x20); // test ecx,0x20
+ a.short_().jz(skip2); // jz skip2
+ a.sub(ecx, 31); // sub ecx,31
+ a.shrd(reglo, reghi, 31); // shrd reglo,reghi,31
+ a.shr(reghi, 31); // shr reghi,31
+ a.bind(skip2); // skip2:
+ reset_last_upper_lower_reg();
}
else
{
- emit_mov_r32_r32(dst, reglo, reghi); // mov reglo,reghi
- emit_xor_r32_r32(dst, reghi, reghi); // xor reghi,reghi
+ a.mov(reglo, reghi); // mov reglo,reghi
+ a.xor_(reghi, reghi); // xor reghi,reghi
}
- track_resolve_link(dst, skip1); // skip1:
- emit_shrd_r32_r32_cl(dst, reglo, reghi); // shrd reglo,reghi,cl
- if (saveflags) emit_pushf(dst); // pushf
- emit_shr_r32_cl(dst, reghi); // shr reghi,cl
+ a.bind(skip1); // skip1:
+ reset_last_upper_lower_reg();
+ a.shrd(reglo, reghi, cl); // shrd reglo,reghi,cl
+ if (saveflags) a.pushfd(); // pushf
+ a.shr(reghi, cl); // shr reghi,cl
+
+ if (saveflags)
+ {
+ // take carry from lower register's flags
+ a.pushfd();
+ a.mov(ecx, dword_ptr(esp, 4));
+ a.and_(ecx, 0x01); // carry flag
+ a.and_(dword_ptr(esp, 0), ~0x01);
+ a.or_(dword_ptr(esp, 0), ecx);
+ a.popfd();
+
+ emit_combine_z_flags(a);
+
+ a.short_().jmp(end);
+ }
+
+ a.bind(skipall);
+
+ if (saveflags)
+ {
+ a.test(reglo, reglo);
+ a.pushfd();
+ calculate_status_flags(a, reghi, FLAG_S | FLAG_Z);
+ emit_combine_z_flags(a);
+ }
+
+ a.bind(end);
}
- if (saveflags)
- emit_combine_z_flags(dst);
}
@@ -2452,13 +2262,13 @@ void drcbe_x86::emit_shr_r64_p64(x86code *&dst, uint8_t reglo, uint8_t reghi, co
// pair of registers from a 64-bit parameter
//-------------------------------------------------
-void drcbe_x86::emit_sar_r64_p64(x86code *&dst, uint8_t reglo, uint8_t reghi, const be_parameter &param, const instruction &inst)
+void drcbe_x86::emit_sar_r64_p64(Assembler &a, Gp const &reglo, Gp const &reghi, be_parameter const &param, const instruction &inst)
{
- int saveflags = ((inst.flags() & FLAG_Z) != 0);
+ int saveflags = inst.flags() != 0;
if (param.is_immediate())
{
int count = param.immediate() & 63;
- if (inst.flags() == 0 && count == 0)
+ if (!inst.flags() && count == 0)
;// skip
else
{
@@ -2466,55 +2276,114 @@ void drcbe_x86::emit_sar_r64_p64(x86code *&dst, uint8_t reglo, uint8_t reghi, co
{
if (inst.flags() != 0)
{
- emit_shrd_r32_r32_imm(dst, reglo, reghi, 31); // shrd reglo,reghi,31
- emit_sar_r32_imm(dst, reghi, 31); // sar reghi,31
+ a.shrd(reglo, reghi, 31); // shrd reglo,reghi,31
+ a.sar(reghi, 31); // sar reghi,31
count -= 31;
}
else
{
- emit_mov_r32_r32(dst, reglo, reghi); // mov reglo,reghi
- emit_sar_r32_imm(dst, reghi, 31); // sar reghi,31
+ a.mov(reglo, reghi); // mov reglo,reghi
+ a.sar(reghi, 31); // sar reghi,31
count -= 32;
}
}
if (inst.flags() != 0 || count > 0)
{
- emit_shrd_r32_r32_imm(dst, reglo, reghi, count); // shrd reglo,reghi,count
- if (saveflags) emit_pushf(dst); // pushf
- emit_sar_r32_imm(dst, reghi, count); // sar reghi,count
+ a.shrd(reglo, reghi, count); // shrd reglo,reghi,count
+ if (saveflags && count != 0) a.pushfd(); // pushf
+ a.sar(reghi, count); // sar reghi,count
+ }
+ }
+
+ if (saveflags)
+ {
+ if (count == 0)
+ {
+ a.test(reglo, reglo);
+ a.pushfd();
+ calculate_status_flags(a, reghi, FLAG_S | FLAG_Z);
+ emit_combine_z_flags(a);
+ }
+ else
+ {
+ // take carry from lower register's flags
+ a.pushfd();
+ a.mov(ecx, dword_ptr(esp, 4));
+ a.and_(ecx, 0x01); // carry flag
+ a.and_(dword_ptr(esp, 0), ~0x01);
+ a.or_(dword_ptr(esp, 0), ecx);
+ a.popfd();
+
+ emit_combine_z_flags(a);
}
}
}
else
{
- emit_link skip1, skip2;
- emit_mov_r32_p32(dst, REG_ECX, param); // mov ecx,param
- emit_test_r32_imm(dst, REG_ECX, 0x20); // test ecx,0x20
- emit_jcc_short_link(dst, x86emit::COND_Z, skip1); // jz skip1
+ Label skip1 = a.newLabel();
+ Label skip2 = a.newLabel();
+ Label skipall = a.newLabel();
+ Label end = a.newLabel();
+
+ emit_mov_r32_p32(a, ecx, param); // mov ecx,param
+
+ a.and_(ecx, 63);
+ a.test(ecx, ecx);
+ a.short_().jz(skipall);
+
+ a.test(ecx, 0x20); // test ecx,0x20
+ a.short_().jz(skip1); // jz skip1
if (inst.flags() != 0)
{
- emit_sub_r32_imm(dst, REG_ECX, 31); // sub ecx,31
- emit_shrd_r32_r32_imm(dst, reglo, reghi, 31); // shrd reglo,reghi,31
- emit_sar_r32_imm(dst, reghi, 31); // sar reghi,31
- emit_test_r32_imm(dst, REG_ECX, 0x20); // test ecx,0x20
- emit_jcc_short_link(dst, x86emit::COND_Z, skip2); // jz skip
- emit_sub_r32_imm(dst, REG_ECX, 31); // sub ecx,31
- emit_shrd_r32_r32_imm(dst, reglo, reghi, 31); // shrd reglo,reghi,31
- emit_sar_r32_imm(dst, reghi, 31); // sar reghi,31
- track_resolve_link(dst, skip2); // skip2:
+ a.sub(ecx, 31); // sub ecx,31
+ a.shrd(reglo, reghi, 31); // shrd reglo,reghi,31
+ a.sar(reghi, 31); // sar reghi,31
+ a.test(ecx, 0x20); // test ecx,0x20
+			a.short_().jz(skip2);                                           // jz skip2
+ a.sub(ecx, 31); // sub ecx,31
+ a.shrd(reglo, reghi, 31); // shrd reglo,reghi,31
+ a.sar(reghi, 31); // sar reghi,31
+ a.bind(skip2); // skip2:
+ reset_last_upper_lower_reg();
}
else
{
- emit_mov_r32_r32(dst, reglo, reghi); // mov reglo,reghi
- emit_sar_r32_imm(dst, reghi, 31); // sar reghi,31
+ a.mov(reglo, reghi); // mov reglo,reghi
+ a.sar(reghi, 31); // sar reghi,31
+ }
+ a.bind(skip1); // skip1:
+ reset_last_upper_lower_reg();
+ a.shrd(reglo, reghi, cl); // shrd reglo,reghi,cl
+ if (saveflags) a.pushfd(); // pushf
+ a.sar(reghi, cl); // sar reghi,cl
+
+ if (saveflags)
+ {
+ // take carry from lower register's flags
+ a.pushfd();
+ a.mov(ecx, dword_ptr(esp, 4));
+ a.and_(ecx, 0x01); // carry flag
+ a.and_(dword_ptr(esp, 0), ~0x01);
+ a.or_(dword_ptr(esp, 0), ecx);
+ a.popfd();
+
+ emit_combine_z_flags(a);
+
+ a.short_().jmp(end);
+ }
+
+ a.bind(skipall);
+
+ if (saveflags)
+ {
+ a.test(reglo, reglo);
+ a.pushfd();
+ calculate_status_flags(a, reghi, FLAG_S | FLAG_Z);
+ emit_combine_z_flags(a);
}
- track_resolve_link(dst, skip1); // skip1:
- emit_shrd_r32_r32_cl(dst, reglo, reghi); // shrd reglo,reghi,cl
- if (saveflags) emit_pushf(dst); // pushf
- emit_sar_r32_cl(dst, reghi); // sar reghi,cl
+
+ a.bind(end);
}
- if (saveflags)
- emit_combine_z_flags(dst);
}
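
The arithmetic variant differs only in how the upper half is filled: for counts of 32 and above the high word is replaced by its own sign, and sar is used instead of shr for the remainder. Standalone sketch (count masked to 0-63, flags ignored):

    #include <cstdint>

    struct pair32 { uint32_t lo, hi; };

    inline pair32 sar64(pair32 v, unsigned count)
    {
        if (count >= 32)
        {
            v.lo = v.hi;                                  // mov reglo,reghi
            v.hi = uint32_t(int32_t(v.hi) >> 31);         // sar reghi,31 (sign fill)
            count -= 32;
        }
        if (count != 0)
        {
            v.lo = (v.lo >> count) | (v.hi << (32 - count));   // shrd reglo,reghi,count
            v.hi = uint32_t(int32_t(v.hi) >> count);           // sar reghi,count
        }
        return v;
    }
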
@@ -2523,13 +2392,17 @@ void drcbe_x86::emit_sar_r64_p64(x86code *&dst, uint8_t reglo, uint8_t reghi, co
// pair of registers from a 64-bit parameter
//-------------------------------------------------
-void drcbe_x86::emit_rol_r64_p64(x86code *&dst, uint8_t reglo, uint8_t reghi, const be_parameter &param, const instruction &inst)
+void drcbe_x86::emit_rol_r64_p64(Assembler &a, Gp const &reglo, Gp const &reghi, be_parameter const &param, const instruction &inst)
{
- int saveflags = ((inst.flags() & FLAG_Z) != 0);
+ int saveflags = inst.flags() != 0;
+
+ Gp tempreg = esi; // TODO: try to avoid collision with reglo and reghi?
+ a.push(tempreg);
+
if (param.is_immediate())
{
int count = param.immediate() & 63;
- if (inst.flags() == 0 && count == 0)
+ if (!inst.flags() && count == 0)
;// skip
else
{
@@ -2537,59 +2410,98 @@ void drcbe_x86::emit_rol_r64_p64(x86code *&dst, uint8_t reglo, uint8_t reghi, co
{
if (inst.flags() != 0)
{
- emit_mov_r32_r32(dst, REG_ECX, reglo); // mov ecx,reglo
- emit_shld_r32_r32_imm(dst, reglo, reghi, 31); // shld reglo,reghi,31
- emit_shld_r32_r32_imm(dst, reghi, REG_ECX, 31); // shld reghi,ecx,31
+ a.mov(ecx, reglo);
+ a.shld(reglo, reghi, 31);
+ a.shld(reghi, ecx, 31);
count -= 31;
}
else
{
- emit_xchg_r32_r32(dst, reghi, reglo); // xchg reghi,reglo
+ a.xchg(reghi, reglo);
count -= 32;
}
}
- if (inst.flags() != 0 || count > 0)
+
+ a.mov(ecx, reglo);
+ a.shld(reglo, reghi, count);
+ if (saveflags && count != 0) a.pushfd();
+ a.shld(reghi, ecx, count);
+ }
+
+ if (saveflags)
+ {
+ if (count == 0)
+ {
+ a.test(reglo, reglo);
+ a.pushfd();
+ calculate_status_flags(a, reghi, FLAG_S | FLAG_Z);
+ emit_combine_z_flags(a);
+ }
+ else
{
- emit_mov_r32_r32(dst, REG_ECX, reglo); // mov ecx,reglo
- emit_shld_r32_r32_imm(dst, reglo, reghi, count); // shld reglo,reghi,count
- if (saveflags) emit_pushf(dst); // pushf
- emit_shld_r32_r32_imm(dst, reghi, REG_ECX, count); // shld reghi,ecx,count
+ emit_combine_zs_flags(a);
}
}
}
else
{
- emit_link skip1, skip2;
- int tempreg = REG_EBX;
- emit_mov_m32_r32(dst, MBD(REG_ESP, -8), tempreg); // mov [esp-8],ebx
- emit_mov_r32_p32(dst, REG_ECX, param); // mov ecx,param
- emit_test_r32_imm(dst, REG_ECX, 0x20); // test ecx,0x20
- emit_jcc_short_link(dst, x86emit::COND_Z, skip1); // jz skip1
+ Label skipall = a.newLabel();
+ Label end = a.newLabel();
+ Label skip1 = a.newLabel();
+ Label shift_loop = a.newLabel();
+
+ emit_mov_r32_p32(a, ecx, param);
+
+ a.and_(ecx, 63);
+ a.test(ecx, ecx);
+ a.short_().jz(skipall);
+
+ a.cmp(ecx, 32);
+ a.short_().jl(skip1);
+
+ a.bind(shift_loop);
if (inst.flags() != 0)
{
- emit_sub_r32_imm(dst, REG_ECX, 31); // sub ecx,31
- emit_mov_r32_r32(dst, tempreg, reglo); // mov ebx,reglo
- emit_shld_r32_r32_imm(dst, reglo, reghi, 31); // shld reglo,reghi,31
- emit_shld_r32_r32_imm(dst, reghi, tempreg, 31); // shld reghi,ebx,31
- emit_test_r32_imm(dst, REG_ECX, 0x20); // test ecx,0x20
- emit_jcc_short_link(dst, x86emit::COND_Z, skip2); // jz skip2
- emit_sub_r32_imm(dst, REG_ECX, 31); // sub ecx,31
- emit_mov_r32_r32(dst, tempreg, reglo); // mov ebx,reglo
- emit_shld_r32_r32_imm(dst, reglo, reghi, 31); // shld reglo,reghi,31
- emit_shld_r32_r32_imm(dst, reghi, tempreg, 31); // shld reghi,ebx,31
- track_resolve_link(dst, skip2); // skip2:
+ a.sub(ecx, 31);
+ a.mov(tempreg, reglo);
+ a.shld(reglo, reghi, 31);
+ a.shld(reghi, tempreg, 31);
}
else
- emit_xchg_r32_r32(dst, reghi, reglo); // xchg reghi,reglo
- track_resolve_link(dst, skip1); // skip1:
- emit_mov_r32_r32(dst, tempreg, reglo); // mov ebx,reglo
- emit_shld_r32_r32_cl(dst, reglo, reghi); // shld reglo,reghi,cl
- if (saveflags) emit_pushf(dst); // pushf
- emit_shld_r32_r32_cl(dst, reghi, tempreg); // shld reghi,ebx,cl
- emit_mov_r32_m32(dst, tempreg, MBD(REG_ESP, saveflags ? -4 : -8)); // mov ebx,[esp-8]
+ {
+ a.xchg(reghi, reglo);
+ a.sub(ecx, 32);
+ }
+ a.cmp(ecx, 32);
+ a.short_().jge(shift_loop);
+
+ a.bind(skip1);
+ reset_last_upper_lower_reg();
+ a.mov(tempreg, reglo);
+ a.shld(reglo, reghi, cl);
+ if (saveflags) a.pushfd();
+ a.shld(reghi, tempreg, cl);
+
+ if (saveflags)
+ {
+ emit_combine_zs_flags(a);
+ a.short_().jmp(end);
+ }
+
+ a.bind(skipall);
+
+ if (saveflags)
+ {
+ a.test(reglo, reglo);
+ a.pushfd();
+ calculate_status_flags(a, reghi, FLAG_S | FLAG_Z);
+ emit_combine_z_flags(a);
+ }
+
+ a.bind(end);
}
- if (saveflags)
- emit_combine_z_flags(dst);
+
+ a.pop(tempreg);
}
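
Rotates follow the same half-splitting idea: rotating by 32 is just an exchange of the halves (the xchg fast path), and the remainder is a pair of shld operations feeding each half with bits from the other. Standalone sketch (count masked to 0-63, flags ignored):

    #include <cstdint>

    struct pair32 { uint32_t lo, hi; };

    inline pair32 rol64(pair32 v, unsigned count)
    {
        if (count >= 32)
        {
            uint32_t const t = v.lo;    // xchg reghi,reglo
            v.lo = v.hi;
            v.hi = t;
            count -= 32;
        }
        if (count != 0)
        {
            uint32_t const lo = (v.lo << count) | (v.hi >> (32 - count));
            uint32_t const hi = (v.hi << count) | (v.lo >> (32 - count));
            v.lo = lo;
            v.hi = hi;
        }
        return v;
    }
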
@@ -2598,13 +2510,17 @@ void drcbe_x86::emit_rol_r64_p64(x86code *&dst, uint8_t reglo, uint8_t reghi, co
// pair of registers from a 64-bit parameter
//-------------------------------------------------
-void drcbe_x86::emit_ror_r64_p64(x86code *&dst, uint8_t reglo, uint8_t reghi, const be_parameter &param, const instruction &inst)
+void drcbe_x86::emit_ror_r64_p64(Assembler &a, Gp const &reglo, Gp const &reghi, be_parameter const &param, const instruction &inst)
{
- int saveflags = ((inst.flags() & FLAG_Z) != 0);
+ int saveflags = inst.flags() != 0;
+
+ Gp tempreg = esi; // TODO: try to avoid collision with reglo and reghi?
+ a.push(tempreg);
+
if (param.is_immediate())
{
int count = param.immediate() & 63;
- if (inst.flags() == 0 && count == 0)
+ if (!inst.flags() && count == 0)
;// skip
else
{
@@ -2612,59 +2528,98 @@ void drcbe_x86::emit_ror_r64_p64(x86code *&dst, uint8_t reglo, uint8_t reghi, co
{
if (inst.flags() != 0)
{
- emit_mov_r32_r32(dst, REG_ECX, reglo); // mov ecx,reglo
- emit_shrd_r32_r32_imm(dst, reglo, reghi, 31); // shrd reglo,reghi,31
- emit_shrd_r32_r32_imm(dst, reghi, REG_ECX, 31); // shrd reghi,ecx,31
+ a.mov(tempreg, reglo);
+ a.shrd(reglo, reghi, 31);
+ a.shrd(reghi, tempreg, 31);
count -= 31;
}
else
{
- emit_xchg_r32_r32(dst, reghi, reglo); // xchg reghi,reglo
+ a.xchg(reghi, reglo);
count -= 32;
}
}
- if (inst.flags() != 0 || count > 0)
+
+ a.mov(tempreg, reghi);
+ a.shrd(reghi, reglo, count);
+ if (saveflags && count != 0) a.pushfd();
+ a.shrd(reglo, tempreg, count);
+
+ if (saveflags)
{
- emit_mov_r32_r32(dst, REG_ECX, reglo); // mov ecx,reglo
- emit_shrd_r32_r32_imm(dst, reglo, reghi, count); // shrd reglo,reghi,count
- if (saveflags) emit_pushf(dst); // pushf
- emit_shrd_r32_r32_imm(dst, reghi, REG_ECX, count); // shrd reghi,ecx,count
+ if (count == 0)
+ {
+ a.test(reglo, reglo);
+ a.pushfd();
+ calculate_status_flags(a, reghi, FLAG_S | FLAG_Z);
+ emit_combine_z_flags(a);
+ }
+ else
+ {
+ emit_combine_zs_flags(a);
+ }
}
}
}
else
{
- emit_link skip1, skip2;
- int tempreg = REG_EBX;
- emit_mov_m32_r32(dst, MBD(REG_ESP, -8), tempreg); // mov [esp-8],ebx
- emit_mov_r32_p32(dst, REG_ECX, param); // mov ecx,param
- emit_test_r32_imm(dst, REG_ECX, 0x20); // test ecx,0x20
- emit_jcc_short_link(dst, x86emit::COND_Z, skip1); // jz skip1
+ Label skipall = a.newLabel();
+ Label end = a.newLabel();
+ Label skip1 = a.newLabel();
+ Label shift_loop = a.newLabel();
+
+ emit_mov_r32_p32(a, ecx, param);
+
+ a.and_(ecx, 63);
+ a.test(ecx, ecx);
+ a.short_().jz(skipall);
+
+ a.cmp(ecx, 32);
+ a.short_().jl(skip1);
+
+ a.bind(shift_loop);
if (inst.flags() != 0)
{
- emit_sub_r32_imm(dst, REG_ECX, 31); // sub ecx,31
- emit_mov_r32_r32(dst, tempreg, reglo); // mov ebx,reglo
- emit_shrd_r32_r32_imm(dst, reglo, reghi, 31); // shrd reglo,reghi,31
- emit_shrd_r32_r32_imm(dst, reghi, tempreg, 31); // shrd reghi,ebx,31
- emit_test_r32_imm(dst, REG_ECX, 0x20); // test ecx,0x20
- emit_jcc_short_link(dst, x86emit::COND_Z, skip2); // jz skip2
- emit_sub_r32_imm(dst, REG_ECX, 31); // sub ecx,31
- emit_mov_r32_r32(dst, tempreg, reglo); // mov ebx,reglo
- emit_shrd_r32_r32_imm(dst, reglo, reghi, 31); // shrd reglo,reghi,31
- emit_shrd_r32_r32_imm(dst, reghi, tempreg, 31); // shrd reghi,ebx,31
- track_resolve_link(dst, skip2); // skip2:
+ a.sub(ecx, 31);
+ a.mov(tempreg, reglo);
+ a.shrd(reglo, reghi, 31);
+ a.shrd(reghi, tempreg, 31);
}
else
- emit_xchg_r32_r32(dst, reghi, reglo); // xchg reghi,reglo
- track_resolve_link(dst, skip1); // skip1:
- emit_mov_r32_r32(dst, tempreg, reglo); // mov ebx,reglo
- emit_shrd_r32_r32_cl(dst, reglo, reghi); // shrd reglo,reghi,cl
- if (saveflags) emit_pushf(dst); // pushf
- emit_shrd_r32_r32_cl(dst, reghi, tempreg); // shrd reghi,ebx,cl
- emit_mov_r32_m32(dst, tempreg, MBD(REG_ESP, saveflags ? -4 : -8)); // mov ebx,[esp-8]
+ {
+ a.xchg(reghi, reglo);
+ a.sub(ecx, 32);
+ }
+ a.cmp(ecx, 32);
+ a.short_().jge(shift_loop);
+
+ a.bind(skip1);
+ reset_last_upper_lower_reg();
+ a.mov(tempreg, reghi);
+ a.shrd(reghi, reglo, cl);
+ if (saveflags) a.pushfd();
+ a.shrd(reglo, tempreg, cl);
+
+ if (saveflags)
+ {
+ emit_combine_zs_flags(a);
+ a.short_().jmp(end);
+ }
+
+ a.bind(skipall);
+
+ if (saveflags)
+ {
+ a.test(reglo, reglo);
+ a.pushfd();
+ calculate_status_flags(a, reghi, FLAG_S | FLAG_Z);
+ emit_combine_z_flags(a);
+ }
+
+ a.bind(end);
}
- if (saveflags)
- emit_combine_z_flags(dst);
+
+ a.pop(tempreg);
}
@@ -2673,40 +2628,46 @@ void drcbe_x86::emit_ror_r64_p64(x86code *&dst, uint8_t reglo, uint8_t reghi, co
// pair of registers from a 64-bit parameter
//-------------------------------------------------
-void drcbe_x86::emit_rcl_r64_p64(x86code *&dst, uint8_t reglo, uint8_t reghi, const be_parameter &param, const instruction &inst)
+void drcbe_x86::emit_rcl_r64_p64(Assembler &a, Gp const &reglo, Gp const &reghi, be_parameter const &param, const instruction &inst)
{
- int saveflags = ((inst.flags() & FLAG_Z) != 0);
- emit_link skipall, skiploop;
- x86code *loop;
+ Label loop = a.newLabel();
+ Label skipall = a.newLabel();
+ Label skiploop = a.newLabel();
- emit_mov_r32_p32_keepflags(dst, REG_ECX, param); // mov ecx,param
- if (!saveflags)
- {
- loop = dst; // loop:
- emit_jecxz_link(dst, skipall); // jecxz skipall
- emit_lea_r32_m32(dst, REG_ECX, MBD(REG_ECX, -1)); // lea ecx,[ecx-1]
- emit_rcl_r32_imm(dst, reglo, 1); // rcl reglo,1
- emit_rcl_r32_imm(dst, reghi, 1); // rcl reghi,1
- emit_jmp(dst, loop); // jmp loop
- track_resolve_link(dst, skipall); // skipall:
- }
- else
+ a.pushfd(); // keep carry flag after and
+ emit_mov_r32_p32(a, ecx, param);
+
+ a.and_(ecx, 63);
+ a.popfd();
+
+ a.short_().jecxz(skipall);
+ a.lea(ecx, ptr(ecx, -1));
+
+ a.bind(loop);
+ a.short_().jecxz(skiploop);
+ a.lea(ecx, ptr(ecx, -1));
+ a.rcl(reglo, 1);
+ a.rcl(reghi, 1);
+ a.short_().jmp(loop);
+
+ a.bind(skiploop);
+ reset_last_upper_lower_reg();
+ a.rcl(reglo, 1);
+ a.rcl(reghi, 1);
+
+ a.bind(skipall);
+ if (inst.flags())
{
- emit_jecxz_link(dst, skipall); // jecxz skipall
- emit_lea_r32_m32(dst, REG_ECX, MBD(REG_ECX, -1)); // lea ecx,[ecx-1]
- loop = dst; // loop:
- emit_jecxz_link(dst, skiploop); // jecxz skiploop
- emit_lea_r32_m32(dst, REG_ECX, MBD(REG_ECX, -1)); // lea ecx,[ecx-1]
- emit_rcl_r32_imm(dst, reglo, 1); // rcl reglo,1
- emit_rcl_r32_imm(dst, reghi, 1); // rcl reghi,1
- emit_jmp(dst, loop); // jmp loop
- track_resolve_link(dst, skiploop); // skiploop:
- emit_rcl_r32_imm(dst, reglo, 1); // rcl reglo,1
- emit_pushf(dst); // pushf
- emit_rcl_r32_imm(dst, reghi, 1); // rcl reghi,1
- track_resolve_link(dst, skipall); // skipall:
- emit_combine_z_flags(dst);
+ if (inst.flags() & FLAG_C)
+ calculate_status_flags(a, reglo, FLAG_Z);
+ else
+ a.test(reglo, reglo);
+ a.pushfd();
+ calculate_status_flags(a, reghi, FLAG_S | FLAG_Z);
+ emit_combine_z_flags(a);
}
+
+ reset_last_upper_lower_reg();
}
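
RCL has no wide shortcut because the carry bit takes part in the rotation, so the generated code loops over single-bit steps, decrementing ECX with a jecxz guard. One step of that 65-bit rotation (64 data bits plus carry) in plain C++:

    #include <cstdint>

    // One RCL step: the old carry enters bit 0, bit 31 of the low word feeds
    // bit 0 of the high word, and bit 31 of the high word becomes the new carry.
    inline void rcl64_step(uint32_t &lo, uint32_t &hi, bool &carry)
    {
        bool const new_carry = (hi >> 31) != 0;
        hi = (hi << 1) | (lo >> 31);
        lo = (lo << 1) | (carry ? 1u : 0u);
        carry = new_carry;
    }
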
@@ -2715,43 +2676,107 @@ void drcbe_x86::emit_rcl_r64_p64(x86code *&dst, uint8_t reglo, uint8_t reghi, co
// pair of registers from a 64-bit parameter
//-------------------------------------------------
-void drcbe_x86::emit_rcr_r64_p64(x86code *&dst, uint8_t reglo, uint8_t reghi, const be_parameter &param, const instruction &inst)
+void drcbe_x86::emit_rcr_r64_p64(Assembler &a, Gp const &reglo, Gp const &reghi, be_parameter const &param, const instruction &inst)
{
- int saveflags = (inst.flags() != 0);
- emit_link skipall, skiploop;
- x86code *loop;
+ Label loop = a.newLabel();
+ Label skipall = a.newLabel();
+ Label skiploop = a.newLabel();
+
+ a.pushfd(); // keep carry flag after and
+ emit_mov_r32_p32(a, ecx, param);
+
+ a.and_(ecx, 63);
+ a.popfd();
- emit_mov_r32_p32_keepflags(dst, REG_ECX, param); // mov ecx,param
- if (!saveflags)
+ a.short_().jecxz(skipall);
+ a.lea(ecx, ptr(ecx, -1));
+
+ a.bind(loop);
+ a.short_().jecxz(skiploop);
+ a.lea(ecx, ptr(ecx, -1));
+ a.rcr(reghi, 1);
+ a.rcr(reglo, 1);
+ a.short_().jmp(loop);
+
+ a.bind(skiploop);
+ reset_last_upper_lower_reg();
+ a.rcr(reghi, 1);
+ a.rcr(reglo, 1);
+
+ a.bind(skipall);
+ if (inst.flags())
{
- loop = dst; // loop:
- emit_jecxz_link(dst, skipall); // jecxz skipall
- emit_lea_r32_m32(dst, REG_ECX, MBD(REG_ECX, -1)); // lea ecx,[ecx-1]
- emit_rcr_r32_imm(dst, reghi, 1); // rcr reghi,1
- emit_rcr_r32_imm(dst, reglo, 1); // rcr reglo,1
- emit_jmp(dst, loop); // jmp loop
- track_resolve_link(dst, skipall); // skipall:
+ if (inst.flags() & FLAG_C)
+ calculate_status_flags(a, reglo, FLAG_Z);
+ else
+ a.test(reglo, reglo);
+ a.pushfd();
+ calculate_status_flags(a, reghi, FLAG_S | FLAG_Z);
+ emit_combine_z_flags(a);
}
- else
+
+ reset_last_upper_lower_reg();
+}
+
+
+void drcbe_x86::alu_op_param(Assembler &a, Inst::Id const opcode_lo, Inst::Id const opcode_hi, Gp const &lo, Gp const &hi, be_parameter const &param, bool saveflags)
+{
+ if (param.is_memory())
+ {
+ if (opcode_lo == Inst::kIdTest) // can't use memory on right of test
+ a.emit(opcode_lo, MABS(param.memory(0)), lo); // opl [param],reglo
+ else
+ a.emit(opcode_lo, lo, MABS(param.memory(0))); // opl reglo,[param]
+ if (saveflags) a.pushfd(); // pushf
+ if (opcode_hi == Inst::kIdTest) // can't use memory on right of test
+ a.emit(opcode_hi, MABS(param.memory(4)), hi); // oph [param],reghi
+ else
+ a.emit(opcode_hi, hi, MABS(param.memory(4))); // oph reghi,[param]
+ }
+ else if (param.is_immediate())
+ {
+ a.emit(opcode_lo, lo, param.immediate()); // opl reglo,param
+ if (saveflags) a.pushfd(); // pushf
+ a.emit(opcode_hi, hi, param.immediate() >> 32); // oph reghi,param >> 32
+ }
+ else if (param.is_int_register())
{
- emit_jecxz_link(dst, skipall); // jecxz skipall
- emit_lea_r32_m32(dst, REG_ECX, MBD(REG_ECX, -1)); // lea ecx,[ecx-1]
- loop = dst; // loop:
- emit_jecxz_link(dst, skiploop); // jecxz skiploop
- emit_lea_r32_m32(dst, REG_ECX, MBD(REG_ECX, -1)); // lea ecx,[ecx-1]
- emit_rcr_r32_imm(dst, reghi, 1); // rcr reghi,1
- emit_rcr_r32_imm(dst, reglo, 1); // rcr reglo,1
- emit_jmp(dst, loop); // jmp loop
- track_resolve_link(dst, skiploop); // skiploop:
- emit_rcr_r32_imm(dst, reghi, 1); // rcr reghi,1
- emit_pushf(dst); // pushf
- emit_rcr_r32_imm(dst, reglo, 1); // rcr reglo,1
- track_resolve_link(dst, skipall); // skipall:
- emit_combine_z_shl_flags(dst);
+ a.emit(opcode_lo, lo, Gpd(param.ireg())); // opl reglo,param
+ if (saveflags) a.pushfd(); // pushf
+
+ if (opcode_hi == Inst::kIdTest) // can't use memory on right of test
+ a.emit(opcode_hi, MABS(m_reghi[param.ireg()]), hi); // oph reghi[param],reghi
+ else
+ a.emit(opcode_hi, hi, MABS(m_reghi[param.ireg()])); // oph reghi,reghi[param]
}
+
+ if (saveflags)
+ emit_combine_z_flags(a);
}
+void drcbe_x86::alu_op_param(Assembler &a, Inst::Id const opcode_lo, Inst::Id const opcode_hi, Mem const &lo, Mem const &hi, be_parameter const &param, bool saveflags)
+{
+ if (param.is_immediate())
+ {
+ a.emit(opcode_lo, lo, param.immediate()); // opl [dest],param
+ if (saveflags) a.pushfd(); // pushf
+ a.emit(opcode_hi, hi, param.immediate() >> 32); // oph [dest+4],param >> 32
+ }
+ else
+ {
+ Gp const reg = (param.is_int_register()) ? Gpd(param.ireg()) : eax;
+
+ emit_mov_r64_p64(a, reg, edx, param); // mov edx:reglo,param
+ a.emit(opcode_lo, lo, reg); // opl [dest],reglo
+ if (saveflags) a.pushfd(); // pushf
+ a.emit(opcode_hi, hi, edx); // oph [dest+4],edx
+ }
+
+ if (saveflags)
+ emit_combine_z_flags(a);
+}
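
The new alu_op_param helpers fold the per-operation emitters above into one routine parameterised on the asmjit opcode, applied to the low half and then the high half with the flags saved in between. The shape of that factoring, shown as a host-side sketch rather than emitted code (names illustrative):

    #include <cstdint>
    #include <functional>

    struct pair32 { uint32_t lo, hi; };

    // Apply a 32-bit operation to each half of a 64-bit value; the real helper
    // does the same but emits instructions and preserves flags between halves.
    inline pair32 alu64(pair32 a, uint64_t imm, const std::function<uint32_t (uint32_t, uint32_t)> &op)
    {
        return { op(a.lo, uint32_t(imm)), op(a.hi, uint32_t(imm >> 32)) };
    }

    // e.g. alu64(x, mask, [] (uint32_t v, uint32_t m) { return v & m; });
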
+
//**************************************************************************
// EMITTERS FOR FLOATING POINT
@@ -2762,14 +2787,11 @@ void drcbe_x86::emit_rcr_r64_p64(x86code *&dst, uint8_t reglo, uint8_t reghi, co
// onto the stack
//-------------------------------------------------
-void drcbe_x86::emit_fld_p(x86code *&dst, int size, const be_parameter &param)
+void drcbe_x86::emit_fld_p(Assembler &a, int size, be_parameter const &param)
{
assert(param.is_memory());
assert(size == 4 || size == 8);
- if (size == 4)
- emit_fld_m32(dst, MABS(param.memory()));
- else if (size == 8)
- emit_fld_m64(dst, MABS(param.memory()));
+ a.fld(ptr(uintptr_t(param.memory()), size));
}
@@ -2778,101 +2800,42 @@ void drcbe_x86::emit_fld_p(x86code *&dst, int size, const be_parameter &param)
// from the stack and pop it
//-------------------------------------------------
-void drcbe_x86::emit_fstp_p(x86code *&dst, int size, const be_parameter &param)
+void drcbe_x86::emit_fstp_p(Assembler &a, int size, be_parameter const &param)
{
assert(param.is_memory());
assert(size == 4 || size == 8);
- if (size == 4)
- emit_fstp_m32(dst, MABS(param.memory()));
- else if (size == 8)
- emit_fstp_m64(dst, MABS(param.memory()));
+
+ a.fstp(ptr(uintptr_t(param.memory()), size));
}
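
The rewritten FP helpers rely on the operand size (4 or 8) carried by the sized memory operand to select single or double precision for fld/fstp. The equivalent host-side load, as a hedged illustration only:

    #include <cstring>

    inline double load_fp(const void *src, int size)
    {
        if (size == 4)
        {
            float f;
            std::memcpy(&f, src, sizeof(f));   // fld dword ptr [src]
            return f;
        }
        double d;
        std::memcpy(&d, src, sizeof(d));       // fld qword ptr [src]
        return d;
    }
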
//**************************************************************************
-// OUT-OF-BAND CODE FIXUP CALLBACKS
+// DEBUG HELPERS
//**************************************************************************
//-------------------------------------------------
-// fixup_label - callback to fixup forward-
-// referenced labels
+// end_of_block - function to catch falling off
+// the end of a generated code block
//-------------------------------------------------
-void drcbe_x86::fixup_label(void *parameter, drccodeptr labelcodeptr)
+[[noreturn]] void drcbe_x86::end_of_block() const
{
- drccodeptr src = (drccodeptr)parameter;
-
- // find the end of the instruction
- if (src[0] == 0xe3)
- {
- src += 1 + 1;
- src[-1] = labelcodeptr - src;
- }
- else if (src[0] == 0xe9)
- {
- src += 1 + 4;
- ((uint32_t *)src)[-1] = labelcodeptr - src;
- }
- else if (src[0] == 0x0f && (src[1] & 0xf0) == 0x80)
- {
- src += 2 + 4;
- ((uint32_t *)src)[-1] = labelcodeptr - src;
- }
- else
- fatalerror("fixup_label called with invalid jmp source!\n");
+ osd_printf_error("drcbe_x86(%s): fell off the end of a generated code block!\n", m_device.tag());
+ std::fflush(stdout);
+ std::fflush(stderr);
+ std::abort();
}
//-------------------------------------------------
-// fixup_exception - callback to perform cleanup
-// and jump to an exception handler
-//-------------------------------------------------
-
-void drcbe_x86::fixup_exception(drccodeptr *codeptr, void *param1, void *param2)
-{
- drccodeptr src = (drccodeptr)param1;
- const instruction &inst = *(const instruction *)param2;
-
- // normalize parameters
- const parameter &handp = inst.param(0);
- assert(handp.is_code_handle());
- be_parameter exp(*this, inst.param(1), PTYPE_MRI);
-
- // look up the handle target
- drccodeptr *targetptr = handp.handle().codeptr_addr();
-
- // first fixup the jump to get us here
- drccodeptr dst = *codeptr;
- ((uint32_t *)src)[-1] = dst - src;
-
- // then store the exception parameter
- emit_mov_m32_p32(dst, MABS(&m_state.exp), exp); // mov [exp],exp
-
- // push the original return address on the stack
- emit_push_imm(dst, (uintptr_t)src); // push <return>
- if (*targetptr != nullptr)
- emit_jmp(dst, *targetptr); // jmp *targetptr
- else
- emit_jmp_m32(dst, MABS(targetptr)); // jmp [targetptr]
-
- *codeptr = dst;
-}
-
-
-
-//**************************************************************************
-// DEBUG HELPERS
-//**************************************************************************
-
-//-------------------------------------------------
// debug_log_hashjmp - callback to handle
// logging of hashjmps
//-------------------------------------------------
void drcbe_x86::debug_log_hashjmp(int mode, offs_t pc)
{
- printf("mode=%d PC=%08X\n", mode, pc);
+ std::printf("mode=%d PC=%08X\n", mode, pc);
}
@@ -2885,7 +2848,7 @@ void drcbe_x86::debug_log_hashjmp(int mode, offs_t pc)
// op_handle - process a HANDLE opcode
//-------------------------------------------------
-void drcbe_x86::op_handle(x86code *&dst, const instruction &inst)
+void drcbe_x86::op_handle(Assembler &a, const instruction &inst)
{
assert_no_condition(inst);
assert_no_flags(inst);
@@ -2894,16 +2857,21 @@ void drcbe_x86::op_handle(x86code *&dst, const instruction &inst)
reset_last_upper_lower_reg();
+ // make a label for documentation
+ Label handle = a.newNamedLabel(inst.param(0).handle().string());
+ a.bind(handle);
+
// emit a jump around the stack adjust in case code falls through here
- emit_link skip;
- emit_jmp_short_link(dst, skip); // jmp skip
+ Label skip = a.newLabel();
+ a.short_().jmp(skip); // jmp skip
// register the current pointer for the handle
- inst.param(0).handle().set_codeptr(dst);
+ inst.param(0).handle().set_codeptr(drccodeptr(a.code()->baseAddress() + a.offset()));
// by default, the handle points to prolog code that moves the stack pointer
- emit_lea_r32_m32(dst, REG_ESP, MBD(REG_ESP, -28)); // lea rsp,[rsp-28]
- track_resolve_link(dst, skip); // skip:
+ a.lea(esp, ptr(esp, -28)); // lea rsp,[rsp-28]
+ a.bind(skip); // skip:
+ reset_last_upper_lower_reg();
}
@@ -2911,7 +2879,7 @@ void drcbe_x86::op_handle(x86code *&dst, const instruction &inst)
// op_hash - process a HASH opcode
//-------------------------------------------------
-void drcbe_x86::op_hash(x86code *&dst, const instruction &inst)
+void drcbe_x86::op_hash(Assembler &a, const instruction &inst)
{
assert_no_condition(inst);
assert_no_flags(inst);
@@ -2920,7 +2888,7 @@ void drcbe_x86::op_hash(x86code *&dst, const instruction &inst)
assert(inst.param(1).is_immediate());
// register the current pointer for the mode/PC
- m_hash.set_codeptr(inst.param(0).immediate(), inst.param(1).immediate(), dst);
+ m_hash.set_codeptr(inst.param(0).immediate(), inst.param(1).immediate(), drccodeptr(a.code()->baseAddress() + a.offset()));
reset_last_upper_lower_reg();
}
@@ -2929,15 +2897,21 @@ void drcbe_x86::op_hash(x86code *&dst, const instruction &inst)
// op_label - process a LABEL opcode
//-------------------------------------------------
-void drcbe_x86::op_label(x86code *&dst, const instruction &inst)
+void drcbe_x86::op_label(Assembler &a, const instruction &inst)
{
assert_no_condition(inst);
assert_no_flags(inst);
assert(inst.numparams() == 1);
assert(inst.param(0).is_code_label());
+ std::string labelName = util::string_format("PC$%x", inst.param(0).label());
+ Label label = a.labelByName(labelName.c_str());
+ if (!label.isValid())
+ label = a.newNamedLabel(labelName.c_str());
+
// register the current pointer for the label
- m_labels.set_codeptr(inst.param(0).label(), dst);
+ a.bind(label);
+
reset_last_upper_lower_reg();
}
@@ -2946,7 +2920,7 @@ void drcbe_x86::op_label(x86code *&dst, const instruction &inst)
// op_comment - process a COMMENT opcode
//-------------------------------------------------
-void drcbe_x86::op_comment(x86code *&dst, const instruction &inst)
+void drcbe_x86::op_comment(Assembler &a, const instruction &inst)
{
assert_no_condition(inst);
assert_no_flags(inst);
@@ -2961,7 +2935,7 @@ void drcbe_x86::op_comment(x86code *&dst, const instruction &inst)
// op_mapvar - process a MAPVAR opcode
//-------------------------------------------------
-void drcbe_x86::op_mapvar(x86code *&dst, const instruction &inst)
+void drcbe_x86::op_mapvar(Assembler &a, const instruction &inst)
{
assert_no_condition(inst);
assert_no_flags(inst);
@@ -2970,7 +2944,7 @@ void drcbe_x86::op_mapvar(x86code *&dst, const instruction &inst)
assert(inst.param(1).is_immediate());
// set the value of the specified mapvar
- m_map.set_value(dst, inst.param(0).mapvar(), inst.param(1).immediate());
+ m_map.set_value(drccodeptr(a.code()->baseAddress() + a.offset()), inst.param(0).mapvar(), inst.param(1).immediate());
}
@@ -2983,42 +2957,55 @@ void drcbe_x86::op_mapvar(x86code *&dst, const instruction &inst)
// op_nop - process a NOP opcode
//-------------------------------------------------
-void drcbe_x86::op_nop(x86code *&dst, const instruction &inst)
+void drcbe_x86::op_nop(Assembler &a, const instruction &inst)
{
// nothing
}
+//-------------------------------------------------
+// op_break - process a BREAK opcode
+//-------------------------------------------------
+
+void drcbe_x86::op_break(Assembler &a, const instruction &inst)
+{
+ static const char *const message = "break from drc";
+ a.mov(dword_ptr(esp, 0), imm(message));
+ a.call(imm(&osd_break_into_debugger));
+}
//-------------------------------------------------
// op_debug - process a DEBUG opcode
//-------------------------------------------------
-void drcbe_x86::op_debug(x86code *&dst, const instruction &inst)
+void drcbe_x86::op_debug(Assembler &a, const instruction &inst)
{
// validate instruction
assert(inst.size() == 4);
assert_no_condition(inst);
assert_no_flags(inst);
- using debugger_hook_func = void (*)(device_debug *, offs_t);
- static const debugger_hook_func debugger_inst_hook = [] (device_debug *dbg, offs_t pc) { dbg->instruction_hook(pc); }; // TODO: kill trampoline if possible
-
if ((m_device.machine().debug_flags & DEBUG_FLAG_ENABLED) != 0)
{
// normalize parameters
- be_parameter pcp(*this, inst.param(0), PTYPE_MRI);
+ be_parameter const pcp(*this, inst.param(0), PTYPE_MRI);
// test and branch
- emit_test_m32_imm(dst, MABS(&m_device.machine().debug_flags), DEBUG_FLAG_CALL_HOOK); // test [debug_flags],DEBUG_FLAG_CALL_HOOK
- emit_link skip = { nullptr };
- emit_jcc_short_link(dst, x86emit::COND_Z, skip); // jz skip
+ a.test(MABS(&m_device.machine().debug_flags, 4), DEBUG_FLAG_CALL_HOOK);
+ Label skip = a.newLabel();
+ a.short_().jz(skip);
// push the parameter
- emit_mov_m32_p32(dst, MBD(REG_ESP, 4), pcp); // mov [esp+4],pcp
- emit_mov_m32_imm(dst, MBD(REG_ESP, 0), (uintptr_t)m_device.debug()); // mov [esp],device.debug
- emit_call(dst, (x86code *)debugger_inst_hook); // call debugger_inst_hook
+ emit_mov_m32_p32(a, dword_ptr(esp, USE_THISCALL ? 0 : 4), pcp);
+ if (USE_THISCALL)
+ a.mov(ecx, imm(m_debug_cpu_instruction_hook.obj));
+ else
+ a.mov(dword_ptr(esp, 0), imm(m_debug_cpu_instruction_hook.obj));
+ a.call(imm(m_debug_cpu_instruction_hook.func));
+ if (USE_THISCALL)
+ a.sub(esp, 4);
- track_resolve_link(dst, skip); // skip:
+ a.bind(skip);
+ reset_last_upper_lower_reg();
}
}
@@ -3027,7 +3014,7 @@ void drcbe_x86::op_debug(x86code *&dst, const instruction &inst)
// op_exit - process an EXIT opcode
//-------------------------------------------------
-void drcbe_x86::op_exit(x86code *&dst, const instruction &inst)
+void drcbe_x86::op_exit(Assembler &a, const instruction &inst)
{
// validate instruction
assert(inst.size() == 4);
@@ -3038,11 +3025,11 @@ void drcbe_x86::op_exit(x86code *&dst, const instruction &inst)
be_parameter retp(*this, inst.param(0), PTYPE_MRI);
// load the parameter into EAX
- emit_mov_r32_p32(dst, REG_EAX, retp); // mov eax,retp
+ emit_mov_r32_p32(a, eax, retp); // mov eax,retp
if (inst.condition() == uml::COND_ALWAYS)
- emit_jmp(dst, m_exit); // jmp exit
+ a.jmp(imm(m_exit)); // jmp exit
else
- emit_jcc(dst, X86_CONDITION(inst.condition()), m_exit); // jcc exit
+ a.j(X86_CONDITION(inst.condition()), imm(m_exit)); // jcc exit
}
@@ -3050,7 +3037,7 @@ void drcbe_x86::op_exit(x86code *&dst, const instruction &inst)
// op_hashjmp - process a HASHJMP opcode
//-------------------------------------------------
-void drcbe_x86::op_hashjmp(x86code *&dst, const instruction &inst)
+void drcbe_x86::op_hashjmp(Assembler &a, const instruction &inst)
{
// validate instruction
assert(inst.size() == 4);
@@ -3060,74 +3047,71 @@ void drcbe_x86::op_hashjmp(x86code *&dst, const instruction &inst)
// normalize parameters
be_parameter modep(*this, inst.param(0), PTYPE_MRI);
be_parameter pcp(*this, inst.param(1), PTYPE_MRI);
- const parameter &exp = inst.param(2);
+ parameter const &exp = inst.param(2);
assert(exp.is_code_handle());
if (LOG_HASHJMPS)
{
- emit_mov_m32_p32(dst, MBD(REG_ESP, 4), pcp);
- emit_mov_m32_p32(dst, MBD(REG_ESP, 0), modep);
- emit_call(dst, (x86code *)debug_log_hashjmp);
+ emit_mov_m32_p32(a, dword_ptr(esp, 4), pcp);
+ emit_mov_m32_p32(a, dword_ptr(esp, 0), modep);
+ a.call(imm(debug_log_hashjmp));
}
// load the stack base one word early so we end up at the right spot after our call below
- emit_mov_r32_m32(dst, REG_ESP, MABS(&m_hashstacksave)); // mov esp,[hashstacksave]
+ a.mov(esp, MABS(&m_hashstacksave)); // mov esp,[hashstacksave]
- // fixed mode cases
if (modep.is_immediate() && m_hash.is_mode_populated(modep.immediate()))
{
- // a straight immediate jump is direct, though we need the PC in EAX in case of failure
+ // fixed mode cases
if (pcp.is_immediate())
{
+ // a straight immediate jump is direct, though we need the PC in EAX in case of failure
uint32_t l1val = (pcp.immediate() >> m_hash.l1shift()) & m_hash.l1mask();
uint32_t l2val = (pcp.immediate() >> m_hash.l2shift()) & m_hash.l2mask();
- emit_call_m32(dst, MABS(&m_hash.base()[modep.immediate()][l1val][l2val])); // call hash[modep][l1val][l2val]
+ a.call(MABS(&m_hash.base()[modep.immediate()][l1val][l2val])); // call hash[modep][l1val][l2val]
}
-
- // a fixed mode but variable PC
else
{
- emit_mov_r32_p32(dst, REG_EAX, pcp); // mov eax,pcp
- emit_mov_r32_r32(dst, REG_EDX, REG_EAX); // mov edx,eax
- emit_shr_r32_imm(dst, REG_EDX, m_hash.l1shift()); // shr edx,l1shift
- emit_and_r32_imm(dst, REG_EAX, m_hash.l2mask() << m_hash.l2shift());// and eax,l2mask << l2shift
- emit_mov_r32_m32(dst, REG_EDX, MABSI(&m_hash.base()[modep.immediate()][0], REG_EDX, 4));
- // mov edx,hash[modep+edx*4]
- emit_call_m32(dst, MBISD(REG_EDX, REG_EAX, 4 >> m_hash.l2shift(), 0));// call [edx+eax*shift]
+ // a fixed mode but variable PC
+ emit_mov_r32_p32(a, eax, pcp); // mov eax,pcp
+ a.mov(edx, eax); // mov edx,eax
+ a.shr(edx, m_hash.l1shift()); // shr edx,l1shift
+ a.and_(eax, m_hash.l2mask() << m_hash.l2shift()); // and eax,l2mask << l2shift
+ a.mov(edx, ptr(uintptr_t(&m_hash.base()[modep.immediate()][0]), edx, 2)); // mov edx,hash[modep+edx*4]
+ a.call(ptr(edx, eax, 2 - m_hash.l2shift())); // call [edx+eax*shift]
}
}
else
{
// variable mode
- int modereg = modep.select_register(REG_ECX);
- emit_mov_r32_p32(dst, modereg, modep); // mov modereg,modep
- emit_mov_r32_m32(dst, REG_ECX, MABSI(m_hash.base(), modereg, 4)); // mov ecx,hash[modereg*4]
+ Gp const modereg = modep.select_register(ecx);
+ emit_mov_r32_p32(a, modereg, modep); // mov modereg,modep
+ a.mov(ecx, ptr(uintptr_t(m_hash.base()), modereg, 2)); // mov ecx,hash[modereg*4]
- // fixed PC
if (pcp.is_immediate())
{
+ // fixed PC
uint32_t l1val = (pcp.immediate() >> m_hash.l1shift()) & m_hash.l1mask();
uint32_t l2val = (pcp.immediate() >> m_hash.l2shift()) & m_hash.l2mask();
- emit_mov_r32_m32(dst, REG_EDX, MBD(REG_ECX, l1val*4)); // mov edx,[ecx+l1val*4]
- emit_call_m32(dst, MBD(REG_EDX, l2val*4)); // call [l2val*4]
+ a.mov(edx, ptr(ecx, l1val*4)); // mov edx,[ecx+l1val*4]
+ a.call(ptr(edx, l2val*4)); // call [edx+l2val*4]
}
-
- // variable PC
else
{
- emit_mov_r32_p32(dst, REG_EAX, pcp); // mov eax,pcp
- emit_mov_r32_r32(dst, REG_EDX, REG_EAX); // mov edx,eax
- emit_shr_r32_imm(dst, REG_EDX, m_hash.l1shift()); // shr edx,l1shift
- emit_mov_r32_m32(dst, REG_EDX, MBISD(REG_ECX, REG_EDX, 4, 0)); // mov edx,[ecx+edx*4]
- emit_and_r32_imm(dst, REG_EAX, m_hash.l2mask() << m_hash.l2shift());// and eax,l2mask << l2shift
- emit_call_m32(dst, MBISD(REG_EDX, REG_EAX, 4 >> m_hash.l2shift(), 0));// call [edx+eax*shift]
+ // variable PC
+ emit_mov_r32_p32(a, eax, pcp); // mov eax,pcp
+ a.mov(edx, eax); // mov edx,eax
+ a.shr(edx, m_hash.l1shift()); // shr edx,l1shift
+ a.mov(edx, ptr(ecx, edx, 2)); // mov edx,[ecx+edx*4]
+ a.and_(eax, m_hash.l2mask() << m_hash.l2shift()); // and eax,l2mask << l2shift
+ a.call(ptr(edx, eax, 2 - m_hash.l2shift())); // call [edx+eax*shift]
}
}
// in all cases, if there is no code, we return here to generate the exception
- emit_mov_m32_p32(dst, MABS(&m_state.exp), pcp); // mov [exp],param
- emit_sub_r32_imm(dst, REG_ESP, 4); // sub esp,4
- emit_call_m32(dst, MABS(exp.handle().codeptr_addr())); // call [exp]
+ emit_mov_m32_p32(a, MABS(&m_state.exp, 4), pcp); // mov [exp],param
+ a.sub(esp, 4); // sub esp,4
+ a.call(MABS(exp.handle().codeptr_addr())); // call [exp]
}
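
For reference, the dispatch above is a two-level table walk keyed on mode and PC; a minimal C++ restatement is below (the shift/mask parameters are hypothetical here, the real values come from the back end's hash configuration). Note that asmjit memory operands take a log2 shift, which is why the emitted forms use 2 for a 4-byte stride and 2 - l2shift to fold the second-level shift into the scale. Entries for code that has not been generated yet route control straight back to the caller, which is why the exception sequence is emitted immediately after the dispatch.

#include <cstdint>

// Plain C++ model of hash[mode][l1][l2] as used by the generated dispatch.
void *hash_lookup(void ****base, uint32_t mode, uint32_t pc,
		int l1shift, uint32_t l1mask, int l2shift, uint32_t l2mask)
{
	uint32_t const l1val = (pc >> l1shift) & l1mask;   // first-level index
	uint32_t const l2val = (pc >> l2shift) & l2mask;   // second-level index
	return base[mode][l1val][l2val];                   // target code pointer
}
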
@@ -3135,7 +3119,7 @@ void drcbe_x86::op_hashjmp(x86code *&dst, const instruction &inst)
// op_jmp - process a JMP opcode
//-------------------------------------------------
-void drcbe_x86::op_jmp(x86code *&dst, const instruction &inst)
+void drcbe_x86::op_jmp(Assembler &a, const instruction &inst)
{
// validate instruction
assert(inst.size() == 4);
@@ -3143,15 +3127,18 @@ void drcbe_x86::op_jmp(x86code *&dst, const instruction &inst)
assert_no_flags(inst);
// normalize parameters
- const parameter &labelp = inst.param(0);
+ parameter const &labelp = inst.param(0);
assert(labelp.is_code_label());
- // look up the jump target and jump there
- x86code *jmptarget = (x86code *)m_labels.get_codeptr(labelp.label(), m_fixup_label, dst);
+ std::string labelName = util::string_format("PC$%x", labelp.label());
+ Label jmptarget = a.labelByName(labelName.c_str());
+ if (!jmptarget.isValid())
+ jmptarget = a.newNamedLabel(labelName.c_str());
+
if (inst.condition() == uml::COND_ALWAYS)
- emit_jmp(dst, jmptarget); // jmp target
+ a.jmp(jmptarget);
else
- emit_jcc(dst, X86_CONDITION(inst.condition()), jmptarget); // jcc target
+ a.j(X86_CONDITION(inst.condition()), jmptarget);
}
@@ -3159,7 +3146,7 @@ void drcbe_x86::op_jmp(x86code *&dst, const instruction &inst)
// op_exh - process an EXH opcode
//-------------------------------------------------
-void drcbe_x86::op_exh(x86code *&dst, const instruction &inst)
+void drcbe_x86::op_exh(Assembler &a, const instruction &inst)
{
// validate instruction
assert(inst.size() == 4);
@@ -3167,29 +3154,27 @@ void drcbe_x86::op_exh(x86code *&dst, const instruction &inst)
assert_no_flags(inst);
// normalize parameters
- const parameter &handp = inst.param(0);
+ parameter const &handp = inst.param(0);
assert(handp.is_code_handle());
be_parameter exp(*this, inst.param(1), PTYPE_MRI);
// look up the handle target
drccodeptr *targetptr = handp.handle().codeptr_addr();
- // perform the exception processing inline if unconditional
- if (inst.condition() == uml::COND_ALWAYS)
+ // perform the exception processing
+ Label no_exception;
+ if (inst.condition() != uml::COND_ALWAYS)
{
- emit_mov_m32_p32(dst, MABS(&m_state.exp), exp); // mov [exp],exp
- if (*targetptr != nullptr)
- emit_call(dst, *targetptr); // call *targetptr
- else
- emit_call_m32(dst, MABS(targetptr)); // call [targetptr]
+ no_exception = a.newLabel();
+ a.short_().j(X86_NOT_CONDITION(inst.condition()), no_exception); // jcc no_exception
}
-
- // otherwise, jump to an out-of-band handler
+ emit_mov_m32_p32(a, MABS(&m_state.exp, 4), exp); // mov [exp],exp
+ if (*targetptr != nullptr)
+ a.call(imm(*targetptr)); // call *targetptr
else
- {
- emit_jcc(dst, X86_CONDITION(inst.condition()), nullptr); // jcc exception
- m_cache.request_oob_codegen(m_fixup_exception, dst, &const_cast<instruction &>(inst));
- }
+ a.call(MABS(targetptr)); // call [targetptr]
+ if (inst.condition() != uml::COND_ALWAYS)
+ a.bind(no_exception);
}
@@ -3197,7 +3182,7 @@ void drcbe_x86::op_exh(x86code *&dst, const instruction &inst)
// op_callh - process a CALLH opcode
//-------------------------------------------------
-void drcbe_x86::op_callh(x86code *&dst, const instruction &inst)
+void drcbe_x86::op_callh(Assembler &a, const instruction &inst)
{
// validate instruction
assert(inst.size() == 4);
@@ -3205,26 +3190,32 @@ void drcbe_x86::op_callh(x86code *&dst, const instruction &inst)
assert_no_flags(inst);
// normalize parameters
- const parameter &handp = inst.param(0);
+ parameter const &handp = inst.param(0);
assert(handp.is_code_handle());
// look up the handle target
drccodeptr *targetptr = handp.handle().codeptr_addr();
// skip if conditional
- emit_link skip = { nullptr };
+ Label skip;
if (inst.condition() != uml::COND_ALWAYS)
- emit_jcc_short_link(dst, X86_NOT_CONDITION(inst.condition()), skip); // jcc skip
+ {
+ skip = a.newLabel();
+ a.short_().j(X86_NOT_CONDITION(inst.condition()), skip); // jcc skip
+ }
// jump through the handle; directly if a normal jump
if (*targetptr != nullptr)
- emit_call(dst, *targetptr); // call *targetptr
+ a.call(imm(*targetptr)); // call *targetptr
else
- emit_call_m32(dst, MABS(targetptr)); // call [targetptr]
+ a.call(MABS(targetptr)); // call [targetptr]
// resolve the conditional link
if (inst.condition() != uml::COND_ALWAYS)
- track_resolve_link(dst, skip); // skip:
+ {
+ a.bind(skip); // skip:
+ reset_last_upper_lower_reg();
+ }
}
@@ -3232,7 +3223,7 @@ void drcbe_x86::op_callh(x86code *&dst, const instruction &inst)
// op_ret - process a RET opcode
//-------------------------------------------------
-void drcbe_x86::op_ret(x86code *&dst, const instruction &inst)
+void drcbe_x86::op_ret(Assembler &a, const instruction &inst)
{
// validate instruction
assert(inst.size() == 4);
@@ -3241,17 +3232,23 @@ void drcbe_x86::op_ret(x86code *&dst, const instruction &inst)
assert(inst.numparams() == 0);
// skip if conditional
- emit_link skip = { nullptr };
+ Label skip;
if (inst.condition() != uml::COND_ALWAYS)
- emit_jcc_short_link(dst, X86_NOT_CONDITION(inst.condition()), skip); // jcc skip
+ {
+ skip = a.newLabel();
+ a.short_().j(X86_NOT_CONDITION(inst.condition()), skip);
+ }
// return
- emit_lea_r32_m32(dst, REG_ESP, MBD(REG_ESP, 28)); // lea rsp,[rsp+28]
- emit_ret(dst); // ret
+ a.lea(esp, ptr(esp, 28)); // lea esp,[esp+28]
+ a.ret(); // ret
// resolve the conditional link
if (inst.condition() != uml::COND_ALWAYS)
- track_resolve_link(dst, skip); // skip:
+ {
+ a.bind(skip); // skip:
+ reset_last_upper_lower_reg();
+ }
}
@@ -3259,7 +3256,7 @@ void drcbe_x86::op_ret(x86code *&dst, const instruction &inst)
// op_callc - process a CALLC opcode
//-------------------------------------------------
-void drcbe_x86::op_callc(x86code *&dst, const instruction &inst)
+void drcbe_x86::op_callc(Assembler &a, const instruction &inst)
{
// validate instruction
assert(inst.size() == 4);
@@ -3267,22 +3264,28 @@ void drcbe_x86::op_callc(x86code *&dst, const instruction &inst)
assert_no_flags(inst);
// normalize parameters
- const parameter &funcp = inst.param(0);
+ parameter const &funcp = inst.param(0);
assert(funcp.is_c_function());
be_parameter paramp(*this, inst.param(1), PTYPE_M);
// skip if conditional
- emit_link skip = { nullptr };
+ Label skip;
if (inst.condition() != uml::COND_ALWAYS)
- emit_jcc_short_link(dst, X86_NOT_CONDITION(inst.condition()), skip); // jcc skip
+ {
+ skip = a.newLabel();
+ a.short_().j(X86_NOT_CONDITION(inst.condition()), skip);
+ }
// perform the call
- emit_mov_m32_imm(dst, MBD(REG_ESP, 0), (uintptr_t)paramp.memory()); // mov [esp],paramp
- emit_call(dst, (x86code *)(uintptr_t)funcp.cfunc()); // call funcp
+ a.mov(dword_ptr(esp, 0), imm(paramp.memory()));
+ a.call(imm(funcp.cfunc()));
// resolve the conditional link
if (inst.condition() != uml::COND_ALWAYS)
- track_resolve_link(dst, skip); // skip:
+ {
+ a.bind(skip);
+ reset_last_upper_lower_reg();
+ }
}
@@ -3290,7 +3293,7 @@ void drcbe_x86::op_callc(x86code *&dst, const instruction &inst)
// op_recover - process a RECOVER opcode
//-------------------------------------------------
-void drcbe_x86::op_recover(x86code *&dst, const instruction &inst)
+void drcbe_x86::op_recover(Assembler &a, const instruction &inst)
{
// validate instruction
assert(inst.size() == 4);
@@ -3301,14 +3304,19 @@ void drcbe_x86::op_recover(x86code *&dst, const instruction &inst)
be_parameter dstp(*this, inst.param(0), PTYPE_MR);
// call the recovery code
- emit_mov_r32_m32(dst, REG_EAX, MABS(&m_stacksave)); // mov eax,stacksave
- emit_mov_r32_m32(dst, REG_EAX, MBD(REG_EAX, -4)); // mov eax,[eax-4]
- emit_sub_r32_imm(dst, REG_EAX, 1); // sub eax,1
- emit_mov_m32_imm(dst, MBD(REG_ESP, 8), inst.param(1).mapvar()); // mov [esp+8],param1
- emit_mov_m32_r32(dst, MBD(REG_ESP, 4), REG_EAX); // mov [esp+4],eax
- emit_mov_m32_imm(dst, MBD(REG_ESP, 0), (uintptr_t)&m_map); // mov [esp],m_map
- emit_call(dst, (x86code *)&drc_map_variables::static_get_value); // call drcmap_get_value
- emit_mov_p32_r32(dst, dstp, REG_EAX); // mov dstp,eax
+ a.mov(eax, MABS(&m_stacksave));
+ a.mov(eax, ptr(eax, -4));
+ a.sub(eax, 1);
+ a.mov(dword_ptr(esp, USE_THISCALL ? 4 : 8), inst.param(1).mapvar());
+ a.mov(ptr(esp, USE_THISCALL ? 0 : 4), eax);
+ if (USE_THISCALL)
+ a.mov(ecx, imm(m_drcmap_get_value.obj));
+ else
+ a.mov(dword_ptr(esp, 0), imm(m_drcmap_get_value.obj));
+ a.call(imm(m_drcmap_get_value.func));
+ if (USE_THISCALL)
+ a.sub(esp, 8);
+ emit_mov_p32_r32(a, dstp, eax);
}
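
The sub esp,N that follows each resolved call here (and in the READ/WRITE handlers below) compensates for the callee popping its N bytes of stack arguments under the 32-bit Windows __thiscall convention, so the fixed stack frame the back end maintains stays where later stores expect it. A tiny model of that bookkeeping, assuming standard callee-pops semantics:

#include <cassert>
#include <cstdint>

int main()
{
	uint32_t esp = 0x1000;          // fixed frame kept by the generated code
	uint32_t const argbytes = 8;    // two dword arguments at [esp] and [esp+4]
	bool const use_thiscall = true; // a cdecl callee would leave esp untouched

	if (use_thiscall)
		esp += argbytes;            // effect of the callee's 'ret 8'
	if (use_thiscall)
		esp -= argbytes;            // mirrors 'a.sub(esp, 8)' after the call

	assert(esp == 0x1000);          // frame is back where it started
	return 0;
}
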
@@ -3321,7 +3329,7 @@ void drcbe_x86::op_recover(x86code *&dst, const instruction &inst)
// op_setfmod - process a SETFMOD opcode
//-------------------------------------------------
-void drcbe_x86::op_setfmod(x86code *&dst, const instruction &inst)
+void drcbe_x86::op_setfmod(Assembler &a, const instruction &inst)
{
// validate instruction
assert(inst.size() == 4);
@@ -3335,17 +3343,17 @@ void drcbe_x86::op_setfmod(x86code *&dst, const instruction &inst)
if (srcp.is_immediate())
{
int value = srcp.immediate() & 3;
- emit_mov_m8_imm(dst, MABS(&m_state.fmod), value); // mov [fmod],srcp
- emit_fldcw_m16(dst, MABS(&fp_control[value])); // fldcw fp_control[srcp]
+ a.mov(MABS(&m_state.fmod, 1), value); // mov [fmod],srcp
+ a.fldcw(MABS(&fp_control[value], 2)); // fldcw fp_control[srcp]
}
// register/memory case
else
{
- emit_mov_r32_p32(dst, REG_EAX, srcp); // mov eax,srcp
- emit_and_r32_imm(dst, REG_EAX, 3); // and eax,3
- emit_mov_m8_r8(dst, MABS(&m_state.fmod), REG_AL); // mov [fmod],al
- emit_fldcw_m16(dst, MABSI(&fp_control[0], REG_EAX, 2)); // fldcw fp_control[eax]
+ emit_mov_r32_p32(a, eax, srcp); // mov eax,srcp
+ a.and_(eax, 3); // and eax,3
+ a.mov(MABS(&m_state.fmod), al); // mov [fmod],al
+ a.fldcw(ptr(uintptr_t(&fp_control[0]), eax, 1, 2)); // fldcw fp_control[eax]
}
}
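
The fldcw above simply loads a pre-built x87 control word for the requested rounding mode. As a reminder of the encoding this relies on (standard x87 layout, rounding control in bits 10-11), a small decoder with synthetic values:

#include <cassert>
#include <cstdint>

// 0 = round to nearest, 1 = round down, 2 = round up, 3 = truncate
unsigned rounding_control(uint16_t control_word)
{
	return (control_word >> 10) & 3;
}

int main()
{
	uint16_t const masked_exceptions = 0x003f;                    // low bits: exception masks
	assert(rounding_control(masked_exceptions | (0 << 10)) == 0); // ROUND_ROUND
	assert(rounding_control(masked_exceptions | (1 << 10)) == 1); // ROUND_FLOOR
	assert(rounding_control(masked_exceptions | (2 << 10)) == 2); // ROUND_CEIL
	assert(rounding_control(masked_exceptions | (3 << 10)) == 3); // ROUND_TRUNC
	return 0;
}
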
@@ -3354,7 +3362,7 @@ void drcbe_x86::op_setfmod(x86code *&dst, const instruction &inst)
// op_getfmod - process a GETFMOD opcode
//-------------------------------------------------
-void drcbe_x86::op_getfmod(x86code *&dst, const instruction &inst)
+void drcbe_x86::op_getfmod(Assembler &a, const instruction &inst)
{
// validate instruction
assert(inst.size() == 4);
@@ -3366,11 +3374,11 @@ void drcbe_x86::op_getfmod(x86code *&dst, const instruction &inst)
// fetch the current mode and store to the destination
if (dstp.is_int_register())
- emit_movzx_r32_m8(dst, dstp.ireg(), MABS(&m_state.fmod)); // movzx reg,[fmod]
+ a.movzx(Gpd(dstp.ireg()), MABS(&m_state.fmod, 1)); // movzx reg,[fmod]
else
{
- emit_movzx_r32_m8(dst, REG_EAX, MABS(&m_state.fmod)); // movzx eax,[fmod]
- emit_mov_m32_r32(dst, MABS(dstp.memory()), REG_EAX); // mov [dstp],eax
+ a.movzx(eax, MABS(&m_state.fmod, 1)); // movzx eax,[fmod]
+ a.mov(MABS(dstp.memory()), eax); // mov [dstp],eax
}
}
@@ -3379,7 +3387,7 @@ void drcbe_x86::op_getfmod(x86code *&dst, const instruction &inst)
// op_getexp - process a GETEXP opcode
//-------------------------------------------------
-void drcbe_x86::op_getexp(x86code *&dst, const instruction &inst)
+void drcbe_x86::op_getexp(Assembler &a, const instruction &inst)
{
// validate instruction
assert(inst.size() == 4);
@@ -3391,11 +3399,11 @@ void drcbe_x86::op_getexp(x86code *&dst, const instruction &inst)
// fetch the exception parameter and store to the destination
if (dstp.is_int_register())
- emit_mov_r32_m32(dst, dstp.ireg(), MABS(&m_state.exp)); // mov reg,[exp]
+ a.mov(Gpd(dstp.ireg()), MABS(&m_state.exp)); // mov reg,[exp]
else
{
- emit_mov_r32_m32(dst, REG_EAX, MABS(&m_state.exp)); // mov eax,[exp]
- emit_mov_m32_r32(dst, MABS(dstp.memory()), REG_EAX); // mov [dstp],eax
+ a.mov(eax, MABS(&m_state.exp)); // mov eax,[exp]
+ a.mov(MABS(dstp.memory()), eax); // mov [dstp],eax
}
}
@@ -3404,7 +3412,7 @@ void drcbe_x86::op_getexp(x86code *&dst, const instruction &inst)
// op_getflgs - process a GETFLGS opcode
//-------------------------------------------------
-void drcbe_x86::op_getflgs(x86code *&dst, const instruction &inst)
+void drcbe_x86::op_getflgs(Assembler &a, const instruction &inst)
{
// validate instruction
assert(inst.size() == 4);
@@ -3416,7 +3424,9 @@ void drcbe_x86::op_getflgs(x86code *&dst, const instruction &inst)
be_parameter maskp(*this, inst.param(1), PTYPE_I);
// pick a target register for the general case
- int dstreg = dstp.select_register(REG_EAX);
+ Gp const dstreg = dstp.select_register(eax);
+
+ a.pushfd();
// compute mask for flags
uint32_t flagmask = 0;
@@ -3430,109 +3440,135 @@ void drcbe_x86::op_getflgs(x86code *&dst, const instruction &inst)
{
// single flags only
case FLAG_C:
- emit_setcc_r8(dst, x86emit::COND_C, REG_AL); // setc al
- emit_movzx_r32_r8(dst, dstreg, REG_AL); // movzx dstreg,al
+ a.setc(al); // setc al
+ a.movzx(dstreg, al); // movzx dstreg,al
break;
case FLAG_V:
- emit_setcc_r8(dst, x86emit::COND_O, REG_AL); // seto al
- emit_movzx_r32_r8(dst, dstreg, REG_AL); // movzx dstreg,al
- emit_shl_r32_imm(dst, dstreg, 1); // shl dstreg,1
+ a.seto(al); // seto al
+ a.movzx(dstreg, al); // movzx dstreg,al
+ a.shl(dstreg, 1); // shl dstreg,1
break;
case FLAG_Z:
- emit_setcc_r8(dst, x86emit::COND_Z, REG_AL); // setz al
- emit_movzx_r32_r8(dst, dstreg, REG_AL); // movzx dstreg,al
- emit_shl_r32_imm(dst, dstreg, 2); // shl dstreg,2
+ a.setz(al); // setz al
+ a.movzx(dstreg, al); // movzx dstreg,al
+ a.shl(dstreg, 2); // shl dstreg,2
break;
case FLAG_S:
- emit_setcc_r8(dst, x86emit::COND_S, REG_AL); // sets al
- emit_movzx_r32_r8(dst, dstreg, REG_AL); // movzx dstreg,al
- emit_shl_r32_imm(dst, dstreg, 3); // shl dstreg,3
+ a.sets(al); // sets al
+ a.movzx(dstreg, al); // movzx dstreg,al
+ a.shl(dstreg, 3); // shl dstreg,3
break;
case FLAG_U:
- emit_setcc_r8(dst, x86emit::COND_P, REG_AL); // setp al
- emit_movzx_r32_r8(dst, dstreg, REG_AL); // movzx dstreg,al
- emit_shl_r32_imm(dst, dstreg, 4); // shl dstreg,4
+ a.setp(al); // setp al
+ a.movzx(dstreg, al); // movzx dstreg,al
+ a.shl(dstreg, 4); // shl dstreg,4
break;
// carry plus another flag
case FLAG_C | FLAG_V:
- emit_setcc_r8(dst, x86emit::COND_C, REG_AL); // setc al
- emit_setcc_r8(dst, x86emit::COND_O, REG_CL); // seto cl
- emit_movzx_r32_r8(dst, REG_EAX, REG_AL); // movzx eax,al
- emit_movzx_r32_r8(dst, REG_ECX, REG_CL); // movzx ecx,al
- emit_lea_r32_m32(dst, dstreg, MBISD(REG_EAX, REG_ECX, 2, 0)); // lea dstreg,[eax+ecx*2]
+ a.setc(al); // setc al
+ a.seto(cl); // seto cl
+ a.movzx(eax, al); // movzx eax,al
+ a.movzx(ecx, cl); // movzx ecx,cl
+ a.lea(dstreg, ptr(eax, ecx, 1)); // lea dstreg,[eax+ecx*2]
break;
case FLAG_C | FLAG_Z:
- emit_setcc_r8(dst, x86emit::COND_C, REG_AL); // setc al
- emit_setcc_r8(dst, x86emit::COND_Z, REG_CL); // setz cl
- emit_movzx_r32_r8(dst, REG_EAX, REG_AL); // movzx eax,al
- emit_movzx_r32_r8(dst, REG_ECX, REG_CL); // movzx ecx,al
- emit_lea_r32_m32(dst, dstreg, MBISD(REG_EAX, REG_ECX, 4, 0)); // lea dstreg,[eax+ecx*4]
+ a.setc(al); // setc al
+ a.setz(cl); // setz cl
+ a.movzx(eax, al); // movzx eax,al
+ a.movzx(ecx, cl); // movzx ecx,cl
+ a.lea(dstreg, ptr(eax, ecx, 2)); // lea dstreg,[eax+ecx*4]
break;
case FLAG_C | FLAG_S:
- emit_setcc_r8(dst, x86emit::COND_C, REG_AL); // setc al
- emit_setcc_r8(dst, x86emit::COND_S, REG_CL); // sets cl
- emit_movzx_r32_r8(dst, REG_EAX, REG_AL); // movzx eax,al
- emit_movzx_r32_r8(dst, REG_ECX, REG_CL); // movzx ecx,al
- emit_lea_r32_m32(dst, dstreg, MBISD(REG_EAX, REG_ECX, 8, 0)); // lea dstreg,[eax+ecx*8]
+ a.setc(al); // setc al
+ a.sets(cl); // sets cl
+ a.movzx(eax, al); // movzx eax,al
+ a.movzx(ecx, cl); // movzx ecx,cl
+ a.lea(dstreg, ptr(eax, ecx, 3)); // lea dstreg,[eax+ecx*8]
break;
// overflow plus another flag
case FLAG_V | FLAG_Z:
- emit_setcc_r8(dst, x86emit::COND_O, REG_AL); // seto al
- emit_setcc_r8(dst, x86emit::COND_Z, REG_CL); // setz cl
- emit_movzx_r32_r8(dst, REG_EAX, REG_AL); // movzx eax,al
- emit_movzx_r32_r8(dst, REG_ECX, REG_CL); // movzx ecx,al
- emit_lea_r32_m32(dst, dstreg, MBISD(REG_EAX, REG_ECX, 2, 0)); // lea dstreg,[eax+ecx*2]
- emit_shl_r32_imm(dst, dstreg, 1); // shl dstreg,1
+ a.seto(al); // seto al
+ a.setz(cl); // setz cl
+ a.movzx(eax, al); // movzx eax,al
+ a.movzx(ecx, cl); // movzx ecx,cl
+ a.lea(dstreg, ptr(eax, ecx, 1)); // lea dstreg,[eax+ecx*2]
+ a.shl(dstreg, 1); // shl dstreg,1
break;
case FLAG_V | FLAG_S:
- emit_setcc_r8(dst, x86emit::COND_O, REG_AL); // seto al
- emit_setcc_r8(dst, x86emit::COND_S, REG_CL); // sets cl
- emit_movzx_r32_r8(dst, REG_EAX, REG_AL); // movzx eax,al
- emit_movzx_r32_r8(dst, REG_ECX, REG_CL); // movzx ecx,al
- emit_lea_r32_m32(dst, dstreg, MBISD(REG_EAX, REG_ECX, 4, 0)); // lea dstreg,[eax+ecx*4]
- emit_shl_r32_imm(dst, dstreg, 1); // shl dstreg,1
+ a.seto(al); // seto al
+ a.sets(cl); // sets cl
+ a.movzx(eax, al); // movzx eax,al
+ a.movzx(ecx, cl); // movzx ecx,cl
+ a.lea(dstreg, ptr(eax, ecx, 2)); // lea dstreg,[eax+ecx*4]
+ a.shl(dstreg, 1); // shl dstreg,1
break;
// zero plus another flag
case FLAG_Z | FLAG_S:
- emit_setcc_r8(dst, x86emit::COND_Z, REG_AL); // setz al
- emit_setcc_r8(dst, x86emit::COND_S, REG_CL); // sets cl
- emit_movzx_r32_r8(dst, REG_EAX, REG_AL); // movzx eax,al
- emit_movzx_r32_r8(dst, REG_ECX, REG_CL); // movzx ecx,al
- emit_lea_r32_m32(dst, dstreg, MBISD(REG_EAX, REG_ECX, 2, 0)); // lea dstreg,[eax+ecx*2]
- emit_shl_r32_imm(dst, dstreg, 2); // shl dstreg,2
+ a.setz(al); // setz al
+ a.sets(cl); // sets cl
+ a.movzx(eax, al); // movzx eax,al
+ a.movzx(ecx, cl); // movzx ecx,cl
+ a.lea(dstreg, ptr(eax, ecx, 1)); // lea dstreg,[eax+ecx*2]
+ a.shl(dstreg, 2); // shl dstreg,2
break;
// default cases
default:
- emit_pushf(dst); // pushf
- emit_pop_r32(dst, REG_EAX); // pop eax
- emit_and_r32_imm(dst, REG_EAX, flagmask); // and eax,flagmask
- emit_movzx_r32_m8(dst, dstreg, MABSI(flags_map, REG_EAX)); // movzx dstreg,[flags_map]
+ a.pushfd(); // pushf
+ a.pop(eax); // pop eax
+ a.and_(eax, flagmask); // and eax,flagmask
+ a.movzx(dstreg, byte_ptr(uintptr_t(flags_map), eax)); // movzx dstreg,[flags_map]
break;
}
// store low 32 bits
- emit_mov_p32_r32(dst, dstp, dstreg); // mov dstp,dstreg
+ emit_mov_p32_r32(a, dstp, dstreg); // mov dstp,dstreg
// 64-bit form stores upper 32 bits
if (inst.size() == 8)
{
// general case
if (dstp.is_memory())
- emit_mov_m32_imm(dst, MABS(dstp.memory(4)), 0); // mov [dstp+4],0
+ a.mov(MABS(dstp.memory(4), 4), 0); // mov [dstp+4],0
else if (dstp.is_int_register())
- emit_mov_m32_imm(dst, MABS(m_reghi[dstp.ireg()]), 0); // mov [reghi],0
+ a.mov(MABS(m_reghi[dstp.ireg()], 4), 0); // mov [reghi],0
}
+
+ a.popfd();
+}
+
+
+//-------------------------------------------------
+// op_setflgs - process a SETFLGS opcode
+//-------------------------------------------------
+
+void drcbe_x86::op_setflgs(Assembler &a, const instruction &inst)
+{
+ assert(inst.size() == 4);
+ assert_no_condition(inst);
+ assert_no_flags(inst);
+
+ be_parameter srcp(*this, inst.param(0), PTYPE_MRI);
+
+ a.pushfd();
+
+ emit_mov_r32_p32(a, eax, srcp);
+
+ a.mov(eax, ptr(uintptr_t(flags_unmap), eax, 2));
+ a.and_(dword_ptr(esp), ~0x8c5);
+ a.or_(dword_ptr(esp), eax);
+
+ a.popfd();
}
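
The flags_map/flags_unmap tables used by GETFLGS and SETFLGS translate between the packed UML flag word (C, V, Z, S, U in bits 0-4, matching the single-flag cases above) and the architectural EFLAGS bits; the ~0x8c5 mask clears exactly the CF, PF, ZF, SF and OF positions before the translated value is OR-ed back in. A sketch of how such tables can be built (the real tables are presumably filled in when the back end is set up):

#include <cstdint>

uint8_t  sketch_flags_map[0x1000];  // indexed by the masked EFLAGS value
uint32_t sketch_flags_unmap[0x20];  // indexed by the 5-bit UML flag word

void build_flag_tables()
{
	for (uint32_t eflags = 0; eflags < 0x1000; eflags++)
	{
		uint8_t uml = 0;
		if (eflags & 0x001) uml |= 0x01;    // CF -> FLAG_C
		if (eflags & 0x800) uml |= 0x02;    // OF -> FLAG_V
		if (eflags & 0x040) uml |= 0x04;    // ZF -> FLAG_Z
		if (eflags & 0x080) uml |= 0x08;    // SF -> FLAG_S
		if (eflags & 0x004) uml |= 0x10;    // PF -> FLAG_U
		sketch_flags_map[eflags] = uml;
	}
	for (uint32_t uml = 0; uml < 0x20; uml++)
	{
		uint32_t eflags = 0;
		if (uml & 0x01) eflags |= 0x001;    // FLAG_C -> CF
		if (uml & 0x02) eflags |= 0x800;    // FLAG_V -> OF
		if (uml & 0x04) eflags |= 0x040;    // FLAG_Z -> ZF
		if (uml & 0x08) eflags |= 0x080;    // FLAG_S -> SF
		if (uml & 0x10) eflags |= 0x004;    // FLAG_U -> PF
		sketch_flags_unmap[uml] = eflags;
	}
}
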
@@ -3540,7 +3576,7 @@ void drcbe_x86::op_getflgs(x86code *&dst, const instruction &inst)
// op_save - process a SAVE opcode
//-------------------------------------------------
-void drcbe_x86::op_save(x86code *&dst, const instruction &inst)
+void drcbe_x86::op_save(Assembler &a, const instruction &inst)
{
// validate instruction
assert(inst.size() == 4);
@@ -3551,8 +3587,8 @@ void drcbe_x86::op_save(x86code *&dst, const instruction &inst)
be_parameter dstp(*this, inst.param(0), PTYPE_M);
// copy live state to the destination
- emit_mov_r32_imm(dst, REG_ECX, (uintptr_t)dstp.memory()); // mov ecx,dstp
- emit_call(dst, m_save); // call save
+ a.mov(ecx, imm(dstp.memory())); // mov ecx,dstp
+ a.call(imm(m_save)); // call save
}
@@ -3560,7 +3596,7 @@ void drcbe_x86::op_save(x86code *&dst, const instruction &inst)
// op_restore - process a RESTORE opcode
//-------------------------------------------------
-void drcbe_x86::op_restore(x86code *&dst, const instruction &inst)
+void drcbe_x86::op_restore(Assembler &a, const instruction &inst)
{
// validate instruction
assert(inst.size() == 4);
@@ -3570,8 +3606,8 @@ void drcbe_x86::op_restore(x86code *&dst, const instruction &inst)
be_parameter srcp(*this, inst.param(0), PTYPE_M);
// copy live state from the source
- emit_mov_r32_imm(dst, REG_ECX, (uintptr_t)srcp.memory()); // mov ecx,dstp
- emit_call(dst, m_restore); // call restore
+ a.mov(ecx, imm(srcp.memory())); // mov ecx,srcp
+ a.call(imm(m_restore)); // call restore
}
@@ -3584,7 +3620,7 @@ void drcbe_x86::op_restore(x86code *&dst, const instruction &inst)
// op_load - process a LOAD opcode
//-------------------------------------------------
-void drcbe_x86::op_load(x86code *&dst, const instruction &inst)
+void drcbe_x86::op_load(Assembler &a, const instruction &inst)
{
// validate instruction
assert(inst.size() == 4 || inst.size() == 8);
@@ -3595,74 +3631,73 @@ void drcbe_x86::op_load(x86code *&dst, const instruction &inst)
be_parameter dstp(*this, inst.param(0), PTYPE_MR);
be_parameter basep(*this, inst.param(1), PTYPE_M);
be_parameter indp(*this, inst.param(2), PTYPE_MRI);
- const parameter &scalesizep = inst.param(3);
+ parameter const &scalesizep = inst.param(3);
assert(scalesizep.is_size_scale());
- int scale = 1 << scalesizep.scale();
- int size = scalesizep.size();
+ int const size = scalesizep.size();
// pick a target register for the general case
- int dstreg = dstp.select_register(REG_EAX);
+ Gp const dstreg = dstp.select_register(eax);
- // immediate index
if (indp.is_immediate())
{
+ // immediate index
+ int const scale = 1 << scalesizep.scale();
+
if (size == SIZE_BYTE)
- emit_movzx_r32_m8(dst, dstreg, MABS(basep.memory(scale*indp.immediate()))); // movzx dstreg,[basep + scale*indp]
+ a.movzx(dstreg, MABS(basep.memory(scale*indp.immediate()), 1)); // movzx dstreg,[basep + scale*indp]
else if (size == SIZE_WORD)
- emit_movzx_r32_m16(dst, dstreg, MABS(basep.memory(scale*indp.immediate()))); // movzx dstreg,[basep + scale*indp]
+ a.movzx(dstreg, MABS(basep.memory(scale*indp.immediate()), 2)); // movzx dstreg,[basep + scale*indp]
else if (size == SIZE_DWORD)
- emit_mov_r32_m32(dst, dstreg, MABS(basep.memory(scale*indp.immediate()))); // mov dstreg,[basep + scale*indp]
+ a.mov(dstreg, MABS(basep.memory(scale*indp.immediate()))); // mov dstreg,[basep + scale*indp]
else if (size == SIZE_QWORD)
{
- emit_mov_r32_m32(dst, REG_EDX, MABS(basep.memory(scale*indp.immediate() + 4))); // mov edx,[basep + scale*indp + 4]
- emit_mov_r32_m32(dst, dstreg, MABS(basep.memory(scale*indp.immediate()))); // mov dstreg,[basep + scale*indp]
+ a.mov(edx, MABS(basep.memory(scale*indp.immediate() + 4))); // mov edx,[basep + scale*indp + 4]
+ a.mov(dstreg, MABS(basep.memory(scale*indp.immediate()))); // mov dstreg,[basep + scale*indp]
}
}
-
- // other index
else
{
- int indreg = indp.select_register(REG_ECX);
- emit_mov_r32_p32(dst, indreg, indp);
+ // other index
+ Gp const indreg = indp.select_register(ecx);
+ emit_mov_r32_p32_keepflags(a, indreg, indp);
if (size == SIZE_BYTE)
- emit_movzx_r32_m8(dst, dstreg, MABSI(basep.memory(), indreg, scale)); // movzx dstreg,[basep + scale*indp]
+ a.movzx(dstreg, ptr(uintptr_t(basep.memory()), indreg, scalesizep.scale(), 1)); // movzx dstreg,[basep + scale*indp]
else if (size == SIZE_WORD)
- emit_movzx_r32_m16(dst, dstreg, MABSI(basep.memory(), indreg, scale)); // movzx dstreg,[basep + scale*indp]
+ a.movzx(dstreg, ptr(uintptr_t(basep.memory()), indreg, scalesizep.scale(), 2)); // movzx dstreg,[basep + scale*indp]
else if (size == SIZE_DWORD)
- emit_mov_r32_m32(dst, dstreg, MABSI(basep.memory(), indreg, scale)); // mov dstreg,[basep + scale*indp]
+ a.mov(dstreg, ptr(uintptr_t(basep.memory()), indreg, scalesizep.scale())); // mov dstreg,[basep + scale*indp]
else if (size == SIZE_QWORD)
{
- emit_mov_r32_m32(dst, REG_EDX, MABSI(basep.memory(4), indreg, scale)); // mov edx,[basep + scale*indp + 4]
- emit_mov_r32_m32(dst, dstreg, MABSI(basep.memory(), indreg, scale)); // mov dstreg,[basep + scale*indp]
+ a.mov(edx, ptr(uintptr_t(basep.memory(4)), indreg, scalesizep.scale())); // mov edx,[basep + scale*indp + 4]
+ a.mov(dstreg, ptr(uintptr_t(basep.memory(0)), indreg, scalesizep.scale())); // mov dstreg,[basep + scale*indp]
}
}
// store low 32 bits
- emit_mov_p32_r32(dst, dstp, dstreg); // mov dstp,dstreg
+ emit_mov_p32_r32(a, dstp, dstreg); // mov dstp,dstreg
// 64-bit form stores upper 32 bits
if (inst.size() == 8)
{
- // 1, 2, or 4-byte case
if (size != SIZE_QWORD)
{
+ // 1, 2, or 4-byte case
if (dstp.is_memory())
- emit_mov_m32_imm(dst, MABS(dstp.memory(4)), 0); // mov [dstp+4],0
+ a.mov(MABS(dstp.memory(4), 4), 0); // mov [dstp+4],0
else if (dstp.is_int_register())
- emit_mov_m32_imm(dst, MABS(m_reghi[dstp.ireg()]), 0); // mov [reghi],0
+ a.mov(MABS(m_reghi[dstp.ireg()], 4), 0); // mov [reghi],0
}
-
- // 8-byte case
else
{
+ // 8-byte case
if (dstp.is_memory())
- emit_mov_m32_r32(dst, MABS(dstp.memory(4)), REG_EDX); // mov [dstp+4],edx
+ a.mov(MABS(dstp.memory(4)), edx); // mov [dstp+4],edx
else if (dstp.is_int_register())
- emit_mov_m32_r32(dst, MABS(m_reghi[dstp.ireg()]), REG_EDX); // mov [reghi],edx
- set_last_upper_reg(dst, dstp, REG_EDX);
+ a.mov(MABS(m_reghi[dstp.ireg()]), edx); // mov [reghi],edx
+ set_last_upper_reg(a, dstp, edx);
}
}
- set_last_lower_reg(dst, dstp, dstreg);
+ set_last_lower_reg(a, dstp, dstreg);
}
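
The indexed loads above rely on asmjit encoding base + (index << shift); the old MABSI helpers took the byte scale (1, 2, 4, 8) instead, which is why scalesizep.scale() is now passed through unchanged rather than 1 << scale(). A quick sanity check of that address arithmetic:

#include <cassert>
#include <cstdint>

uint32_t effective_address(uint32_t base, uint32_t index, unsigned shift)
{
	return base + (index << shift);     // matches [base + index*scale]
}

int main()
{
	assert(effective_address(0x1000, 3, 2) == 0x100c);  // dword table, scale 4
	assert(effective_address(0x1000, 3, 0) == 0x1003);  // byte table, scale 1
	return 0;
}
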
@@ -3670,7 +3705,7 @@ void drcbe_x86::op_load(x86code *&dst, const instruction &inst)
// op_loads - process a LOADS opcode
//-------------------------------------------------
-void drcbe_x86::op_loads(x86code *&dst, const instruction &inst)
+void drcbe_x86::op_loads(Assembler &a, const instruction &inst)
{
// validate instruction
assert(inst.size() == 4 || inst.size() == 8);
@@ -3681,62 +3716,67 @@ void drcbe_x86::op_loads(x86code *&dst, const instruction &inst)
be_parameter dstp(*this, inst.param(0), PTYPE_MR);
be_parameter basep(*this, inst.param(1), PTYPE_M);
be_parameter indp(*this, inst.param(2), PTYPE_MRI);
- const parameter &scalesizep = inst.param(3);
+ parameter const &scalesizep = inst.param(3);
assert(scalesizep.is_size_scale());
- int scale = 1 << scalesizep.scale();
- int size = scalesizep.size();
+ int const size = scalesizep.size();
// pick a target register for the general case
- int dstreg = dstp.select_register(REG_EAX);
+ Gp const dstreg = dstp.select_register(eax);
- // immediate index
if (indp.is_immediate())
{
+ // immediate index
+ int const scale = 1 << scalesizep.scale();
+
if (size == SIZE_BYTE)
- emit_movsx_r32_m8(dst, dstreg, MABS(basep.memory(scale*indp.immediate()))); // movsx dstreg,[basep + scale*indp]
+ a.movsx(dstreg, MABS(basep.memory(scale*indp.immediate()), 1)); // movsx dstreg,[basep + scale*indp]
else if (size == SIZE_WORD)
- emit_movsx_r32_m16(dst, dstreg, MABS(basep.memory(scale*indp.immediate()))); // movsx dstreg,[basep + scale*indp]
+ a.movsx(dstreg, MABS(basep.memory(scale*indp.immediate()), 2)); // movsx dstreg,[basep + scale*indp]
else if (size == SIZE_DWORD)
- emit_mov_r32_m32(dst, dstreg, MABS(basep.memory(scale*indp.immediate()))); // mov dstreg,[basep + scale*indp]
+ a.mov(dstreg, MABS(basep.memory(scale*indp.immediate()))); // mov dstreg,[basep + scale*indp]
else if (size == SIZE_QWORD)
{
- emit_mov_r32_m32(dst, REG_EDX, MABS(basep.memory(scale*indp.immediate() + 4))); // mov edx,[basep + scale*indp + 4]
- emit_mov_r32_m32(dst, dstreg, MABS(basep.memory(scale*indp.immediate()))); // mov dstreg,[basep + scale*indp]
+ a.mov(edx, MABS(basep.memory(scale*indp.immediate() + 4))); // mov edx,[basep + scale*indp + 4]
+ a.mov(dstreg, MABS(basep.memory(scale*indp.immediate()))); // mov dstreg,[basep + scale*indp]
}
}
-
- // other index
else
{
- int indreg = indp.select_register(REG_ECX);
- emit_mov_r32_p32(dst, indreg, indp);
+ // other index
+ Gp const indreg = indp.select_register(ecx);
+ emit_mov_r32_p32_keepflags(a, indreg, indp);
if (size == SIZE_BYTE)
- emit_movsx_r32_m8(dst, dstreg, MABSI(basep.memory(), indreg, scale)); // movsx dstreg,[basep + scale*indp]
+ a.movsx(dstreg, ptr(uintptr_t(basep.memory()), indreg, scalesizep.scale(), 1)); // movsx dstreg,[basep + scale*indp]
else if (size == SIZE_WORD)
- emit_movsx_r32_m16(dst, dstreg, MABSI(basep.memory(), indreg, scale)); // movsx dstreg,[basep + scale*indp]
+ a.movsx(dstreg, ptr(uintptr_t(basep.memory()), indreg, scalesizep.scale(), 2)); // movsx dstreg,[basep + scale*indp]
else if (size == SIZE_DWORD)
- emit_mov_r32_m32(dst, dstreg, MABSI(basep.memory(), indreg, scale)); // mov dstreg,[basep + scale*indp]
+ a.mov(dstreg, ptr(uintptr_t(basep.memory()), indreg, scalesizep.scale())); // mov dstreg,[basep + scale*indp]
else if (size == SIZE_QWORD)
{
- emit_mov_r32_m32(dst, REG_EDX, MABSI(basep.memory(4), indreg, scale)); // mov edx,[basep + scale*indp + 4]
- emit_mov_r32_m32(dst, dstreg, MABSI(basep.memory(), indreg, scale)); // mov dstreg,[basep + scale*indp]
+ a.mov(edx, ptr(uintptr_t(basep.memory(4)), indreg, scalesizep.scale())); // mov edx,[basep + scale*indp + 4]
+ a.mov(dstreg, ptr(uintptr_t(basep.memory(0)), indreg, scalesizep.scale())); // mov dstreg,[basep + scale*indp]
}
}
// store low 32 bits
- emit_mov_p32_r32(dst, dstp, dstreg); // mov dstp,dstreg
+ emit_mov_p32_r32(a, dstp, dstreg); // mov dstp,dstreg
// 64-bit form stores upper 32 bits
if (inst.size() == 8)
{
- emit_cdq(dst); // cdq
+ if (size != SIZE_QWORD)
+ {
+ if (dstreg.id() != eax.id())
+ a.mov(eax, dstreg);
+ a.cdq(); // cdq
+ }
if (dstp.is_memory())
- emit_mov_m32_r32(dst, MABS(dstp.memory(4)), REG_EDX); // mov [dstp+4],edx
+ a.mov(MABS(dstp.memory(4)), edx); // mov [dstp+4],edx
else if (dstp.is_int_register())
- emit_mov_m32_r32(dst, MABS(m_reghi[dstp.ireg()]), REG_EDX); // mov [reghi],edx
- set_last_upper_reg(dst, dstp, REG_EDX);
+ a.mov(MABS(m_reghi[dstp.ireg()]), edx); // mov [reghi],edx
+ set_last_upper_reg(a, dstp, edx);
}
- set_last_lower_reg(dst, dstp, dstreg);
+ set_last_lower_reg(a, dstp, dstreg);
}
@@ -3744,7 +3784,7 @@ void drcbe_x86::op_loads(x86code *&dst, const instruction &inst)
// op_store - process a STORE opcode
//-------------------------------------------------
-void drcbe_x86::op_store(x86code *&dst, const instruction &inst)
+void drcbe_x86::op_store(Assembler &a, const instruction &inst)
{
// validate instruction
assert(inst.size() == 4 || inst.size() == 8);
@@ -3755,96 +3795,95 @@ void drcbe_x86::op_store(x86code *&dst, const instruction &inst)
be_parameter basep(*this, inst.param(0), PTYPE_M);
be_parameter indp(*this, inst.param(1), PTYPE_MRI);
be_parameter srcp(*this, inst.param(2), PTYPE_MRI);
- const parameter &scalesizep = inst.param(3);
- int scale = 1 << (scalesizep.scale());
- int size = scalesizep.size();
+ parameter const &scalesizep = inst.param(3);
+ int const size = scalesizep.size();
// pick a source register for the general case
- int srcreg = srcp.select_register(REG_EAX);
- if (size == SIZE_BYTE && (srcreg & 4))
- srcreg = REG_EAX;
+ Gp srcreg = srcp.select_register(eax);
+ if (size == SIZE_BYTE && (srcreg.id() & 4)) // FIXME: &4?
+ srcreg = eax;
- // degenerate case: constant index
if (indp.is_immediate())
{
- // immediate source
+ // degenerate case: constant index
+ int const scale = 1 << (scalesizep.scale());
+
if (srcp.is_immediate())
{
+ // immediate source
if (size == SIZE_BYTE)
- emit_mov_m8_imm(dst, MABS(basep.memory(scale*indp.immediate())), srcp.immediate()); // mov [basep + scale*indp],srcp
+ a.mov(MABS(basep.memory(scale*indp.immediate()), 1), srcp.immediate()); // mov [basep + scale*indp],srcp
else if (size == SIZE_WORD)
- emit_mov_m16_imm(dst, MABS(basep.memory(scale*indp.immediate())), srcp.immediate()); // mov [basep + scale*indp],srcp
+ a.mov(MABS(basep.memory(scale*indp.immediate()), 2), srcp.immediate()); // mov [basep + scale*indp],srcp
else if (size == SIZE_DWORD)
- emit_mov_m32_imm(dst, MABS(basep.memory(scale*indp.immediate())), srcp.immediate()); // mov [basep + scale*indp],srcp
+ a.mov(MABS(basep.memory(scale*indp.immediate()), 4), srcp.immediate()); // mov [basep + scale*indp],srcp
else if (size == SIZE_QWORD)
{
- emit_mov_m32_imm(dst, MABS(basep.memory(scale*indp.immediate())), srcp.immediate()); // mov [basep + scale*indp],srcp
- emit_mov_m32_imm(dst, MABS(basep.memory(scale*indp.immediate() + 4)), srcp.immediate() >> 32);
+ a.mov(MABS(basep.memory(scale*indp.immediate()), 4), srcp.immediate()); // mov [basep + scale*indp],srcp
+ a.mov(MABS(basep.memory(scale*indp.immediate() + 4), 4), srcp.immediate() >> 32);
// mov [basep + scale*indp + 4],srcp >> 32
}
}
-
- // variable source
else
{
+ // variable source
if (size != SIZE_QWORD)
- emit_mov_r32_p32(dst, srcreg, srcp); // mov srcreg,srcp
+ emit_mov_r32_p32_keepflags(a, srcreg, srcp); // mov srcreg,srcp
else
- emit_mov_r64_p64(dst, srcreg, REG_EDX, srcp); // mov edx:srcreg,srcp
+ emit_mov_r64_p64_keepflags(a, srcreg, edx, srcp); // mov edx:srcreg,srcp
+
if (size == SIZE_BYTE)
- emit_mov_m8_r8(dst, MABS(basep.memory(scale*indp.immediate())), srcreg); // mov [basep + scale*indp],srcreg
+ a.mov(MABS(basep.memory(scale*indp.immediate())), srcreg.r8()); // mov [basep + scale*indp],srcreg
else if (size == SIZE_WORD)
- emit_mov_m16_r16(dst, MABS(basep.memory(scale*indp.immediate())), srcreg); // mov [basep + scale*indp],srcreg
+ a.mov(MABS(basep.memory(scale*indp.immediate())), srcreg.r16()); // mov [basep + scale*indp],srcreg
else if (size == SIZE_DWORD)
- emit_mov_m32_r32(dst, MABS(basep.memory(scale*indp.immediate())), srcreg); // mov [basep + scale*indp],srcreg
+ a.mov(MABS(basep.memory(scale*indp.immediate())), srcreg); // mov [basep + scale*indp],srcreg
else if (size == SIZE_QWORD)
{
- emit_mov_m32_r32(dst, MABS(basep.memory(scale*indp.immediate())), srcreg); // mov [basep + scale*indp],srcreg
- emit_mov_m32_r32(dst, MABS(basep.memory(scale*indp.immediate() + 4)), REG_EDX); // mov [basep + scale*indp + 4],edx
+ a.mov(MABS(basep.memory(scale*indp.immediate())), srcreg); // mov [basep + scale*indp],srcreg
+ a.mov(MABS(basep.memory(scale*indp.immediate() + 4)), edx); // mov [basep + scale*indp + 4],edx
}
}
}
-
- // normal case: variable index
else
{
- int indreg = indp.select_register(REG_ECX);
- emit_mov_r32_p32(dst, indreg, indp); // mov indreg,indp
+ // normal case: variable index
+ Gp const indreg = indp.select_register(ecx);
+ emit_mov_r32_p32_keepflags(a, indreg, indp); // mov indreg,indp
- // immediate source
if (srcp.is_immediate())
{
+ // immediate source
if (size == SIZE_BYTE)
- emit_mov_m8_imm(dst, MABSI(basep.memory(), indreg, scale), srcp.immediate()); // mov [basep + 1*ecx],srcp
+ a.mov(ptr(uintptr_t(basep.memory()), indreg, scalesizep.scale(), 1), srcp.immediate()); // mov [basep + 1*ecx],srcp
else if (size == SIZE_WORD)
- emit_mov_m16_imm(dst, MABSI(basep.memory(), indreg, scale), srcp.immediate()); // mov [basep + 2*ecx],srcp
+ a.mov(ptr(uintptr_t(basep.memory()), indreg, scalesizep.scale(), 2), srcp.immediate()); // mov [basep + 2*ecx],srcp
else if (size == SIZE_DWORD)
- emit_mov_m32_imm(dst, MABSI(basep.memory(), indreg, scale), srcp.immediate()); // mov [basep + 4*ecx],srcp
+ a.mov(ptr(uintptr_t(basep.memory()), indreg, scalesizep.scale(), 4), srcp.immediate()); // mov [basep + 4*ecx],srcp
else if (size == SIZE_QWORD)
{
- emit_mov_m32_imm(dst, MABSI(basep.memory(), indreg, scale), srcp.immediate()); // mov [basep + 8*ecx],srcp
- emit_mov_m32_imm(dst, MABSI(basep.memory(4), indreg, scale), srcp.immediate() >> 32);
+ a.mov(ptr(uintptr_t(basep.memory(0)), indreg, scalesizep.scale(), 4), srcp.immediate()); // mov [basep + 8*ecx],srcp
+ a.mov(ptr(uintptr_t(basep.memory(4)), indreg, scalesizep.scale(), 4), srcp.immediate() >> 32);
// mov [basep + 8*ecx + 4],srcp >> 32
}
}
-
- // variable source
else
{
+ // variable source
if (size != SIZE_QWORD)
- emit_mov_r32_p32(dst, srcreg, srcp); // mov srcreg,srcp
+ emit_mov_r32_p32_keepflags(a, srcreg, srcp); // mov srcreg,srcp
else
- emit_mov_r64_p64(dst, srcreg, REG_EDX, srcp); // mov edx:srcreg,srcp
+ emit_mov_r64_p64_keepflags(a, srcreg, edx, srcp); // mov edx:srcreg,srcp
if (size == SIZE_BYTE)
- emit_mov_m8_r8(dst, MABSI(basep.memory(), indreg, scale), srcreg); // mov [basep + 1*ecx],srcreg
+ a.mov(ptr(uintptr_t(basep.memory()), indreg, scalesizep.scale()), srcreg.r8()); // mov [basep + 1*ecx],srcreg
else if (size == SIZE_WORD)
- emit_mov_m16_r16(dst, MABSI(basep.memory(), indreg, scale), srcreg); // mov [basep + 2*ecx],srcreg
+ a.mov(ptr(uintptr_t(basep.memory()), indreg, scalesizep.scale()), srcreg.r16()); // mov [basep + 2*ecx],srcreg
else if (size == SIZE_DWORD)
- emit_mov_m32_r32(dst, MABSI(basep.memory(), indreg, scale), srcreg); // mov [basep + 4*ecx],srcreg
+ a.mov(ptr(uintptr_t(basep.memory()), indreg, scalesizep.scale()), srcreg); // mov [basep + 4*ecx],srcreg
else if (size == SIZE_QWORD)
{
- emit_mov_m32_r32(dst, MABSI(basep.memory(), indreg, scale), srcreg); // mov [basep + 8*ecx],srcreg
- emit_mov_m32_r32(dst, MABSI(basep.memory(4), indreg, scale), REG_EDX); // mov [basep + 8*ecx],edx
+ a.mov(ptr(uintptr_t(basep.memory(0)), indreg, scalesizep.scale()), srcreg); // mov [basep + 8*ecx],srcreg
+ a.mov(ptr(uintptr_t(basep.memory(4)), indreg, scalesizep.scale()), edx); // mov [basep + 8*ecx + 4],edx
}
}
}
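
The srcreg.id() & 4 test near the top of this handler (the line still carrying a FIXME) guards byte stores: on 32-bit x86 only EAX, ECX, EDX and EBX have addressable low-byte forms, so any other register is swapped for EAX before srcreg.r8() is used. A small illustration of that id check, under that assumption:

#include <cassert>
#include <cstdint>

bool has_low_byte_form(uint32_t gp_id)
{
	return (gp_id & 4) == 0;    // ids 0-3 map to AL/CL/DL/BL; ids 4-7 have no r8 form
}

int main()
{
	assert(has_low_byte_form(0));   // EAX
	assert(has_low_byte_form(3));   // EBX
	assert(!has_low_byte_form(6));  // ESI
	return 0;
}
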
@@ -3855,7 +3894,7 @@ void drcbe_x86::op_store(x86code *&dst, const instruction &inst)
// op_read - process a READ opcode
//-------------------------------------------------
-void drcbe_x86::op_read(x86code *&dst, const instruction &inst)
+void drcbe_x86::op_read(Assembler &a, const instruction &inst)
{
// validate instruction
assert(inst.size() == 4 || inst.size() == 8);
@@ -3865,62 +3904,81 @@ void drcbe_x86::op_read(x86code *&dst, const instruction &inst)
// normalize parameters
be_parameter dstp(*this, inst.param(0), PTYPE_MR);
be_parameter addrp(*this, inst.param(1), PTYPE_MRI);
- const parameter &spacesizep = inst.param(2);
+ parameter const &spacesizep = inst.param(2);
assert(spacesizep.is_size_space());
// pick a target register for the general case
- int dstreg = dstp.select_register(REG_EAX);
+ Gp const dstreg = dstp.select_register(eax);
- // set up a call to the read byte handler
- emit_mov_m32_p32(dst, MBD(REG_ESP, 4), addrp); // mov [esp+4],addrp
- emit_mov_m32_imm(dst, MBD(REG_ESP, 0), (uintptr_t)m_space[spacesizep.space()]); // mov [esp],space
+ // set up a call to the read handler
+ auto const &accessors = m_memory_accessors[spacesizep.space()];
+ emit_mov_m32_p32(a, dword_ptr(esp, USE_THISCALL ? 0 : 4), addrp);
if (spacesizep.size() == SIZE_BYTE)
{
- emit_call(dst, (x86code *)m_accessors[spacesizep.space()].read_byte);
- // call read_byte
- emit_movzx_r32_r8(dst, dstreg, REG_AL); // movzx dstreg,al
+ if (USE_THISCALL)
+ a.mov(ecx, imm(accessors.read_byte.obj));
+ else
+ a.mov(dword_ptr(esp, 0), imm(accessors.read_byte.obj));
+ a.call(imm(accessors.read_byte.func));
+ if (USE_THISCALL)
+ a.sub(esp, 4);
+ a.movzx(dstreg, al);
}
else if (spacesizep.size() == SIZE_WORD)
{
- emit_call(dst, (x86code *)m_accessors[spacesizep.space()].read_word);
- // call read_word
- emit_movzx_r32_r16(dst, dstreg, REG_AX); // movzx dstreg,ax
+ if (USE_THISCALL)
+ a.mov(ecx, imm(accessors.read_word.obj));
+ else
+ a.mov(dword_ptr(esp, 0), imm(accessors.read_word.obj));
+ a.call(imm(accessors.read_word.func));
+ if (USE_THISCALL)
+ a.sub(esp, 4);
+ a.movzx(dstreg, ax);
}
else if (spacesizep.size() == SIZE_DWORD)
{
- emit_call(dst, (x86code *)m_accessors[spacesizep.space()].read_dword);
- // call read_dword
- emit_mov_r32_r32(dst, dstreg, REG_EAX); // mov dstreg,eax
+ if (USE_THISCALL)
+ a.mov(ecx, imm(accessors.read_dword.obj));
+ else
+ a.mov(dword_ptr(esp, 0), imm(accessors.read_dword.obj));
+ a.call(imm(accessors.read_dword.func));
+ if (USE_THISCALL)
+ a.sub(esp, 4);
+ a.mov(dstreg, eax);
}
else if (spacesizep.size() == SIZE_QWORD)
{
- emit_call(dst, (x86code *)m_accessors[spacesizep.space()].read_qword);
- // call read_qword
- emit_mov_r32_r32(dst, dstreg, REG_EAX); // mov dstreg,eax
+ if (USE_THISCALL)
+ a.mov(ecx, imm(accessors.read_qword.obj));
+ else
+ a.mov(dword_ptr(esp, 0), imm(accessors.read_qword.obj));
+ a.call(imm(accessors.read_qword.func));
+ if (USE_THISCALL)
+ a.sub(esp, 4);
+ a.mov(dstreg, eax);
}
// store low 32 bits
- emit_mov_p32_r32(dst, dstp, dstreg); // mov dstp,dstreg
+ emit_mov_p32_r32(a, dstp, dstreg);
// 64-bit form stores upper 32 bits
if (inst.size() == 8)
{
- // 1, 2, or 4-byte case
if (spacesizep.size() != SIZE_QWORD)
{
+ // 1, 2, or 4-byte case
if (dstp.is_memory())
- emit_mov_m32_imm(dst, MABS(dstp.memory(4)), 0); // mov [dstp+4],0
+ a.mov(MABS(dstp.memory(4), 4), 0);
else if (dstp.is_int_register())
- emit_mov_m32_imm(dst, MABS(m_reghi[dstp.ireg()]), 0); // mov [reghi],0
+ a.mov(MABS(m_reghi[dstp.ireg()], 4), 0);
}
-
- // 8-byte case
else
{
+ // 8-byte case
if (dstp.is_memory())
- emit_mov_m32_r32(dst, MABS(dstp.memory(4)), REG_EDX); // mov [dstp+4],edx
+ a.mov(MABS(dstp.memory(4)), edx);
else if (dstp.is_int_register())
- emit_mov_m32_r32(dst, MABS(m_reghi[dstp.ireg()]), REG_EDX); // mov [reghi],edx
+ a.mov(MABS(m_reghi[dstp.ireg()]), edx);
}
}
}
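
The accessors used by the READ/READM/WRITE/WRITEM handlers are treated here as pre-resolved object/function pairs; the shape sketched below is hypothetical (the real type comes from the member-function resolution done at back end setup), but it is all the generated code needs: the object pointer goes into ECX under __thiscall or into [esp+0] under cdecl, and the entry point is reached with a direct call.

#include <cstdint>

// Hypothetical layout of one resolved accessor.
struct resolved_accessor
{
	uintptr_t obj;      // bound object pointer for the handler
	uintptr_t func;     // plain, non-virtual entry point for a direct call
};

// One set per address space, mirroring the members referenced above.
struct resolved_memory_accessors
{
	resolved_accessor read_byte, read_word, read_dword, read_qword;
	resolved_accessor read_byte_masked, read_word_masked, read_dword_masked, read_qword_masked;
	resolved_accessor write_byte, write_word, write_dword, write_qword;
	resolved_accessor write_byte_masked, write_word_masked, write_dword_masked, write_qword_masked;
};
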
@@ -3930,7 +3988,7 @@ void drcbe_x86::op_read(x86code *&dst, const instruction &inst)
// op_readm - process a READM opcode
//-------------------------------------------------
-void drcbe_x86::op_readm(x86code *&dst, const instruction &inst)
+void drcbe_x86::op_readm(Assembler &a, const instruction &inst)
{
// validate instruction
assert(inst.size() == 4 || inst.size() == 8);
@@ -3941,60 +3999,85 @@ void drcbe_x86::op_readm(x86code *&dst, const instruction &inst)
be_parameter dstp(*this, inst.param(0), PTYPE_MR);
be_parameter addrp(*this, inst.param(1), PTYPE_MRI);
be_parameter maskp(*this, inst.param(2), PTYPE_MRI);
- const parameter &spacesizep = inst.param(3);
+ parameter const &spacesizep = inst.param(3);
assert(spacesizep.is_size_space());
// pick a target register for the general case
- int dstreg = dstp.select_register(REG_EAX);
+ Gp const dstreg = dstp.select_register(eax);
// set up a call to the read byte handler
+ auto const &accessors = m_memory_accessors[spacesizep.space()];
if (spacesizep.size() != SIZE_QWORD)
- emit_mov_m32_p32(dst, MBD(REG_ESP, 8), maskp); // mov [esp+8],maskp
+ emit_mov_m32_p32(a, dword_ptr(esp, USE_THISCALL ? 4 : 8), maskp);
else
- emit_mov_m64_p64(dst, MBD(REG_ESP, 8), maskp); // mov [esp+8],maskp
- emit_mov_m32_p32(dst, MBD(REG_ESP, 4), addrp); // mov [esp+4],addrp
- emit_mov_m32_imm(dst, MBD(REG_ESP, 0), (uintptr_t)m_space[spacesizep.space()]); // mov [esp],space
- if (spacesizep.size() == SIZE_WORD)
+ emit_mov_m64_p64(a, qword_ptr(esp, USE_THISCALL ? 4 : 8), maskp);
+ emit_mov_m32_p32(a, dword_ptr(esp, USE_THISCALL ? 0 : 4), addrp);
+ if (spacesizep.size() == SIZE_BYTE)
{
- emit_call(dst, (x86code *)m_accessors[spacesizep.space()].read_word_masked);
- // call read_word_masked
- emit_movzx_r32_r16(dst, dstreg, REG_AX); // movzx dstreg,ax
+ if (USE_THISCALL)
+ a.mov(ecx, imm(accessors.read_byte_masked.obj));
+ else
+ a.mov(dword_ptr(esp, 0), imm(accessors.read_byte_masked.obj));
+ a.call(imm(accessors.read_byte_masked.func));
+ if (USE_THISCALL)
+ a.sub(esp, 8);
+ a.movzx(dstreg, al);
+ }
+ else if (spacesizep.size() == SIZE_WORD)
+ {
+ if (USE_THISCALL)
+ a.mov(ecx, imm(accessors.read_word_masked.obj));
+ else
+ a.mov(dword_ptr(esp, 0), imm(accessors.read_word_masked.obj));
+ a.call(imm(accessors.read_word_masked.func));
+ if (USE_THISCALL)
+ a.sub(esp, 8);
+ a.movzx(dstreg, ax);
}
else if (spacesizep.size() == SIZE_DWORD)
{
- emit_call(dst, (x86code *)m_accessors[spacesizep.space()].read_dword_masked);
- // call read_dword_masked
- emit_mov_r32_r32(dst, dstreg, REG_EAX); // mov dstreg,eax
+ if (USE_THISCALL)
+ a.mov(ecx, imm(accessors.read_dword_masked.obj));
+ else
+ a.mov(dword_ptr(esp, 0), imm(accessors.read_dword_masked.obj));
+ a.call(imm(accessors.read_dword_masked.func));
+ if (USE_THISCALL)
+ a.sub(esp, 8);
+ a.mov(dstreg, eax);
}
else if (spacesizep.size() == SIZE_QWORD)
{
- emit_call(dst, (x86code *)m_accessors[spacesizep.space()].read_qword_masked);
- // call read_qword_masked
- emit_mov_r32_r32(dst, dstreg, REG_EAX); // mov dstreg,eax
+ if (USE_THISCALL)
+ a.mov(ecx, imm(accessors.read_qword_masked.obj));
+ else
+ a.mov(dword_ptr(esp, 0), imm(accessors.read_qword_masked.obj));
+ a.call(imm(accessors.read_qword_masked.func));
+ if (USE_THISCALL)
+ a.sub(esp, 12);
+ a.mov(dstreg, eax);
}
// store low 32 bits
- emit_mov_p32_r32(dst, dstp, dstreg); // mov dstp,dstreg
+ emit_mov_p32_r32(a, dstp, dstreg);
// 64-bit form stores upper 32 bits
if (inst.size() == 8)
{
- // 1, 2, or 4-byte case
if (spacesizep.size() != SIZE_QWORD)
{
+ // 1, 2, or 4-byte case
if (dstp.is_memory())
- emit_mov_m32_imm(dst, MABS(dstp.memory(4)), 0); // mov [dstp+4],0
+ a.mov(MABS(dstp.memory(4), 4), 0);
else if (dstp.is_int_register())
- emit_mov_m32_imm(dst, MABS(m_reghi[dstp.ireg()]), 0); // mov [reghi],0
+ a.mov(MABS(m_reghi[dstp.ireg()], 4), 0);
}
-
- // 8-byte case
else
{
+ // 8-byte case
if (dstp.is_memory())
- emit_mov_m32_r32(dst, MABS(dstp.memory(4)), REG_EDX); // mov [dstp+4],edx
+ a.mov(MABS(dstp.memory(4)), edx);
else if (dstp.is_int_register())
- emit_mov_m32_r32(dst, MABS(m_reghi[dstp.ireg()]), REG_EDX); // mov [reghi],edx
+ a.mov(MABS(m_reghi[dstp.ireg()]), edx);
}
}
}
@@ -4004,7 +4087,7 @@ void drcbe_x86::op_readm(x86code *&dst, const instruction &inst)
// op_write - process a WRITE opcode
//-------------------------------------------------
-void drcbe_x86::op_write(x86code *&dst, const instruction &inst)
+void drcbe_x86::op_write(Assembler &a, const instruction &inst)
{
// validate instruction
assert(inst.size() == 4 || inst.size() == 8);
@@ -4014,28 +4097,56 @@ void drcbe_x86::op_write(x86code *&dst, const instruction &inst)
// normalize parameters
be_parameter addrp(*this, inst.param(0), PTYPE_MRI);
be_parameter srcp(*this, inst.param(1), PTYPE_MRI);
- const parameter &spacesizep = inst.param(2);
+ parameter const &spacesizep = inst.param(2);
assert(spacesizep.is_size_space());
// set up a call to the write byte handler
+ auto const &accessors = m_memory_accessors[spacesizep.space()];
if (spacesizep.size() != SIZE_QWORD)
- emit_mov_m32_p32(dst, MBD(REG_ESP, 8), srcp); // mov [esp+8],srcp
+ emit_mov_m32_p32(a, dword_ptr(esp, USE_THISCALL ? 4 : 8), srcp);
else
- emit_mov_m64_p64(dst, MBD(REG_ESP, 8), srcp); // mov [esp+8],srcp
- emit_mov_m32_p32(dst, MBD(REG_ESP, 4), addrp); // mov [esp+4],addrp
- emit_mov_m32_imm(dst, MBD(REG_ESP, 0), (uintptr_t)m_space[spacesizep.space()]); // mov [esp],space
+ emit_mov_m64_p64(a, qword_ptr(esp, USE_THISCALL ? 4 : 8), srcp);
+ emit_mov_m32_p32(a, dword_ptr(esp, USE_THISCALL ? 0 : 4), addrp);
if (spacesizep.size() == SIZE_BYTE)
- emit_call(dst, (x86code *)m_accessors[spacesizep.space()].write_byte);
- // call write_byte
+ {
+ if (USE_THISCALL)
+ a.mov(ecx, imm(accessors.write_byte.obj));
+ else
+ a.mov(dword_ptr(esp, 0), imm(accessors.write_byte.obj));
+ a.call(imm(accessors.write_byte.func));
+ if (USE_THISCALL)
+ a.sub(esp, 8);
+ }
else if (spacesizep.size() == SIZE_WORD)
- emit_call(dst, (x86code *)m_accessors[spacesizep.space()].write_word);
- // call write_word
+ {
+ if (USE_THISCALL)
+ a.mov(ecx, imm(accessors.write_word.obj));
+ else
+ a.mov(dword_ptr(esp, 0), imm(accessors.write_word.obj));
+ a.call(imm(accessors.write_word.func));
+ if (USE_THISCALL)
+ a.sub(esp, 8);
+ }
else if (spacesizep.size() == SIZE_DWORD)
- emit_call(dst, (x86code *)m_accessors[spacesizep.space()].write_dword);
- // call write_dword
+ {
+ if (USE_THISCALL)
+ a.mov(ecx, imm(accessors.write_dword.obj));
+ else
+ a.mov(dword_ptr(esp, 0), imm(accessors.write_dword.obj));
+ a.call(imm(accessors.write_dword.func));
+ if (USE_THISCALL)
+ a.sub(esp, 8);
+ }
else if (spacesizep.size() == SIZE_QWORD)
- emit_call(dst, (x86code *)m_accessors[spacesizep.space()].write_qword);
- // call write_qword
+ {
+ if (USE_THISCALL)
+ a.mov(ecx, imm(accessors.write_qword.obj));
+ else
+ a.mov(dword_ptr(esp, 0), imm(accessors.write_qword.obj));
+ a.call(imm(accessors.write_qword.func));
+ if (USE_THISCALL)
+ a.sub(esp, 12);
+ }
}
@@ -4043,7 +4154,7 @@ void drcbe_x86::op_write(x86code *&dst, const instruction &inst)
// op_writem - process a WRITEM opcode
//-------------------------------------------------
-void drcbe_x86::op_writem(x86code *&dst, const instruction &inst)
+void drcbe_x86::op_writem(Assembler &a, const instruction &inst)
{
// validate instruction
assert(inst.size() == 4 || inst.size() == 8);
@@ -4054,31 +4165,62 @@ void drcbe_x86::op_writem(x86code *&dst, const instruction &inst)
be_parameter addrp(*this, inst.param(0), PTYPE_MRI);
be_parameter srcp(*this, inst.param(1), PTYPE_MRI);
be_parameter maskp(*this, inst.param(2), PTYPE_MRI);
- const parameter &spacesizep = inst.param(3);
+ parameter const &spacesizep = inst.param(3);
assert(spacesizep.is_size_space());
// set up a call to the write byte handler
+ auto const &accessors = m_memory_accessors[spacesizep.space()];
if (spacesizep.size() != SIZE_QWORD)
{
- emit_mov_m32_p32(dst, MBD(REG_ESP, 12), maskp); // mov [esp+12],maskp
- emit_mov_m32_p32(dst, MBD(REG_ESP, 8), srcp); // mov [esp+8],srcp
+ emit_mov_m32_p32(a, dword_ptr(esp, USE_THISCALL ? 8 : 12), maskp);
+ emit_mov_m32_p32(a, dword_ptr(esp, USE_THISCALL ? 4 : 8), srcp);
}
else
{
- emit_mov_m64_p64(dst, MBD(REG_ESP, 16), maskp); // mov [esp+16],maskp
- emit_mov_m64_p64(dst, MBD(REG_ESP, 8), srcp); // mov [esp+8],srcp
+ emit_mov_m64_p64(a, qword_ptr(esp, USE_THISCALL ? 12 : 16), maskp);
+ emit_mov_m64_p64(a, qword_ptr(esp, USE_THISCALL ? 4 : 8), srcp);
+ }
+ emit_mov_m32_p32(a, dword_ptr(esp, USE_THISCALL ? 0 : 4), addrp);
+ if (spacesizep.size() == SIZE_BYTE)
+ {
+ if (USE_THISCALL)
+ a.mov(ecx, imm(accessors.write_byte_masked.obj));
+ else
+ a.mov(dword_ptr(esp, 0), imm(accessors.write_byte_masked.obj));
+ a.call(imm(accessors.write_byte_masked.func));
+ if (USE_THISCALL)
+ a.sub(esp, 12);
+ }
+ else if (spacesizep.size() == SIZE_WORD)
+ {
+ if (USE_THISCALL)
+ a.mov(ecx, imm(accessors.write_word_masked.obj));
+ else
+ a.mov(dword_ptr(esp, 0), imm(accessors.write_word_masked.obj));
+ a.call(imm(accessors.write_word_masked.func));
+ if (USE_THISCALL)
+ a.sub(esp, 12);
}
- emit_mov_m32_p32(dst, MBD(REG_ESP, 4), addrp); // mov [esp+4],addrp
- emit_mov_m32_imm(dst, MBD(REG_ESP, 0), (uintptr_t)m_space[spacesizep.space()]); // mov [esp],space
- if (spacesizep.size() == SIZE_WORD)
- emit_call(dst, (x86code *)m_accessors[spacesizep.space()].write_word_masked);
- // call write_word_masked
else if (spacesizep.size() == SIZE_DWORD)
- emit_call(dst, (x86code *)m_accessors[spacesizep.space()].write_dword_masked);
- // call write_dword_masked
+ {
+ if (USE_THISCALL)
+ a.mov(ecx, imm(accessors.write_dword_masked.obj));
+ else
+ a.mov(dword_ptr(esp, 0), imm(accessors.write_dword_masked.obj));
+ a.call(imm(accessors.write_dword_masked.func));
+ if (USE_THISCALL)
+ a.sub(esp, 12);
+ }
else if (spacesizep.size() == SIZE_QWORD)
- emit_call(dst, (x86code *)m_accessors[spacesizep.space()].write_qword_masked);
- // call write_qword_masked
+ {
+ if (USE_THISCALL)
+ a.mov(ecx, imm(accessors.write_qword_masked.obj));
+ else
+ a.mov(dword_ptr(esp, 0), imm(accessors.write_qword_masked.obj));
+ a.call(imm(accessors.write_qword_masked.func));
+ if (USE_THISCALL)
+ a.sub(esp, 20);
+ }
}
@@ -4086,7 +4228,7 @@ void drcbe_x86::op_writem(x86code *&dst, const instruction &inst)
// op_carry - process a CARRY opcode
//-------------------------------------------------
-void drcbe_x86::op_carry(x86code *&dst, const instruction &inst)
+void drcbe_x86::op_carry(Assembler &a, const instruction &inst)
{
// validate instruction
assert(inst.size() == 4 || inst.size() == 8);
@@ -4100,59 +4242,116 @@ void drcbe_x86::op_carry(x86code *&dst, const instruction &inst)
// degenerate case: source is immediate
if (srcp.is_immediate() && bitp.is_immediate())
{
- if (srcp.immediate() & ((uint64_t)1 << bitp.immediate()))
- emit_stc(dst);
+ if (srcp.immediate() & ((uint64_t)1 << (bitp.immediate() & (inst.size() * 8 - 1))))
+ a.stc();
else
- emit_clc(dst);
- }
+ a.clc();
+
+ return;
+ }
// load non-immediate bit numbers into a register
if (!bitp.is_immediate())
{
- emit_mov_r32_p32(dst, REG_ECX, bitp);
- emit_and_r32_imm(dst, REG_ECX, inst.size() * 8 - 1);
+ emit_mov_r32_p32(a, ecx, bitp);
+ a.and_(ecx, inst.size() * 8 - 1);
}
// 32-bit form
if (inst.size() == 4)
{
+ if (srcp.is_immediate())
+ emit_mov_r32_p32(a, edx, srcp);
+
if (bitp.is_immediate())
{
if (srcp.is_memory())
- emit_bt_m32_imm(dst, MABS(srcp.memory()), bitp.immediate()); // bt [srcp],bitp
+ a.bt(MABS(srcp.memory(), 4), (bitp.immediate() & (inst.size() * 8 - 1)));
else if (srcp.is_int_register())
- emit_bt_r32_imm(dst, srcp.ireg(), bitp.immediate()); // bt srcp,bitp
+ a.bt(Gpd(srcp.ireg()), (bitp.immediate() & (inst.size() * 8 - 1)));
+ else if (srcp.is_immediate())
+ a.bt(edx, (bitp.immediate() & (inst.size() * 8 - 1)));
}
else
{
if (srcp.is_memory())
- emit_bt_m32_r32(dst, MABS(srcp.memory()), REG_ECX); // bt [srcp],ecx
+ a.bt(MABS(srcp.memory()), ecx);
else if (srcp.is_int_register())
- emit_bt_r32_r32(dst, srcp.ireg(), REG_ECX); // bt [srcp],ecx
+ a.bt(Gpd(srcp.ireg()), ecx);
+ else if (srcp.is_immediate())
+ a.bt(edx, ecx);
}
}
// 64-bit form
else
{
+ if (srcp.is_immediate())
+ emit_mov_r64_p64(a, ebx, edx, srcp);
+
if (bitp.is_immediate())
{
- if (srcp.is_memory())
- emit_bt_m32_imm(dst, MABS(srcp.memory()), bitp.immediate()); // bt [srcp],bitp
- else if (srcp.is_int_register() && bitp.immediate() < 32)
- emit_bt_r32_imm(dst, srcp.ireg(), bitp.immediate()); // bt srcp,bitp
- else if (srcp.is_int_register() && bitp.immediate() >= 32)
- emit_bt_m32_imm(dst, MABS(m_reghi[srcp.ireg()]), bitp.immediate() - 32); // bt [srcp.hi],bitp
+ const uint32_t bitshift = bitp.immediate() & (inst.size() * 8 - 1);
+ if (bitshift < 32)
+ {
+ if (srcp.is_memory())
+ a.bt(MABS(srcp.memory(), 4), bitshift);
+ else if (srcp.is_int_register())
+ a.bt(Gpd(srcp.ireg()), bitshift);
+ else if (srcp.is_immediate())
+ a.bt(ebx, bitshift);
+ }
+ else if (bitshift >= 32)
+ {
+ if (srcp.is_memory())
+ a.bt(MABS((uint8_t*)srcp.memory() + 4, 4), bitshift - 32);
+ else if (srcp.is_int_register())
+ a.bt(MABS(m_reghi[srcp.ireg()], 4), bitshift - 32);
+ else if (srcp.is_immediate())
+ a.bt(edx, bitshift);
+ }
}
else
{
+ Label end = a.newLabel();
+ Label higher = a.newLabel();
+
+ a.cmp(ecx, 32);
+ a.short_().jge(higher);
+
if (srcp.is_memory())
- emit_bt_m32_r32(dst, MABS(srcp.memory()), REG_ECX); // bt [srcp],ecx
+ {
+ a.bt(MABS(srcp.memory(), 4), ecx);
+ }
else if (srcp.is_int_register())
{
- emit_mov_m32_r32(dst, MABS(m_reglo[srcp.ireg()]), srcp.ireg()); // mov [srcp.lo],srcp
- emit_bt_m32_r32(dst, MABS(m_reglo[srcp.ireg()]), REG_ECX); // bt [srcp],ecx
+ a.mov(MABS(m_reglo[srcp.ireg()], 4), Gpd(srcp.ireg())); // mov [srcp.lo],srcp
+ a.bt(MABS(m_reglo[srcp.ireg()], 4), ecx); // bt [srcp],ecx
+ }
+ else if (srcp.is_immediate())
+ {
+ a.bt(ebx, ecx);
}
+
+ a.short_().jmp(end);
+
+ a.bind(higher);
+ a.sub(ecx, 32);
+
+ if (srcp.is_memory())
+ {
+ a.bt(MABS((uint8_t*)srcp.memory() + 4, 4), ecx);
+ }
+ else if (srcp.is_int_register())
+ {
+ a.bt(MABS(m_reghi[srcp.ireg()], 4), ecx);
+ }
+ else if (srcp.is_immediate())
+ {
+ a.bt(edx, ecx);
+ }
+
+ a.bind(end);
}
}
}
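
For reference, the 64-bit path above has to split the bit index across the two 32-bit halves because the target only has 32-bit registers. A minimal C++ sketch of the equivalent computation (the helper name carry_bit_64 is illustrative, not part of the backend):

#include <cstdint>

// Bit indices are masked to 0..63; bits 0..31 come from the low dword and
// bits 32..63 from the high dword, tested with index - 32 -- the same
// decomposition the emitted bt instructions perform.
bool carry_bit_64(uint64_t src, unsigned bit)
{
	bit &= 63;                                // bitp.immediate() & (inst.size() * 8 - 1)
	const uint32_t lo = uint32_t(src);        // [srcp] / low register half
	const uint32_t hi = uint32_t(src >> 32);  // [srcp+4] / m_reghi[...]
	return (bit < 32) ? ((lo >> bit) & 1) : ((hi >> (bit - 32)) & 1);
}
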
@@ -4162,7 +4361,7 @@ void drcbe_x86::op_carry(x86code *&dst, const instruction &inst)
// op_set - process a SET opcode
//-------------------------------------------------
-void drcbe_x86::op_set(x86code *&dst, const instruction &inst)
+void drcbe_x86::op_set(Assembler &a, const instruction &inst)
{
// validate instruction
assert(inst.size() == 4 || inst.size() == 8);
@@ -4173,23 +4372,23 @@ void drcbe_x86::op_set(x86code *&dst, const instruction &inst)
be_parameter dstp(*this, inst.param(0), PTYPE_MR);
// pick a target register for the general case
- int dstreg = dstp.select_register(REG_EAX);
+ Gp const dstreg = dstp.select_register(eax);
// set to AL
- emit_setcc_r8(dst, X86_CONDITION(inst.condition()), REG_AL); // setcc al
- emit_movzx_r32_r8(dst, dstreg, REG_AL); // movzx dstreg,al
+ a.set(X86_CONDITION(inst.condition()), al); // setcc al
+ a.movzx(dstreg, al); // movzx dstreg,al
// store low 32 bits
- emit_mov_p32_r32(dst, dstp, dstreg); // mov dstp,dstreg
+ emit_mov_p32_r32(a, dstp, dstreg); // mov dstp,dstreg
// 64-bit form stores upper 32 bits
if (inst.size() == 8)
{
// general case
if (dstp.is_memory())
- emit_mov_m32_imm(dst, MABS(dstp.memory(4)), 0); // mov [dstp+4],0
+ a.mov(MABS(dstp.memory(4), 4), 0); // mov [dstp+4],0
else if (dstp.is_int_register())
- emit_mov_m32_imm(dst, MABS(m_reghi[dstp.ireg()]), 0); // mov [reghi],0
+ a.mov(MABS(m_reghi[dstp.ireg()], 4), 0); // mov [reghi],0
}
}
@@ -4198,10 +4397,8 @@ void drcbe_x86::op_set(x86code *&dst, const instruction &inst)
// op_mov - process a MOV opcode
//-------------------------------------------------
-void drcbe_x86::op_mov(x86code *&dst, const instruction &inst)
+void drcbe_x86::op_mov(Assembler &a, const instruction &inst)
{
- x86code *savedst = dst;
-
// validate instruction
assert(inst.size() == 4 || inst.size() == 8);
assert_any_condition(inst);
@@ -4212,47 +4409,42 @@ void drcbe_x86::op_mov(x86code *&dst, const instruction &inst)
be_parameter srcp(*this, inst.param(1), PTYPE_MRI);
// pick a target register for the general case
- int dstreg = dstp.select_register(REG_EAX);
+ Gp const dstreg = dstp.select_register(eax);
- // always start with a jmp
- emit_link skip = { nullptr };
- if (inst.condition() != uml::COND_ALWAYS)
- emit_jcc_short_link(dst, X86_NOT_CONDITION(inst.condition()), skip); // jcc skip
+ // add a conditional branch unless a conditional move is possible
+ Label skip;
+ if (inst.condition() != uml::COND_ALWAYS && ((inst.size() == 8) || !(dstp.is_int_register() && !srcp.is_immediate())))
+ {
+ skip = a.newLabel();
+ a.short_().j(X86_NOT_CONDITION(inst.condition()), skip); // jcc skip
+ }
// 32-bit form
if (inst.size() == 4)
{
// register to memory
if (dstp.is_memory() && srcp.is_int_register())
- emit_mov_m32_r32(dst, MABS(dstp.memory()), srcp.ireg()); // mov [dstp],srcp
+ a.mov(MABS(dstp.memory()), Gpd(srcp.ireg())); // mov [dstp],srcp
// immediate to memory
else if (dstp.is_memory() && srcp.is_immediate())
- emit_mov_m32_imm(dst, MABS(dstp.memory()), srcp.immediate()); // mov [dstp],srcp
+ a.mov(MABS(dstp.memory(), 4), srcp.immediate()); // mov [dstp],srcp
// conditional memory to register
else if (inst.condition() != uml::COND_ALWAYS && dstp.is_int_register() && srcp.is_memory())
- {
- dst = savedst;
- skip.target = nullptr;
- emit_cmovcc_r32_m32(dst, X86_CONDITION(inst.condition()), dstp.ireg(), MABS(srcp.memory()));
+ a.cmov(X86_CONDITION(inst.condition()), Gpd(dstp.ireg()), MABS(srcp.memory())); // cmovcc dstp,[srcp]
- }
// conditional register to register
else if (inst.condition() != uml::COND_ALWAYS && dstp.is_int_register() && srcp.is_int_register())
- {
- dst = savedst;
- skip.target = nullptr;
- emit_cmovcc_r32_r32(dst, X86_CONDITION(inst.condition()), dstp.ireg(), srcp.ireg());
+ a.cmov(X86_CONDITION(inst.condition()), Gpd(dstp.ireg()), Gpd(srcp.ireg())); // cmovcc dstp,srcp
- }
// general case
else
{
- emit_mov_r32_p32_keepflags(dst, dstreg, srcp); // mov dstreg,srcp
- emit_mov_p32_r32(dst, dstp, dstreg); // mov dstp,dstreg
+ emit_mov_r32_p32_keepflags(a, dstreg, srcp); // mov dstreg,srcp
+ emit_mov_p32_r32(a, dstp, dstreg); // mov dstp,dstreg
}
}
@@ -4262,29 +4454,32 @@ void drcbe_x86::op_mov(x86code *&dst, const instruction &inst)
// register to memory
if (dstp.is_memory() && srcp.is_int_register())
{
- emit_mov_r32_m32(dst, REG_EAX, MABS(m_reghi[srcp.ireg()])); // mov eax,reghi[srcp]
- emit_mov_m32_r32(dst, MABS(dstp.memory()), srcp.ireg()); // mov [dstp],srcp
- emit_mov_m32_r32(dst, MABS(dstp.memory(4)), REG_EAX); // mov [dstp+4],eax
+ a.mov(eax, MABS(m_reghi[srcp.ireg()])); // mov eax,reghi[srcp]
+ a.mov(MABS(dstp.memory(0)), Gpd(srcp.ireg())); // mov [dstp],srcp
+ a.mov(MABS(dstp.memory(4)), eax); // mov [dstp+4],eax
}
// immediate to memory
else if (dstp.is_memory() && srcp.is_immediate())
{
- emit_mov_m32_imm(dst, MABS(dstp.memory()), srcp.immediate()); // mov [dstp],srcp
- emit_mov_m32_imm(dst, MABS(dstp.memory(4)), srcp.immediate() >> 32); // mov [dstp+4],srcp >> 32
+ a.mov(MABS(dstp.memory(0), 4), srcp.immediate()); // mov [dstp],srcp
+ a.mov(MABS(dstp.memory(4), 4), srcp.immediate() >> 32); // mov [dstp+4],srcp >> 32
}
// general case
else
{
- emit_mov_r64_p64(dst, dstreg, REG_EDX, srcp); // mov edx:dstreg,srcp
- emit_mov_p64_r64(dst, dstp, dstreg, REG_EDX); // mov dstp,edx:dstreg
+ emit_mov_r64_p64_keepflags(a, dstreg, edx, srcp); // mov edx:dstreg,srcp
+ emit_mov_p64_r64(a, dstp, dstreg, edx); // mov dstp,edx:dstreg
}
}
- // resolve the jump
- if (skip.target != nullptr)
- track_resolve_link(dst, skip);
+ // bind the label
+ if (inst.condition() != uml::COND_ALWAYS && ((inst.size() == 8) || !(dstp.is_int_register() && !srcp.is_immediate())))
+ {
+ a.bind(skip);
+ reset_last_upper_lower_reg();
+ }
}
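
A short sketch of the decision made above (the predicate name is illustrative): a conditional 32-bit move into an integer register from a non-immediate source can use cmovcc directly, while every other conditional case falls back to a short branch that skips the unconditional move sequence.

// Mirrors the condition used to decide whether the skip label is needed.
bool can_use_cmov(unsigned inst_size, bool dst_is_int_reg, bool src_is_immediate)
{
	return (inst_size == 4) && dst_is_int_reg && !src_is_immediate;
}
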
@@ -4292,7 +4487,7 @@ void drcbe_x86::op_mov(x86code *&dst, const instruction &inst)
// op_sext - process a SEXT opcode
//-------------------------------------------------
-void drcbe_x86::op_sext(x86code *&dst, const instruction &inst)
+void drcbe_x86::op_sext(Assembler &a, const instruction &inst)
{
// validate instruction
assert(inst.size() == 4 || inst.size() == 8);
@@ -4302,50 +4497,60 @@ void drcbe_x86::op_sext(x86code *&dst, const instruction &inst)
// normalize parameters
be_parameter dstp(*this, inst.param(0), PTYPE_MR);
be_parameter srcp(*this, inst.param(1), PTYPE_MRI);
- const parameter &sizep = inst.param(2);
+ parameter const &sizep = inst.param(2);
assert(sizep.is_size());
// pick a target register for the general case
- int dstreg = (inst.size() == 8) ? REG_EAX : dstp.select_register(REG_EAX);
+ Gp const dstreg = eax;
// convert 8-bit source registers to EAX
if (sizep.size() == SIZE_BYTE && srcp.is_int_register() && (srcp.ireg() & 4))
{
- emit_mov_r32_r32(dst, REG_EAX, srcp.ireg()); // mov eax,srcp
- srcp = be_parameter::make_ireg(REG_EAX);
+ a.mov(eax, Gpd(srcp.ireg())); // mov eax,srcp
+ srcp = be_parameter::make_ireg(eax.id());
}
// general case
if (srcp.is_memory())
{
if (sizep.size() == SIZE_BYTE)
- emit_movsx_r32_m8(dst, dstreg, MABS(srcp.memory())); // movsx dstreg,[srcp]
+ a.movsx(dstreg, MABS(srcp.memory(), 1)); // movsx dstreg,[srcp]
else if (sizep.size() == SIZE_WORD)
- emit_movsx_r32_m16(dst, dstreg, MABS(srcp.memory())); // movsx dstreg,[srcp]
+ a.movsx(dstreg, MABS(srcp.memory(), 2)); // movsx dstreg,[srcp]
else if (sizep.size() == SIZE_DWORD)
- emit_mov_r32_m32(dst, dstreg, MABS(srcp.memory())); // mov dstreg,[srcp]
+ a.mov(dstreg, MABS(srcp.memory())); // mov dstreg,[srcp]
}
else if (srcp.is_int_register())
{
if (sizep.size() == SIZE_BYTE)
- emit_movsx_r32_r8(dst, dstreg, srcp.ireg()); // movsx dstreg,srcp
+ a.movsx(dstreg, GpbLo(srcp.ireg())); // movsx dstreg,srcp
else if (sizep.size() == SIZE_WORD)
- emit_movsx_r32_r16(dst, dstreg, srcp.ireg()); // movsx dstreg,srcp
- else if (sizep.size() == SIZE_DWORD && dstreg != srcp.ireg())
- emit_mov_r32_r32(dst, dstreg, srcp.ireg()); // mov dstreg,srcp
+ a.movsx(dstreg, Gpw(srcp.ireg())); // movsx dstreg,srcp
+ else if (sizep.size() == SIZE_DWORD && dstreg.id() != srcp.ireg())
+ a.mov(dstreg, Gpd(srcp.ireg())); // mov dstreg,srcp
+ }
+ else if (srcp.is_immediate())
+ {
+ if (sizep.size() == SIZE_BYTE)
+ a.mov(dstreg, (int8_t)srcp.immediate());
+ else if (sizep.size() == SIZE_WORD)
+ a.mov(dstreg, (int16_t)srcp.immediate());
+ else if (sizep.size() == SIZE_DWORD)
+ a.mov(dstreg, (int32_t)srcp.immediate());
}
+
if (inst.flags() != 0)
- emit_test_r32_r32(dst, dstreg, dstreg); // test dstreg,dstreg
+ a.test(dstreg, dstreg); // test dstreg,dstreg
// 32-bit form: store the low 32 bits
if (inst.size() == 4)
- emit_mov_p32_r32(dst, dstp, dstreg); // mov dstp,dstreg
+ emit_mov_p32_r32(a, dstp, dstreg); // mov dstp,dstreg
// 64-bit form: sign extend to 64 bits and store edx:eax
else if (inst.size() == 8)
{
- emit_cdq(dst); // cdq
- emit_mov_p64_r64(dst, dstp, REG_EAX, REG_EDX); // mov dstp,edx:eax
+ a.cdq(); // cdq
+ emit_mov_p64_r64(a, dstp, dstreg, edx); // mov dstp,edx:eax
}
}
@@ -4354,7 +4559,7 @@ void drcbe_x86::op_sext(x86code *&dst, const instruction &inst)
// op_roland - process an ROLAND opcode
//-------------------------------------------------
-void drcbe_x86::op_roland(x86code *&dst, const instruction &inst)
+void drcbe_x86::op_roland(Assembler &a, const instruction &inst)
{
// validate instruction
assert(inst.size() == 4 || inst.size() == 8);
@@ -4368,24 +4573,42 @@ void drcbe_x86::op_roland(x86code *&dst, const instruction &inst)
be_parameter maskp(*this, inst.param(3), PTYPE_MRI);
// pick a target register for the general case
- int dstreg = dstp.select_register(REG_EAX, shiftp, maskp);
+ Gp const dstreg = dstp.select_register(eax, shiftp, maskp);
// 32-bit form
if (inst.size() == 4)
{
- emit_mov_r32_p32(dst, dstreg, srcp); // mov dstreg,srcp
- emit_rol_r32_p32(dst, dstreg, shiftp, inst); // rol dstreg,shiftp
- emit_and_r32_p32(dst, dstreg, maskp, inst); // and dstreg,maskp
- emit_mov_p32_r32(dst, dstp, dstreg); // mov dstp,dstreg
+ emit_mov_r32_p32(a, dstreg, srcp); // mov dstreg,srcp
+ shift_op_param(a, Inst::kIdRol, inst.size(), dstreg, shiftp, // rol dstreg,shiftp
+ [inst](Assembler &a, Operand const &dst, be_parameter const &src)
+ {
+ // optimize zero case
+ return (!inst.flags() && !src.immediate());
+ }, false);
+ alu_op_param(a, Inst::kIdAnd, dstreg, maskp, // and dstreg,maskp
+ [inst](Assembler &a, Operand const &dst, be_parameter const &src)
+ {
+ // optimize all-zero and all-one cases
+ if (!inst.flags() && !src.immediate())
+ {
+ a.xor_(dst.as<Gpd>(), dst.as<Gpd>());
+ return true;
+ }
+ else if (!inst.flags() && u32(src.immediate()) == 0xffffffffU)
+ return true;
+
+ return false;
+ });
+ emit_mov_p32_r32(a, dstp, dstreg); // mov dstp,dstreg
}
// 64-bit form
else if (inst.size() == 8)
{
- emit_mov_r64_p64(dst, dstreg, REG_EDX, srcp); // mov edx:dstreg,srcp
- emit_rol_r64_p64(dst, dstreg, REG_EDX, shiftp, inst); // rol edx:dstreg,shiftp
- emit_and_r64_p64(dst, dstreg, REG_EDX, maskp, inst); // and edx:dstreg,maskp
- emit_mov_p64_r64(dst, dstp, dstreg, REG_EDX); // mov dstp,edx:dstreg
+ emit_mov_r64_p64(a, dstreg, edx, srcp); // mov edx:dstreg,srcp
+ emit_rol_r64_p64(a, dstreg, edx, shiftp, inst); // rol edx:dstreg,shiftp
+ emit_and_r64_p64(a, dstreg, edx, maskp, inst); // and edx:dstreg,maskp
+ emit_mov_p64_r64(a, dstp, dstreg, edx); // mov dstp,edx:dstreg
}
}
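
The 32-bit path above is the ROLAND operation spelled out as rotate-then-mask; a minimal C++ sketch of the same computation (rol32/roland32 are illustrative helpers):

#include <cstdint>

// Rotate left by the masked shift count, then AND with the mask -- matching
// the rol/and pair emitted for the 32-bit form.
uint32_t rol32(uint32_t value, unsigned shift)
{
	shift &= 31;
	return shift ? ((value << shift) | (value >> (32 - shift))) : value;
}

uint32_t roland32(uint32_t src, unsigned shift, uint32_t mask)
{
	return rol32(src, shift) & mask;
}
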
@@ -4394,7 +4617,7 @@ void drcbe_x86::op_roland(x86code *&dst, const instruction &inst)
// op_rolins - process an ROLINS opcode
//-------------------------------------------------
-void drcbe_x86::op_rolins(x86code *&dst, const instruction &inst)
+void drcbe_x86::op_rolins(Assembler &a, const instruction &inst)
{
// validate instruction
assert(inst.size() == 4 || inst.size() == 8);
@@ -4408,89 +4631,108 @@ void drcbe_x86::op_rolins(x86code *&dst, const instruction &inst)
be_parameter maskp(*this, inst.param(3), PTYPE_MRI);
// pick a target register for the general case
- int dstreg = dstp.select_register(REG_ECX, shiftp, maskp);
+ Gp const dstreg = dstp.select_register(ecx, shiftp, maskp);
- // 32-bit form
if (inst.size() == 4)
{
- emit_mov_r32_p32(dst, REG_EAX, srcp); // mov eax,srcp
- emit_rol_r32_p32(dst, REG_EAX, shiftp, inst); // rol eax,shiftp
- emit_mov_r32_p32(dst, dstreg, dstp); // mov dstreg,dstp
+ // 32-bit form
+ emit_mov_r32_p32(a, eax, srcp); // mov eax,srcp
+ shift_op_param(a, Inst::kIdRol, inst.size(), eax, shiftp, // rol eax,shiftp
+ [inst](Assembler &a, Operand const &dst, be_parameter const &src)
+ {
+ // optimize zero case
+ return (!inst.flags() && !src.immediate());
+ }, false);
+ emit_mov_r32_p32(a, dstreg, dstp); // mov dstreg,dstp
if (maskp.is_immediate())
{
- emit_and_r32_imm(dst, REG_EAX, maskp.immediate()); // and eax,maskp
- emit_and_r32_imm(dst, dstreg, ~maskp.immediate()); // and dstreg,~maskp
+ a.and_(eax, maskp.immediate()); // and eax,maskp
+ a.and_(dstreg, ~maskp.immediate()); // and dstreg,~maskp
}
else
{
- emit_mov_r32_p32(dst, REG_EDX, maskp); // mov edx,maskp
- emit_and_r32_r32(dst, REG_EAX, REG_EDX); // and eax,edx
- emit_not_r32(dst, REG_EDX); // not edx
- emit_and_r32_r32(dst, dstreg, REG_EDX); // and dstreg,edx
+ emit_mov_r32_p32(a, edx, maskp); // mov edx,maskp
+ a.and_(eax, edx); // and eax,edx
+ a.not_(edx); // not edx
+ a.and_(dstreg, edx); // and dstreg,edx
}
- emit_or_r32_r32(dst, dstreg, REG_EAX); // or dstreg,eax
- emit_mov_p32_r32(dst, dstp, dstreg); // mov dstp,dstreg
- }
+ a.or_(dstreg, eax); // or dstreg,eax
+ emit_mov_p32_r32(a, dstp, dstreg); // mov dstp,dstreg
- // 64-bit form
+ if (inst.flags())
+ a.test(dstreg, dstreg);
+ }
else if (inst.size() == 8)
{
- emit_mov_r64_p64(dst, REG_EAX, REG_EDX, srcp); // mov edx:eax,srcp
- emit_rol_r64_p64(dst, REG_EAX, REG_EDX, shiftp, inst); // rol edx:eax,shiftp
+ // 64-bit form
+ emit_mov_r64_p64(a, eax, edx, srcp); // mov edx:eax,srcp
+ emit_rol_r64_p64(a, eax, edx, shiftp, inst); // rol edx:eax,shiftp
if (maskp.is_immediate())
{
- emit_and_r32_imm(dst, REG_EAX, maskp.immediate()); // and eax,maskp
- emit_and_r32_imm(dst, REG_EDX, maskp.immediate() >> 32); // and edx,maskp >> 32
+ a.and_(eax, maskp.immediate()); // and eax,maskp
+ a.and_(edx, maskp.immediate() >> 32); // and edx,maskp >> 32
if (dstp.is_int_register())
{
- emit_and_r32_imm(dst, dstp.ireg(), ~maskp.immediate()); // and dstp.lo,~maskp
- emit_and_m32_imm(dst, MABS(m_reghi[dstp.ireg()]), ~maskp.immediate() >> 32);// and dstp.hi,~maskp >> 32
- emit_or_r32_r32(dst, dstp.ireg(), REG_EAX); // or dstp.lo,eax
- emit_or_m32_r32(dst, MABS(m_reghi[dstp.ireg()]), REG_EDX); // or dstp.hi,edx
+ a.and_(Gpd(dstp.ireg()), ~maskp.immediate()); // and dstp.lo,~maskp
+ a.and_(MABS(m_reghi[dstp.ireg()], 4), ~maskp.immediate() >> 32); // and dstp.hi,~maskp >> 32
+ a.or_(Gpd(dstp.ireg()), eax); // or dstp.lo,eax
+ a.or_(MABS(m_reghi[dstp.ireg()]), edx); // or dstp.hi,edx
}
else
{
- emit_and_m32_imm(dst, MABS(dstp.memory()), ~maskp.immediate()); // and dstp.lo,~maskp
- emit_and_m32_imm(dst, MABS(dstp.memory(4)), ~maskp.immediate() >> 32); // and dstp.hi,~maskp >> 32
- emit_or_m32_r32(dst, MABS(dstp.memory()), REG_EAX); // or dstp.lo,eax
- emit_or_m32_r32(dst, MABS(dstp.memory(4)), REG_EDX); // or dstp.hi,edx
+ a.and_(MABS(dstp.memory(0), 4), ~maskp.immediate()); // and dstp.lo,~maskp
+ a.and_(MABS(dstp.memory(4), 4), ~maskp.immediate() >> 32); // and dstp.hi,~maskp >> 32
+ a.or_(MABS(dstp.memory(0)), eax); // or dstp.lo,eax
+ a.or_(MABS(dstp.memory(4)), edx); // or dstp.hi,edx
}
}
else
{
- int tempreg = REG_EBX;
- emit_mov_m32_r32(dst, MBD(REG_ESP, -8), tempreg); // mov [esp-8],ebx
- emit_mov_r64_p64(dst, tempreg, REG_ECX, maskp); // mov ecx:ebx,maskp
- emit_and_r32_r32(dst, REG_EAX, tempreg); // and eax,ebx
- emit_and_r32_r32(dst, REG_EDX, REG_ECX); // and edx,ecx
- emit_not_r32(dst, tempreg); // not ebx
- emit_not_r32(dst, REG_ECX); // not ecx
+ a.mov(ptr(esp, -8), ebx); // mov [esp-8],ebx
+ emit_mov_r64_p64(a, ebx, ecx, maskp); // mov ecx:ebx,maskp
+ a.and_(eax, ebx); // and eax,ebx
+ a.and_(edx, ecx); // and edx,ecx
+ a.not_(ebx); // not ebx
+ a.not_(ecx); // not ecx
if (dstp.is_int_register())
{
- emit_and_r32_r32(dst, dstp.ireg(), tempreg); // and dstp.lo,ebx
- emit_and_m32_r32(dst, MABS(m_reghi[dstp.ireg()]), REG_ECX); // and dstp.hi,ecx
- emit_or_r32_r32(dst, dstp.ireg(), REG_EAX); // or dstp.lo,eax
- emit_or_m32_r32(dst, MABS(m_reghi[dstp.ireg()]), REG_EDX); // or dstp.hi,edx
+ if (dstp.ireg() == Gp::kIdBx)
+ a.and_(ptr(esp, -8), ebx); // and dstp.lo,ebx
+ else
+ a.and_(Gpd(dstp.ireg()), ebx); // and dstp.lo,ebx
+ a.and_(MABS(m_reghi[dstp.ireg()]), ecx); // and dstp.hi,ecx
+ if (dstp.ireg() == Gp::kIdBx)
+ a.or_(ptr(esp, -8), eax); // or dstp.lo,eax
+ else
+ a.or_(Gpd(dstp.ireg()), eax); // or dstp.lo,eax
+ a.or_(MABS(m_reghi[dstp.ireg()]), edx); // or dstp.hi,edx
}
else
{
- emit_and_m32_r32(dst, MABS(dstp.memory()), tempreg); // and dstp.lo,ebx
- emit_and_m32_r32(dst, MABS(dstp.memory(4)), REG_ECX); // and dstp.hi,ecx
- emit_or_m32_r32(dst, MABS(dstp.memory()), REG_EAX); // or dstp.lo,eax
- emit_or_m32_r32(dst, MABS(dstp.memory(4)), REG_EDX); // or dstp.hi,edx
+ a.and_(MABS(dstp.memory(0)), ebx); // and dstp.lo,ebx
+ a.and_(MABS(dstp.memory(4)), ecx); // and dstp.hi,ecx
+ a.or_(MABS(dstp.memory(0)), eax); // or dstp.lo,eax
+ a.or_(MABS(dstp.memory(4)), edx); // or dstp.hi,edx
+ }
+
+ a.mov(ebx, ptr(esp, -8)); // mov ebx,[esp-8]
+
+ if (inst.flags())
+ {
+ if (dstp.is_int_register())
+ calculate_status_flags(a, Gpd(dstp.ireg()), FLAG_Z);
+ else
+ calculate_status_flags(a, MABS(dstp.memory(0)), FLAG_Z);
+
+ a.pushfd();
+
+ if (dstp.is_int_register())
+ calculate_status_flags(a, MABS(m_reghi[dstp.ireg()]), FLAG_S | FLAG_Z);
+ else
+ calculate_status_flags(a, MABS(dstp.memory(4)), FLAG_S | FLAG_Z);
+
+ emit_combine_z_flags(a);
}
- emit_mov_r32_m32(dst, tempreg, MBD(REG_ESP, -8)); // mov ebx,[esp-8]
- }
- if (inst.flags() == FLAG_Z)
- emit_or_r32_r32(dst, REG_EAX, REG_EDX); // or eax,edx
- else if (inst.flags() == FLAG_S)
- ;// do nothing -- final OR will have the right result
- else if (inst.flags() == (FLAG_Z | FLAG_S))
- {
- emit_movzx_r32_r16(dst, REG_ECX, REG_AX); // movzx ecx,ax
- emit_shr_r32_imm(dst, REG_EAX, 16); // shr eax,16
- emit_or_r32_r32(dst, REG_EDX, REG_ECX); // or edx,ecx
- emit_or_r32_r32(dst, REG_EDX, REG_EAX); // or edx,eax
}
}
}
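
ROLINS rotates the source and then inserts it into the destination under a mask; a minimal C++ sketch of the 32-bit semantics implemented above (helper names are illustrative):

#include <cstdint>

// dst keeps its bits where mask is 0 and receives the rotated source where
// mask is 1 -- the same and/not/and/or sequence emitted above.
uint32_t rol32(uint32_t value, unsigned shift)
{
	shift &= 31;
	return shift ? ((value << shift) | (value >> (32 - shift))) : value;
}

uint32_t rolins32(uint32_t dst, uint32_t src, unsigned shift, uint32_t mask)
{
	return (dst & ~mask) | (rol32(src, shift) & mask);
}
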
@@ -4500,7 +4742,7 @@ void drcbe_x86::op_rolins(x86code *&dst, const instruction &inst)
// op_add - process a ADD opcode
//-------------------------------------------------
-void drcbe_x86::op_add(x86code *&dst, const instruction &inst)
+void drcbe_x86::op_add(Assembler &a, const instruction &inst)
{
// validate instruction
assert(inst.size() == 4 || inst.size() == 8);
@@ -4514,29 +4756,39 @@ void drcbe_x86::op_add(x86code *&dst, const instruction &inst)
normalize_commutative(src1p, src2p);
// pick a target register for the general case
- int dstreg = dstp.select_register(REG_EAX, src2p);
+ Gp const dstreg = dstp.select_register(eax, src2p);
// 32-bit form
if (inst.size() == 4)
{
// dstp == src1p in memory
if (dstp.is_memory() && dstp == src1p)
- emit_add_m32_p32(dst, MABS(dstp.memory()), src2p, inst); // add [dstp],src2p
+ alu_op_param(a, Inst::kIdAdd, MABS(dstp.memory(), 4), src2p, // add [dstp],src2p
+ [inst](Assembler &a, Operand const &dst, be_parameter const &src)
+ {
+ // optimize zero case
+ return (!inst.flags() && !src.immediate());
+ });
// reg = reg + imm
- else if (dstp.is_int_register() && src1p.is_int_register() && src2p.is_immediate() && inst.flags() == 0)
- emit_lea_r32_m32(dst, dstp.ireg(), MBD(src1p.ireg(), src2p.immediate())); // lea dstp,[src1p+src2p]
+ else if (dstp.is_int_register() && src1p.is_int_register() && src2p.is_immediate() && !inst.flags())
+ a.lea(Gpd(dstp.ireg()), ptr(Gpd(src1p.ireg()), src2p.immediate())); // lea dstp,[src1p+src2p]
// reg = reg + reg
- else if (dstp.is_int_register() && src1p.is_int_register() && src2p.is_int_register() && inst.flags() == 0)
- emit_lea_r32_m32(dst, dstp.ireg(), MBISD(src1p.ireg(), src2p.ireg(), 1, 0)); // lea dstp,[src1p+src2p]
+ else if (dstp.is_int_register() && src1p.is_int_register() && src2p.is_int_register() && !inst.flags())
+ a.lea(Gpd(dstp.ireg()), ptr(Gpd(src1p.ireg()), Gpd(src2p.ireg()))); // lea dstp,[src1p+src2p]
// general case
else
{
- emit_mov_r32_p32(dst, dstreg, src1p); // mov dstreg,src1p
- emit_add_r32_p32(dst, dstreg, src2p, inst); // add dstreg,src2p
- emit_mov_p32_r32(dst, dstp, dstreg); // mov dstp,dstreg
+ emit_mov_r32_p32(a, dstreg, src1p); // mov dstreg,src1p
+ alu_op_param(a, Inst::kIdAdd, dstreg, src2p, // add dstreg,src2p
+ [inst](Assembler &a, Operand const &dst, be_parameter const &src)
+ {
+ // optimize zero case
+ return (!inst.flags() && !src.immediate());
+ });
+ emit_mov_p32_r32(a, dstp, dstreg); // mov dstp,dstreg
}
}
@@ -4545,14 +4797,16 @@ void drcbe_x86::op_add(x86code *&dst, const instruction &inst)
{
// dstp == src1p in memory
if (dstp.is_memory() && dstp == src1p)
- emit_add_m64_p64(dst, MABS(dstp.memory()), src2p, inst); // add [dstp],src2p
+ alu_op_param(a, Inst::kIdAdd, Inst::kIdAdc, // add [dstp],src2p
+ MABS(dstp.memory(0), 4), MABS(dstp.memory(4), 4), src2p, inst.flags() & FLAG_Z);
// general case
else
{
- emit_mov_r64_p64(dst, dstreg, REG_EDX, src1p); // mov dstreg:dstp,[src1p]
- emit_add_r64_p64(dst, dstreg, REG_EDX, src2p, inst); // add dstreg:dstp,src2p
- emit_mov_p64_r64(dst, dstp, dstreg, REG_EDX); // mov dstp,dstreg:eax
+ emit_mov_r64_p64(a, dstreg, edx, src1p); // mov edx:dstreg,[src1p]
+ alu_op_param(a, Inst::kIdAdd, Inst::kIdAdc, // add edx:dstreg,src2p
+ dstreg, edx, src2p, inst.flags() & FLAG_Z);
+ emit_mov_p64_r64(a, dstp, dstreg, edx); // mov dstp,edx:dstreg
}
}
}
@@ -4562,7 +4816,7 @@ void drcbe_x86::op_add(x86code *&dst, const instruction &inst)
// op_addc - process a ADDC opcode
//-------------------------------------------------
-void drcbe_x86::op_addc(x86code *&dst, const instruction &inst)
+void drcbe_x86::op_addc(Assembler &a, const instruction &inst)
{
// validate instruction
assert(inst.size() == 4 || inst.size() == 8);
@@ -4576,21 +4830,21 @@ void drcbe_x86::op_addc(x86code *&dst, const instruction &inst)
normalize_commutative(src1p, src2p);
// pick a target register for the general case
- int dstreg = dstp.select_register(REG_EAX, src2p);
+ Gp const dstreg = dstp.select_register(eax, src2p);
// 32-bit form
if (inst.size() == 4)
{
// dstp == src1p in memory
if (dstp.is_memory() && dstp == src1p)
- emit_adc_m32_p32(dst, MABS(dstp.memory()), src2p, inst); // adc [dstp],src2p
+ alu_op_param(a, Inst::kIdAdc, MABS(dstp.memory(), 4), src2p); // adc [dstp],src2p
// general case
else
{
- emit_mov_r32_p32_keepflags(dst, dstreg, src1p); // mov dstreg,src1p
- emit_adc_r32_p32(dst, dstreg, src2p, inst); // adc dstreg,src2p
- emit_mov_p32_r32(dst, dstp, dstreg); // mov dstp,dstreg
+ emit_mov_r32_p32_keepflags(a, dstreg, src1p); // mov dstreg,src1p
+ alu_op_param(a, Inst::kIdAdc, dstreg, src2p); // adc dstreg,src2p
+ emit_mov_p32_r32(a, dstp, dstreg); // mov dstp,dstreg
}
}
@@ -4599,14 +4853,16 @@ void drcbe_x86::op_addc(x86code *&dst, const instruction &inst)
{
// dstp == src1p in memory
if (dstp.is_memory() && dstp == src1p)
- emit_adc_m64_p64(dst, MABS(dstp.memory()), src2p, inst); // adc [dstp],src2p
+ alu_op_param(a, Inst::kIdAdc, Inst::kIdAdc, // adc [dstp],src2p
+ MABS(dstp.memory(0), 4), MABS(dstp.memory(4), 4), src2p, inst.flags() & FLAG_Z);
// general case
else
{
- emit_mov_r64_p64_keepflags(dst, dstreg, REG_EDX, src1p); // mov dstreg:dstp,[src1p]
- emit_adc_r64_p64(dst, dstreg, REG_EDX, src2p, inst); // adc dstreg:dstp,src2p
- emit_mov_p64_r64(dst, dstp, dstreg, REG_EDX); // mov dstp,dstreg:eax
+ emit_mov_r64_p64_keepflags(a, dstreg, edx, src1p); // mov edx:dstreg,[src1p]
+ alu_op_param(a, Inst::kIdAdc, Inst::kIdAdc, // adc edx:dstreg,src2p
+ dstreg, edx, src2p, inst.flags() & FLAG_Z);
+ emit_mov_p64_r64(a, dstp, dstreg, edx); // mov dstp,edx:dstreg
}
}
}
@@ -4616,7 +4872,7 @@ void drcbe_x86::op_addc(x86code *&dst, const instruction &inst)
// op_sub - process a SUB opcode
//-------------------------------------------------
-void drcbe_x86::op_sub(x86code *&dst, const instruction &inst)
+void drcbe_x86::op_sub(Assembler &a, const instruction &inst)
{
// validate instruction
assert(inst.size() == 4 || inst.size() == 8);
@@ -4629,25 +4885,35 @@ void drcbe_x86::op_sub(x86code *&dst, const instruction &inst)
be_parameter src2p(*this, inst.param(2), PTYPE_MRI);
// pick a target register for the general case
- int dstreg = dstp.select_register(REG_EAX, src2p);
+ Gp const dstreg = dstp.select_register(eax, src2p);
// 32-bit form
if (inst.size() == 4)
{
// dstp == src1p in memory
if (dstp.is_memory() && dstp == src1p)
- emit_sub_m32_p32(dst, MABS(dstp.memory()), src2p, inst); // sub [dstp],src2p
+ alu_op_param(a, Inst::kIdSub, MABS(dstp.memory(), 4), src2p, // sub [dstp],src2p
+ [inst](Assembler &a, Operand const &dst, be_parameter const &src)
+ {
+ // optimize zero case
+ return (!inst.flags() && !src.immediate());
+ });
// reg = reg - imm
- else if (dstp.is_int_register() && src1p.is_int_register() && src2p.is_immediate() && inst.flags() == 0)
- emit_lea_r32_m32(dst, dstp.ireg(), MBD(src1p.ireg(), -src2p.immediate())); // lea dstp,[src1p-src2p]
+ else if (dstp.is_int_register() && src1p.is_int_register() && src2p.is_immediate() && !inst.flags())
+ a.lea(Gpd(dstp.ireg()), ptr(Gpd(src1p.ireg()), -src2p.immediate())); // lea dstp,[src1p-src2p]
// general case
else
{
- emit_mov_r32_p32(dst, dstreg, src1p); // mov dstreg,src1p
- emit_sub_r32_p32(dst, dstreg, src2p, inst); // sub dstreg,src2p
- emit_mov_p32_r32(dst, dstp, dstreg); // mov dstp,dstreg
+ emit_mov_r32_p32(a, dstreg, src1p); // mov dstreg,src1p
+ alu_op_param(a, Inst::kIdSub, dstreg, src2p, // sub dstreg,src2p
+ [inst](Assembler &a, Operand const &dst, be_parameter const &src)
+ {
+ // optimize zero case
+ return (!inst.flags() && !src.immediate());
+ });
+ emit_mov_p32_r32(a, dstp, dstreg); // mov dstp,dstreg
}
}
@@ -4656,14 +4922,16 @@ void drcbe_x86::op_sub(x86code *&dst, const instruction &inst)
{
// dstp == src1p in memory
if (dstp.is_memory() && dstp == src1p)
- emit_sub_m64_p64(dst, MABS(dstp.memory()), src2p, inst); // sub [dstp],src2p
+ alu_op_param(a, Inst::kIdSub, Inst::kIdSbb, // sub [dstp],src2p
+ MABS(dstp.memory(0), 4), MABS(dstp.memory(4), 4), src2p, inst.flags() & FLAG_Z);
// general case
else
{
- emit_mov_r64_p64(dst, dstreg, REG_EDX, src1p); // mov dstreg:dstp,[src1p]
- emit_sub_r64_p64(dst, dstreg, REG_EDX, src2p, inst); // sub dstreg:dstp,src2p
- emit_mov_p64_r64(dst, dstp, dstreg, REG_EDX); // mov dstp,dstreg:eax
+ emit_mov_r64_p64(a, dstreg, edx, src1p); // mov edx:dstreg,[src1p]
+ alu_op_param(a, Inst::kIdSub, Inst::kIdSbb, // sub edx:dstreg,src2p
+ dstreg, edx, src2p, inst.flags() & FLAG_Z);
+ emit_mov_p64_r64(a, dstp, dstreg, edx); // mov dstp,edx:dstreg
}
}
}
@@ -4673,7 +4941,7 @@ void drcbe_x86::op_sub(x86code *&dst, const instruction &inst)
// op_subc - process a SUBC opcode
//-------------------------------------------------
-void drcbe_x86::op_subc(x86code *&dst, const instruction &inst)
+void drcbe_x86::op_subc(Assembler &a, const instruction &inst)
{
// validate instruction
assert(inst.size() == 4 || inst.size() == 8);
@@ -4686,21 +4954,21 @@ void drcbe_x86::op_subc(x86code *&dst, const instruction &inst)
be_parameter src2p(*this, inst.param(2), PTYPE_MRI);
// pick a target register for the general case
- int dstreg = dstp.select_register(REG_EAX, src2p);
+ Gp const dstreg = dstp.select_register(eax, src2p);
// 32-bit form
if (inst.size() == 4)
{
// dstp == src1p in memory
if (dstp.is_memory() && dstp == src1p)
- emit_sbb_m32_p32(dst, MABS(dstp.memory()), src2p, inst); // sbb [dstp],src2p
+ alu_op_param(a, Inst::kIdSbb, MABS(dstp.memory(), 4), src2p); // sbb [dstp],src2p
// general case
else
{
- emit_mov_r32_p32_keepflags(dst, dstreg, src1p); // mov dstreg,src1p
- emit_sbb_r32_p32(dst, dstreg, src2p, inst); // sbb dstreg,src2p
- emit_mov_p32_r32(dst, dstp, dstreg); // mov dstp,dstreg
+ emit_mov_r32_p32_keepflags(a, dstreg, src1p); // mov dstreg,src1p
+ alu_op_param(a, Inst::kIdSbb, dstreg, src2p); // sbb dstreg,src2p
+ emit_mov_p32_r32(a, dstp, dstreg); // mov dstp,dstreg
}
}
@@ -4709,14 +4977,16 @@ void drcbe_x86::op_subc(x86code *&dst, const instruction &inst)
{
// dstp == src1p in memory
if (dstp.is_memory() && dstp == src1p)
- emit_sbb_m64_p64(dst, MABS(dstp.memory()), src2p, inst); // sbb [dstp],src2p
+ alu_op_param(a, Inst::kIdSbb, Inst::kIdSbb, // sbb [dstp],src2p
+ MABS(dstp.memory(0), 4), MABS(dstp.memory(4), 4), src2p, inst.flags() & FLAG_Z);
// general case
else
{
- emit_mov_r64_p64_keepflags(dst, dstreg, REG_EDX, src1p); // mov dstreg:dstp,[src1p]
- emit_sbb_r64_p64(dst, dstreg, REG_EDX, src2p, inst); // sbb dstreg:dstp,src2p
- emit_mov_p64_r64(dst, dstp, dstreg, REG_EDX); // mov dstp,dstreg:eax
+ emit_mov_r64_p64_keepflags(a, dstreg, edx, src1p); // mov edx:dstreg,[src1p]
+ alu_op_param(a, Inst::kIdSbb, Inst::kIdSbb, // sbb edx:dstreg,src2p
+ dstreg, edx, src2p, inst.flags() & FLAG_Z);
+ emit_mov_p64_r64(a, dstp, dstreg, edx); // mov dstp,edx:dstreg
}
}
}
@@ -4726,7 +4996,7 @@ void drcbe_x86::op_subc(x86code *&dst, const instruction &inst)
// op_cmp - process a CMP opcode
//-------------------------------------------------
-void drcbe_x86::op_cmp(x86code *&dst, const instruction &inst)
+void drcbe_x86::op_cmp(Assembler &a, const instruction &inst)
{
// validate instruction
assert(inst.size() == 4 || inst.size() == 8);
@@ -4738,21 +5008,21 @@ void drcbe_x86::op_cmp(x86code *&dst, const instruction &inst)
be_parameter src2p(*this, inst.param(1), PTYPE_MRI);
// pick a target register for the general case
- int src1reg = src1p.select_register(REG_EAX);
+ Gp const src1reg = src1p.select_register(eax);
// 32-bit form
if (inst.size() == 4)
{
// memory versus anything
if (src1p.is_memory())
- emit_cmp_m32_p32(dst, MABS(src1p.memory()), src2p, inst); // cmp [dstp],src2p
+ alu_op_param(a, Inst::kIdCmp, MABS(src1p.memory(), 4), src2p); // cmp [src1p],src2p
// general case
else
{
if (src1p.is_immediate())
- emit_mov_r32_imm(dst, src1reg, src1p.immediate()); // mov src1reg,imm
- emit_cmp_r32_p32(dst, src1reg, src2p, inst); // cmp src1reg,src2p
+ a.mov(src1reg, src1p.immediate()); // mov src1reg,imm
+ alu_op_param(a, Inst::kIdCmp, src1reg, src2p); // cmp src1reg,src2p
}
}
@@ -4760,8 +5030,11 @@ void drcbe_x86::op_cmp(x86code *&dst, const instruction &inst)
else
{
// general case
- emit_mov_r64_p64(dst, REG_EAX, REG_EDX, src1p); // mov eax:dstp,[src1p]
- emit_cmp_r64_p64(dst, REG_EAX, REG_EDX, src2p, inst); // cmp eax:dstp,src2p
+ emit_mov_r64_p64(a, eax, edx, src1p); // mov edx:eax,[src1p]
+ alu_op_param(a, Inst::kIdSub, Inst::kIdSbb, // cmp edx:eax,src2p
+ eax, edx, src2p, (inst.flags() & FLAG_Z) && (inst.flags() != FLAG_Z));
+ if (inst.flags() == FLAG_Z)
+ a.or_(edx, eax);
}
}
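
In the 64-bit compare above, the sub/sbb pair leaves flags describing only the upper half, so the zero flag needs extra work when it is the only flag requested; a small C++ sketch of the Z computation the final "or edx,eax" performs (the helper name is illustrative):

#include <cstdint>

// The 64-bit difference is zero only if both 32-bit halves are zero, which is
// what OR-ing the halves and letting the flags of that OR stand establishes.
bool cmp64_sets_z(uint64_t a, uint64_t b)
{
	const uint64_t diff = a - b;
	const uint32_t lo = uint32_t(diff);
	const uint32_t hi = uint32_t(diff >> 32);
	return (lo | hi) == 0;
}
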
@@ -4770,11 +5043,8 @@ void drcbe_x86::op_cmp(x86code *&dst, const instruction &inst)
// op_mulu - process a MULU opcode
//-------------------------------------------------
-void drcbe_x86::op_mulu(x86code *&dst, const instruction &inst)
+void drcbe_x86::op_mulu(Assembler &a, const instruction &inst)
{
- uint8_t zsflags = inst.flags() & (FLAG_Z | FLAG_S);
- uint8_t vflag = inst.flags() & FLAG_V;
-
// validate instruction
assert(inst.size() == 4 || inst.size() == 8);
assert_no_condition(inst);
@@ -4786,102 +5056,143 @@ void drcbe_x86::op_mulu(x86code *&dst, const instruction &inst)
be_parameter src1p(*this, inst.param(2), PTYPE_MRI);
be_parameter src2p(*this, inst.param(3), PTYPE_MRI);
normalize_commutative(src1p, src2p);
- bool compute_hi = (dstp != edstp);
+ const bool compute_hi = (dstp != edstp);
- // 32-bit form
if (inst.size() == 4)
{
- // general case
- emit_mov_r32_p32(dst, REG_EAX, src1p); // mov eax,src1p
- if (src2p.is_memory())
- emit_mul_m32(dst, MABS(src2p.memory())); // mul [src2p]
- else if (src2p.is_int_register())
- emit_mul_r32(dst, src2p.ireg()); // mul src2p
- else if (src2p.is_immediate())
- {
- emit_mov_r32_imm(dst, REG_EDX, src2p.immediate()); // mov edx,src2p
- emit_mul_r32(dst, REG_EDX); // mul edx
- }
- emit_mov_p32_r32(dst, dstp, REG_EAX); // mov dstp,eax
+ // 32-bit form
+ emit_mov_r32_p32(a, eax, src1p); // mov eax,src1p
+ emit_mov_r32_p32(a, edx, src2p); // mov edx,src2p
+ a.mul(edx); // mul edx
+
+ emit_mov_p32_r32(a, dstp, eax); // mov dstp,eax
if (compute_hi)
- emit_mov_p32_r32(dst, edstp, REG_EDX); // mov edstp,edx
+ emit_mov_p32_r32(a, edstp, edx); // mov edstp,edx
- // compute flags
- if (inst.flags() != 0)
+ if (inst.flags())
{
- if (zsflags != 0)
- {
- if (vflag)
- emit_pushf(dst); // pushf
- if (compute_hi)
- {
- if (zsflags == FLAG_Z)
- emit_or_r32_r32(dst, REG_EDX, REG_EAX); // or edx,eax
- else if (zsflags == FLAG_S)
- emit_test_r32_r32(dst, REG_EDX, REG_EDX); // test edx,edx
- else
- {
- emit_movzx_r32_r16(dst, REG_ECX, REG_AX); // movzx ecx,ax
- emit_shr_r32_imm(dst, REG_EAX, 16); // shr eax,16
- emit_or_r32_r32(dst, REG_EDX, REG_ECX); // or edx,ecx
- emit_or_r32_r32(dst, REG_EDX, REG_EAX); // or edx,eax
- }
- }
- else
- emit_test_r32_r32(dst, REG_EAX, REG_EAX); // test eax,eax
+ a.pushfd();
- // we rely on the fact that OF is cleared by all logical operations above
- if (vflag)
- {
- emit_pushf(dst); // pushf
- emit_pop_r32(dst, REG_EAX); // pop eax
- emit_and_m32_imm(dst, MBD(REG_ESP, 0), ~0x84); // and [esp],~0x84
- emit_or_m32_r32(dst, MBD(REG_ESP, 0), REG_EAX); // or [esp],eax
- emit_popf(dst); // popf
- }
- }
+ a.test(edx, edx);
+ a.pushfd(); // will have the sign flag + upper half zero
+ a.pop(edx);
+
+ a.test(eax, eax);
+ a.pushfd(); // lower half zero
+ a.pop(eax);
+
+ a.and_(dword_ptr(esp, 0), ~(0x40 | 0x80));
+ a.mov(ecx, edx);
+ a.and_(ecx, 0x80); // sign
+
+ a.and_(eax, edx);
+ a.and_(eax, 0x40); // zero
+
+ a.or_(eax, ecx);
+ a.or_(dword_ptr(esp, 0), eax);
+
+ a.popfd();
}
}
-
- // 64-bit form
else if (inst.size() == 8)
{
- // general case
- emit_mov_m32_imm(dst, MBD(REG_ESP, 24), inst.flags()); // mov [esp+24],flags
- emit_mov_m64_p64(dst, MBD(REG_ESP, 16), src2p); // mov [esp+16],src2p
- emit_mov_m64_p64(dst, MBD(REG_ESP, 8), src1p); // mov [esp+8],src1p
+ // 64-bit form
+ a.mov(dword_ptr(esp, 24), inst.flags() ? 1 : 0); // mov [esp+24],flags
+ emit_mov_m64_p64(a, qword_ptr(esp, 16), src2p); // mov [esp+16],src2p
+ emit_mov_m64_p64(a, qword_ptr(esp, 8), src1p); // mov [esp+8],src1p
if (!compute_hi)
- emit_mov_m32_imm(dst, MBD(REG_ESP, 4), (uintptr_t)&m_reslo); // mov [esp+4],&reslo
+ a.mov(dword_ptr(esp, 4), imm(&m_reslo)); // mov [esp+4],&reslo
else
- emit_mov_m32_imm(dst, MBD(REG_ESP, 4), (uintptr_t)&m_reshi); // mov [esp+4],&reshi
- emit_mov_m32_imm(dst, MBD(REG_ESP, 0), (uintptr_t)&m_reslo); // mov [esp],&reslo
- emit_call(dst, (x86code *)dmulu); // call dmulu
+ a.mov(dword_ptr(esp, 4), imm(&m_reshi)); // mov [esp+4],&reshi
+ a.mov(dword_ptr(esp, 0), imm(&m_reslo)); // mov [esp],&reslo
+ a.call(imm(dmulu<false>)); // call dmulu (calculate ZS flags as 64*64->128)
if (inst.flags() != 0)
- emit_push_m32(dst, MABSI(flags_unmap, REG_EAX, 4)); // push flags_unmap[eax*4]
- emit_mov_r32_m32(dst, REG_EAX, MABS((uint32_t *)&m_reslo + 0)); // mov eax,reslo.lo
- emit_mov_r32_m32(dst, REG_EDX, MABS((uint32_t *)&m_reslo + 1)); // mov edx,reslo.hi
- emit_mov_p64_r64(dst, dstp, REG_EAX, REG_EDX); // mov dstp,edx:eax
+ a.push(dword_ptr(uintptr_t(flags_unmap), eax, 2)); // push flags_unmap[eax*4]
+ a.mov(eax, MABS((uint32_t *)&m_reslo + 0)); // mov eax,reslo.lo
+ a.mov(edx, MABS((uint32_t *)&m_reslo + 1)); // mov edx,reslo.hi
+ emit_mov_p64_r64(a, dstp, eax, edx); // mov dstp,edx:eax
if (compute_hi)
{
- emit_mov_r32_m32(dst, REG_EAX, MABS((uint32_t *)&m_reshi + 0)); // mov eax,reshi.lo
- emit_mov_r32_m32(dst, REG_ECX, MABS((uint32_t *)&m_reshi + 1)); // mov ecx,reshi.hi
- emit_mov_p64_r64(dst, edstp, REG_EAX, REG_ECX); // mov edstp,ecx:eax
+ a.mov(eax, MABS((uint32_t *)&m_reshi + 0)); // mov eax,reshi.lo
+ a.mov(ecx, MABS((uint32_t *)&m_reshi + 1)); // mov ecx,reshi.hi
+ emit_mov_p64_r64(a, edstp, eax, ecx); // mov edstp,ecx:eax
}
if (inst.flags() != 0)
- emit_popf(dst); // popf
+ a.popfd(); // popf
}
}
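
The pushfd/popfd sequence above rebuilds S and Z for the full 64-bit product while keeping the C/V state left by mul. A minimal C++ sketch of the merge, assuming the architectural EFLAGS bit positions (ZF = 0x40, SF = 0x80); the helper name is illustrative:

#include <cstdint>

// Start from the flags saved right after the mul (carry/overflow already
// correct), clear Z and S, then set S from the high half's sign and Z only if
// both halves of the product are zero.
uint32_t merge_mulu_flags(uint32_t saved_eflags, uint32_t lo, uint32_t hi)
{
	constexpr uint32_t ZF = 0x40, SF = 0x80;
	uint32_t flags = saved_eflags & ~(ZF | SF);
	if (hi & 0x80000000u)
		flags |= SF;
	if ((lo == 0) && (hi == 0))
		flags |= ZF;
	return flags;
}
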
//-------------------------------------------------
-// op_muls - process a MULS opcode
+// op_mululw - process a MULULW (32x32=32) opcode
//-------------------------------------------------
-void drcbe_x86::op_muls(x86code *&dst, const instruction &inst)
+void drcbe_x86::op_mululw(Assembler &a, const instruction &inst)
{
- uint8_t zsflags = inst.flags() & (FLAG_Z | FLAG_S);
- uint8_t vflag = inst.flags() & FLAG_V;
+ // validate instruction
+ assert(inst.size() == 4 || inst.size() == 8);
+ assert_no_condition(inst);
+ assert_flags(inst, FLAG_V | FLAG_Z | FLAG_S);
+
+ // normalize parameters
+ be_parameter dstp(*this, inst.param(0), PTYPE_MR);
+ be_parameter src1p(*this, inst.param(1), PTYPE_MRI);
+ be_parameter src2p(*this, inst.param(2), PTYPE_MRI);
+ normalize_commutative(src1p, src2p);
+
+ if (inst.size() == 4)
+ {
+ // 32-bit form
+ emit_mov_r32_p32(a, eax, src1p); // mov eax,src1p
+ emit_mov_r32_p32(a, edx, src2p); // mov edx,src2p
+ a.mul(edx); // mul edx
+ emit_mov_p32_r32(a, dstp, eax); // mov dstp,eax
+
+ if (inst.flags())
+ {
+ a.test(eax, eax);
+ a.pushfd(); // sign + zero
+
+ // if edx is not zero then it overflowed
+ a.test(edx, edx);
+ a.pushfd();
+ a.pop(edx);
+ a.and_(edx, 0x40); // zero
+ a.xor_(edx, 0x40);
+ a.shl(edx, 5); // turn into overflow flag
+ a.or_(dword_ptr(esp, 0), edx);
+
+ a.popfd();
+ }
+ }
+ else if (inst.size() == 8)
+ {
+ // 64-bit form
+ a.mov(dword_ptr(esp, 24), inst.flags() ? 1 : 0); // mov [esp+24],flags
+ emit_mov_m64_p64(a, qword_ptr(esp, 16), src2p); // mov [esp+16],src2p
+ emit_mov_m64_p64(a, qword_ptr(esp, 8), src1p); // mov [esp+8],src1p
+ a.mov(dword_ptr(esp, 4), imm(&m_reslo)); // mov [esp+4],&reslo
+ a.mov(dword_ptr(esp, 0), imm(&m_reslo)); // mov [esp],&reslo
+ a.call(imm(dmulu<true>)); // call dmulu (calculate ZS flags as 64*64->64)
+ if (inst.flags() != 0)
+ a.push(dword_ptr(uintptr_t(flags_unmap), eax, 2)); // push flags_unmap[eax*4]
+ a.mov(eax, MABS((uint32_t *)&m_reslo + 0)); // mov eax,reslo.lo
+ a.mov(edx, MABS((uint32_t *)&m_reslo + 1)); // mov edx,reslo.hi
+ emit_mov_p64_r64(a, dstp, eax, edx); // mov dstp,edx:eax
+ if (inst.flags() != 0)
+ a.popfd(); // popf
+ }
+
+}
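
The flag fix-up in the 32-bit MULULW path above detects unsigned overflow of the truncated product: the full result does not fit in 32 bits exactly when the high half is nonzero, and the inverted zero flag (bit 0x40) shifted left by 5 lands on the overflow bit (0x800). A small C++ sketch, with an illustrative helper name:

#include <cstdint>

// Returns the EFLAGS overflow bit (0x800) that the emitted code ORs into the
// saved flags: set when the upper half of the unsigned product is nonzero.
uint32_t mululw_overflow_bit(uint32_t product_hi)
{
	const uint32_t zf = (product_hi == 0) ? 0x40 : 0;  // result of "test edx,edx"
	return (zf ^ 0x40) << 5;                           // 0x40 << 5 == 0x800 (OF)
}
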
+
+
+//-------------------------------------------------
+// op_muls - process a MULS opcode
+//-------------------------------------------------
+
+void drcbe_x86::op_muls(Assembler &a, const instruction &inst)
+{
// validate instruction
assert(inst.size() == 4 || inst.size() == 8);
assert_no_condition(inst);
@@ -4893,112 +5204,134 @@ void drcbe_x86::op_muls(x86code *&dst, const instruction &inst)
be_parameter src1p(*this, inst.param(2), PTYPE_MRI);
be_parameter src2p(*this, inst.param(3), PTYPE_MRI);
normalize_commutative(src1p, src2p);
- bool compute_hi = (dstp != edstp);
+ const bool compute_hi = (dstp != edstp);
- // 32-bit form
if (inst.size() == 4)
{
- // 32-bit destination with memory/immediate or register/immediate
- if (!compute_hi && !src1p.is_immediate() && src2p.is_immediate())
- {
- if (src1p.is_memory())
- emit_imul_r32_m32_imm(dst, REG_EAX, MABS(src1p.memory()), src2p.immediate()); // imul eax,[src1p],src2p
- else if (src1p.is_int_register())
- emit_imul_r32_r32_imm(dst, REG_EAX, src1p.ireg(), src2p.immediate()); // imul eax,src1p,src2p
- emit_mov_p32_r32(dst, dstp, REG_EAX); // mov dstp,eax
- }
+ // 32-bit form
+ emit_mov_r32_p32(a, eax, src1p); // mov eax,src1p
+ emit_mov_r32_p32(a, edx, src2p); // mov edx,src2p
+ a.imul(edx); // imul edx
- // 32-bit destination, general case
- else if (!compute_hi)
- {
- emit_mov_r32_p32(dst, REG_EAX, src1p); // mov eax,src1p
- if (src2p.is_memory())
- emit_imul_r32_m32(dst, REG_EAX, MABS(src2p.memory())); // imul eax,[src2p]
- else if (src2p.is_int_register())
- emit_imul_r32_r32(dst, REG_EAX, src2p.ireg()); // imul eax,src2p
- emit_mov_p32_r32(dst, dstp, REG_EAX); // mov dstp,eax
- }
+ emit_mov_p32_r32(a, dstp, eax); // mov dstp,eax
+ if (compute_hi)
+ emit_mov_p32_r32(a, edstp, edx); // mov edstp,edx
- // 64-bit destination, general case
- else
+ if (inst.flags())
{
- emit_mov_r32_p32(dst, REG_EAX, src1p); // mov eax,src1p
- if (src2p.is_memory())
- emit_imul_m32(dst, MABS(src2p.memory())); // imul [src2p]
- else if (src2p.is_int_register())
- emit_imul_r32(dst, src2p.ireg()); // imul src2p
- else if (src2p.is_immediate())
- {
- emit_mov_r32_imm(dst, REG_EDX, src2p.immediate()); // mov edx,src2p
- emit_imul_r32(dst, REG_EDX); // imul edx
- }
- emit_mov_p32_r32(dst, dstp, REG_EAX); // mov dstp,eax
- emit_mov_p32_r32(dst, edstp, REG_EDX); // mov edstp,edx
- }
+ a.pushfd();
- // compute flags
- if (inst.flags() != 0)
- {
- if (zsflags != 0)
- {
- if (vflag)
- emit_pushf(dst); // pushf
- if (compute_hi)
- {
- if (inst.flags() == FLAG_Z)
- emit_or_r32_r32(dst, REG_EDX, REG_EAX); // or edx,eax
- else if (inst.flags() == FLAG_S)
- emit_test_r32_r32(dst, REG_EDX, REG_EDX); // test edx,edx
- else
- {
- emit_movzx_r32_r16(dst, REG_ECX, REG_AX); // movzx ecx,ax
- emit_shr_r32_imm(dst, REG_EAX, 16); // shr eax,16
- emit_or_r32_r32(dst, REG_EDX, REG_ECX); // or edx,ecx
- emit_or_r32_r32(dst, REG_EDX, REG_EAX); // or edx,eax
- }
- }
- else
- emit_test_r32_r32(dst, REG_EAX, REG_EAX); // test eax,eax
+ a.test(edx, edx);
+ a.pushfd(); // will have the sign flag + upper half zero
+ a.pop(edx);
- // we rely on the fact that OF is cleared by all logical operations above
- if (vflag)
- {
- emit_pushf(dst); // pushf
- emit_pop_r32(dst, REG_EAX); // pop eax
- emit_and_m32_imm(dst, MBD(REG_ESP, 0), ~0x84); // and [esp],~0x84
- emit_or_m32_r32(dst, MBD(REG_ESP, 0), REG_EAX); // or [esp],eax
- emit_popf(dst); // popf
- }
- }
+ a.test(eax, eax);
+ a.pushfd(); // lower half zero
+ a.pop(eax);
+
+ a.and_(dword_ptr(esp, 0), ~(0x40 | 0x80));
+ a.mov(ecx, edx);
+ a.and_(ecx, 0x80); // sign
+
+ a.and_(eax, edx);
+ a.and_(eax, 0x40); // zero
+
+ a.or_(eax, ecx);
+ a.or_(dword_ptr(esp, 0), eax);
+
+ a.popfd();
}
}
-
- // 64-bit form
else if (inst.size() == 8)
{
- // general case
- emit_mov_m32_imm(dst, MBD(REG_ESP, 24), inst.flags()); // mov [esp+24],flags
- emit_mov_m64_p64(dst, MBD(REG_ESP, 16), src2p); // mov [esp+16],src2p
- emit_mov_m64_p64(dst, MBD(REG_ESP, 8), src1p); // mov [esp+8],src1p
+ // 64-bit form
+ a.mov(dword_ptr(esp, 24), inst.flags() ? 1 : 0); // mov [esp+24],flags
+ emit_mov_m64_p64(a, qword_ptr(esp, 16), src2p); // mov [esp+16],src2p
+ emit_mov_m64_p64(a, qword_ptr(esp, 8), src1p); // mov [esp+8],src1p
if (!compute_hi)
- emit_mov_m32_imm(dst, MBD(REG_ESP, 4), (uintptr_t)&m_reslo); // mov [esp+4],&reslo
+ a.mov(dword_ptr(esp, 4), imm(&m_reslo)); // mov [esp+4],&reslo
else
- emit_mov_m32_imm(dst, MBD(REG_ESP, 4), (uintptr_t)&m_reshi); // push [esp+4],&reshi
- emit_mov_m32_imm(dst, MBD(REG_ESP, 0), (uintptr_t)&m_reslo); // mov [esp],&reslo
- emit_call(dst, (x86code *)dmuls); // call dmuls
+ a.mov(dword_ptr(esp, 4), imm(&m_reshi)); // mov [esp+4],&reshi
+ a.mov(dword_ptr(esp, 0), imm(&m_reslo)); // mov [esp],&reslo
+ a.call(imm(dmuls<false>)); // call dmuls (calculate ZS flags as 64*64->128)
if (inst.flags() != 0)
- emit_push_m32(dst, MABSI(flags_unmap, REG_EAX, 4)); // push flags_unmap[eax*4]
- emit_mov_r32_m32(dst, REG_EAX, MABS((uint32_t *)&m_reslo + 0)); // mov eax,reslo.lo
- emit_mov_r32_m32(dst, REG_EDX, MABS((uint32_t *)&m_reslo + 1)); // mov edx,reslo.hi
- emit_mov_p64_r64(dst, dstp, REG_EAX, REG_EDX); // mov dstp,edx:eax
+ a.push(dword_ptr(uintptr_t(flags_unmap), eax, 2)); // push flags_unmap[eax*4]
+ a.mov(eax, MABS((uint32_t *)&m_reslo + 0)); // mov eax,reslo.lo
+ a.mov(edx, MABS((uint32_t *)&m_reslo + 1)); // mov edx,reslo.hi
+ emit_mov_p64_r64(a, dstp, eax, edx); // mov dstp,edx:eax
if (compute_hi)
{
- emit_mov_r32_m32(dst, REG_EAX, MABS((uint32_t *)&m_reshi + 0)); // mov eax,reshi.lo
- emit_mov_r32_m32(dst, REG_EDX, MABS((uint32_t *)&m_reshi + 1)); // mov edx,reshi.hi
- emit_mov_p64_r64(dst, edstp, REG_EAX, REG_EDX); // mov edstp,edx:eax
+ a.mov(eax, MABS((uint32_t *)&m_reshi + 0)); // mov eax,reshi.lo
+ a.mov(edx, MABS((uint32_t *)&m_reshi + 1)); // mov edx,reshi.hi
+ emit_mov_p64_r64(a, edstp, eax, edx); // mov edstp,edx:eax
}
if (inst.flags() != 0)
- emit_popf(dst); // popf
+ a.popfd(); // popf
+ }
+}
+
+
+//-------------------------------------------------
+// op_mulslw - process a MULSLW (32x32=32) opcode
+//-------------------------------------------------
+
+void drcbe_x86::op_mulslw(Assembler &a, const instruction &inst)
+{
+ // validate instruction
+ assert(inst.size() == 4 || inst.size() == 8);
+ assert_no_condition(inst);
+ assert_flags(inst, FLAG_V | FLAG_Z | FLAG_S);
+
+ // normalize parameters
+ be_parameter dstp(*this, inst.param(0), PTYPE_MR);
+ be_parameter src1p(*this, inst.param(1), PTYPE_MRI);
+ be_parameter src2p(*this, inst.param(2), PTYPE_MRI);
+ normalize_commutative(src1p, src2p);
+
+ if (inst.size() == 4)
+ {
+ // 32-bit form
+ emit_mov_r32_p32(a, eax, src1p); // mov eax,src1p
+ emit_mov_r32_p32(a, edx, src2p); // mov edx,src2p
+ a.imul(edx); // imul edx
+
+ emit_mov_p32_r32(a, dstp, eax); // mov dstp,eax
+
+ if (inst.flags())
+ {
+ a.test(eax, eax);
+ a.pushfd(); // sign + zero
+
+ a.mov(ecx, edx);
+ a.cdq();
+
+ a.cmp(ecx, edx);
+ a.pushfd();
+ a.pop(edx);
+ a.and_(edx, 0x40); // zero
+ a.xor_(edx, 0x40);
+ a.shl(edx, 5); // turn into overflow flag
+ a.or_(dword_ptr(esp, 0), edx);
+
+ a.popfd();
+ }
+ }
+ else if (inst.size() == 8)
+ {
+ // 64-bit form
+ a.mov(dword_ptr(esp, 24), inst.flags() ? 1 : 0); // mov [esp+24],flags
+ emit_mov_m64_p64(a, qword_ptr(esp, 16), src2p); // mov [esp+16],src2p
+ emit_mov_m64_p64(a, qword_ptr(esp, 8), src1p); // mov [esp+8],src1p
+ a.mov(dword_ptr(esp, 4), imm(&m_reslo)); // mov [esp+4],&reslo
+ a.mov(dword_ptr(esp, 0), imm(&m_reslo)); // mov [esp],&reslo
+ a.call(imm(dmuls<true>)); // call dmuls (calculate ZS flags as 64*64->64)
+ if (inst.flags() != 0)
+ a.push(dword_ptr(uintptr_t(flags_unmap), eax, 2)); // push flags_unmap[eax*4]
+ a.mov(eax, MABS((uint32_t *)&m_reslo + 0)); // mov eax,reslo.lo
+ a.mov(edx, MABS((uint32_t *)&m_reslo + 1)); // mov edx,reslo.hi
+ emit_mov_p64_r64(a, dstp, eax, edx); // mov dstp,edx:eax
+ if (inst.flags() != 0)
+ a.popfd(); // popf
}
}
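
For the signed counterpart above, the truncated product is exact only when the high half equals the sign extension of the low half, which is what the mov/cdq/cmp sequence checks before converting the inverted zero flag into overflow. A minimal C++ sketch (helper name illustrative):

#include <cstdint>

// A signed 32x32 product fits in 32 bits exactly when its upper 32 bits are
// all copies of the low half's sign bit.
bool mulslw_overflows(int32_t a, int32_t b)
{
	const int64_t full = int64_t(a) * int64_t(b);
	const int32_t lo = int32_t(uint32_t(full));
	const int32_t hi = int32_t(uint32_t(uint64_t(full) >> 32));
	return hi != ((lo < 0) ? -1 : 0);
}
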
@@ -5007,7 +5340,7 @@ void drcbe_x86::op_muls(x86code *&dst, const instruction &inst)
// op_divu - process a DIVU opcode
//-------------------------------------------------
-void drcbe_x86::op_divu(x86code *&dst, const instruction &inst)
+void drcbe_x86::op_divu(Assembler &a, const instruction &inst)
{
// validate instruction
assert(inst.size() == 4 || inst.size() == 8);
@@ -5021,54 +5354,52 @@ void drcbe_x86::op_divu(x86code *&dst, const instruction &inst)
be_parameter src2p(*this, inst.param(3), PTYPE_MRI);
bool compute_rem = (dstp != edstp);
- // 32-bit form
if (inst.size() == 4)
{
- // general case
- emit_mov_r32_p32(dst, REG_ECX, src2p); // mov ecx,src2p
+ // 32-bit form
+ emit_mov_r32_p32(a, ecx, src2p); // mov ecx,src2p
if (inst.flags() != 0)
{
- emit_mov_r32_imm(dst, REG_EAX, 0xa0000000); // mov eax,0xa0000000
- emit_add_r32_r32(dst, REG_EAX, REG_EAX); // add eax,eax
+ a.mov(eax, 0xa0000000); // mov eax,0xa0000000
+ a.add(eax, eax); // add eax,eax
}
- emit_link skip;
- emit_jecxz_link(dst, skip); // jecxz skip
- emit_mov_r32_p32(dst, REG_EAX, src1p); // mov eax,src1p
- emit_xor_r32_r32(dst, REG_EDX, REG_EDX); // xor edx,edx
- emit_div_r32(dst, REG_ECX); // div ecx
- emit_mov_p32_r32(dst, dstp, REG_EAX); // mov dstp,eax
+ Label skip = a.newLabel();
+ a.short_().jecxz(skip); // jecxz skip
+ emit_mov_r32_p32(a, eax, src1p); // mov eax,src1p
+ a.xor_(edx, edx); // xor edx,edx
+ a.div(ecx); // div ecx
+ emit_mov_p32_r32(a, dstp, eax); // mov dstp,eax
if (compute_rem)
- emit_mov_p32_r32(dst, edstp, REG_EDX); // mov edstp,edx
+ emit_mov_p32_r32(a, edstp, edx); // mov edstp,edx
if (inst.flags() != 0)
- emit_test_r32_r32(dst, REG_EAX, REG_EAX); // test eax,eax
- track_resolve_link(dst, skip); // skip:
+ a.test(eax, eax); // test eax,eax
+ a.bind(skip); // skip:
+ reset_last_upper_lower_reg();
}
-
- // 64-bit form
else if (inst.size() == 8)
{
- // general case
- emit_mov_m64_p64(dst, MBD(REG_ESP, 16), src2p); // mov [esp+16],src2p
- emit_mov_m64_p64(dst, MBD(REG_ESP, 8), src1p); // mov [esp+8],src1p
+ // 64-bit form
+ emit_mov_m64_p64(a, qword_ptr(esp, 16), src2p); // mov [esp+16],src2p
+ emit_mov_m64_p64(a, qword_ptr(esp, 8), src1p); // mov [esp+8],src1p
if (!compute_rem)
- emit_mov_m32_imm(dst, MBD(REG_ESP, 4), (uintptr_t)&m_reslo); // mov [esp+4],&reslo
+ a.mov(dword_ptr(esp, 4), imm(&m_reslo)); // mov [esp+4],&reslo
else
- emit_mov_m32_imm(dst, MBD(REG_ESP, 4), (uintptr_t)&m_reshi); // push [esp+4],&reshi
- emit_mov_m32_imm(dst, MBD(REG_ESP, 0), (uintptr_t)&m_reslo); // mov [esp],&reslo
- emit_call(dst, (x86code *)ddivu); // call ddivu
+ a.mov(dword_ptr(esp, 4), imm(&m_reshi)); // mov [esp+4],&reshi
+ a.mov(dword_ptr(esp, 0), imm(&m_reslo)); // mov [esp],&reslo
+ a.call(imm(ddivu)); // call ddivu
if (inst.flags() != 0)
- emit_push_m32(dst, MABSI(flags_unmap, REG_EAX, 4)); // push flags_unmap[eax*4]
- emit_mov_r32_m32(dst, REG_EAX, MABS((uint32_t *)&m_reslo + 0)); // mov eax,reslo.lo
- emit_mov_r32_m32(dst, REG_EDX, MABS((uint32_t *)&m_reslo + 1)); // mov edx,reslo.hi
- emit_mov_p64_r64(dst, dstp, REG_EAX, REG_EDX); // mov dstp,edx:eax
+ a.push(dword_ptr(uintptr_t(flags_unmap), eax, 2)); // push flags_unmap[eax*4]
+ a.mov(eax, MABS((uint32_t *)&m_reslo + 0)); // mov eax,reslo.lo
+ a.mov(edx, MABS((uint32_t *)&m_reslo + 1)); // mov edx,reslo.hi
+ emit_mov_p64_r64(a, dstp, eax, edx); // mov dstp,edx:eax
if (compute_rem)
{
- emit_mov_r32_m32(dst, REG_EAX, MABS((uint32_t *)&m_reshi + 0)); // mov eax,reshi.lo
- emit_mov_r32_m32(dst, REG_EDX, MABS((uint32_t *)&m_reshi + 1)); // mov edx,reshi.hi
- emit_mov_p64_r64(dst, edstp, REG_EAX, REG_EDX); // mov edstp,edx:eax
+ a.mov(eax, MABS((uint32_t *)&m_reshi + 0)); // mov eax,reshi.lo
+ a.mov(edx, MABS((uint32_t *)&m_reshi + 1)); // mov edx,reshi.hi
+ emit_mov_p64_r64(a, edstp, eax, edx); // mov edstp,edx:eax
}
if (inst.flags() != 0)
- emit_popf(dst); // popf
+ a.popfd(); // popf
}
}
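
The 32-bit path above presets the flags before the jecxz guard so that a zero divisor skips the division yet still reports overflow: 0xa0000000 added to itself wraps to 0x40000000, which sets carry and signed overflow while leaving zero and sign clear. A small C++ sketch of that preset, with illustrative names:

#include <cstdint>

struct preset_flags { bool c, v, z, s; };

// Computes the flag state left by "mov eax,0xa0000000 / add eax,eax" -- the
// state observed when the divisor is zero and the div/idiv is skipped.
preset_flags divide_by_zero_flags()
{
	const uint32_t a = 0xa0000000u, b = a;
	const uint32_t res = a + b;                     // wraps to 0x40000000
	preset_flags f;
	f.c = res < a;                                  // unsigned carry out: set
	f.v = ((~(a ^ b) & (a ^ res)) >> 31) != 0;      // signed overflow: set
	f.z = (res == 0);                               // clear
	f.s = (res >> 31) != 0;                         // clear
	return f;
}
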
@@ -5077,7 +5408,7 @@ void drcbe_x86::op_divu(x86code *&dst, const instruction &inst)
// op_divs - process a DIVS opcode
//-------------------------------------------------
-void drcbe_x86::op_divs(x86code *&dst, const instruction &inst)
+void drcbe_x86::op_divs(Assembler &a, const instruction &inst)
{
// validate instruction
assert(inst.size() == 4 || inst.size() == 8);
@@ -5091,54 +5422,52 @@ void drcbe_x86::op_divs(x86code *&dst, const instruction &inst)
be_parameter src2p(*this, inst.param(3), PTYPE_MRI);
bool compute_rem = (dstp != edstp);
- // 32-bit form
if (inst.size() == 4)
{
- // general case
- emit_mov_r32_p32(dst, REG_ECX, src2p); // mov ecx,src2p
+ // 32-bit form
+ emit_mov_r32_p32(a, ecx, src2p); // mov ecx,src2p
if (inst.flags() != 0)
{
- emit_mov_r32_imm(dst, REG_EAX, 0xa0000000); // mov eax,0xa0000000
- emit_add_r32_r32(dst, REG_EAX, REG_EAX); // add eax,eax
+ a.mov(eax, 0xa0000000); // mov eax,0xa0000000
+ a.add(eax, eax); // add eax,eax
}
- emit_link skip;
- emit_jecxz_link(dst, skip); // jecxz skip
- emit_mov_r32_p32(dst, REG_EAX, src1p); // mov eax,src1p
- emit_cdq(dst); // cdq
- emit_idiv_r32(dst, REG_ECX); // idiv ecx
- emit_mov_p32_r32(dst, dstp, REG_EAX); // mov dstp,eax
+ Label skip = a.newLabel();
+ a.short_().jecxz(skip); // jecxz skip
+ emit_mov_r32_p32(a, eax, src1p); // mov eax,src1p
+ a.cdq(); // cdq
+ a.idiv(ecx); // idiv ecx
+ emit_mov_p32_r32(a, dstp, eax); // mov dstp,eax
if (compute_rem)
- emit_mov_p32_r32(dst, edstp, REG_EDX); // mov edstp,edx
+ emit_mov_p32_r32(a, edstp, edx); // mov edstp,edx
if (inst.flags() != 0)
- emit_test_r32_r32(dst, REG_EAX, REG_EAX); // test eax,eax
- track_resolve_link(dst, skip); // skip:
+ a.test(eax, eax); // test eax,eax
+ a.bind(skip); // skip:
+ reset_last_upper_lower_reg();
}
-
- // 64-bit form
else if (inst.size() == 8)
{
- // general case
- emit_mov_m64_p64(dst, MBD(REG_ESP, 16), src2p); // mov [esp+16],src2p
- emit_mov_m64_p64(dst, MBD(REG_ESP, 8), src1p); // mov [esp+8],src1p
+ // 64-bit form
+ emit_mov_m64_p64(a, qword_ptr(esp, 16), src2p); // mov [esp+16],src2p
+ emit_mov_m64_p64(a, qword_ptr(esp, 8), src1p); // mov [esp+8],src1p
if (!compute_rem)
- emit_mov_m32_imm(dst, MBD(REG_ESP, 4), (uintptr_t)&m_reslo); // mov [esp+4],&reslo
+ a.mov(dword_ptr(esp, 4), imm(&m_reslo)); // mov [esp+4],&reslo
else
- emit_mov_m32_imm(dst, MBD(REG_ESP, 4), (uintptr_t)&m_reshi); // push [esp+4],&reshi
- emit_mov_m32_imm(dst, MBD(REG_ESP, 0), (uintptr_t)&m_reslo); // mov [esp],&reslo
- emit_call(dst, (x86code *)ddivs); // call ddivs
+ a.mov(dword_ptr(esp, 4), imm(&m_reshi)); // mov [esp+4],&reshi
+ a.mov(dword_ptr(esp, 0), imm(&m_reslo)); // mov [esp],&reslo
+ a.call(imm(ddivs)); // call ddivs
if (inst.flags() != 0)
- emit_push_m32(dst, MABSI(flags_unmap, REG_EAX, 4)); // push flags_unmap[eax*4]
- emit_mov_r32_m32(dst, REG_EAX, MABS((uint32_t *)&m_reslo + 0)); // mov eax,reslo.lo
- emit_mov_r32_m32(dst, REG_EDX, MABS((uint32_t *)&m_reslo + 1)); // mov edx,reslo.hi
- emit_mov_p64_r64(dst, dstp, REG_EAX, REG_EDX); // mov dstp,edx:eax
+ a.push(dword_ptr(uintptr_t(flags_unmap), eax, 2)); // push flags_unmap[eax*4]
+ a.mov(eax, MABS((uint32_t *)&m_reslo + 0)); // mov eax,reslo.lo
+ a.mov(edx, MABS((uint32_t *)&m_reslo + 1)); // mov edx,reslo.hi
+ emit_mov_p64_r64(a, dstp, eax, edx); // mov dstp,edx:eax
if (compute_rem)
{
- emit_mov_r32_m32(dst, REG_EAX, MABS((uint32_t *)&m_reshi + 0)); // mov eax,reshi.lo
- emit_mov_r32_m32(dst, REG_EDX, MABS((uint32_t *)&m_reshi + 1)); // mov edx,reshi.hi
- emit_mov_p64_r64(dst, edstp, REG_EAX, REG_EDX); // mov edstp,edx:eax
+ a.mov(eax, MABS((uint32_t *)&m_reshi + 0)); // mov eax,reshi.lo
+ a.mov(edx, MABS((uint32_t *)&m_reshi + 1)); // mov edx,reshi.hi
+ emit_mov_p64_r64(a, edstp, eax, edx); // mov edstp,edx:eax
}
if (inst.flags() != 0)
- emit_popf(dst); // popf
+ a.popfd(); // popf
}
}
@@ -5147,7 +5476,7 @@ void drcbe_x86::op_divs(x86code *&dst, const instruction &inst)
// op_and - process a AND opcode
//-------------------------------------------------
-void drcbe_x86::op_and(x86code *&dst, const instruction &inst)
+void drcbe_x86::op_and(Assembler &a, const instruction &inst)
{
// validate instruction
assert(inst.size() == 4 || inst.size() == 8);
@@ -5161,41 +5490,93 @@ void drcbe_x86::op_and(x86code *&dst, const instruction &inst)
normalize_commutative(src1p, src2p);
// pick a target register for the general case
- int dstreg = dstp.select_register(REG_EAX, src2p);
+ Gp const dstreg = dstp.select_register(eax, src2p);
// 32-bit form
if (inst.size() == 4)
{
// dstp == src1p in memory
if (dstp.is_memory() && dstp == src1p)
- emit_and_m32_p32(dst, MABS(dstp.memory()), src2p, inst); // and [dstp],src2p
+ alu_op_param(a, Inst::kIdAnd, MABS(dstp.memory(), 4), src2p, // and [dstp],src2p
+ [inst](Assembler &a, Operand const &dst, be_parameter const &src)
+ {
+ // optimize all-zero and all-one cases
+ if (!inst.flags() && !src.immediate())
+ {
+ a.mov(dst.as<Mem>(), imm(0));
+ return true;
+ }
+ else if (!inst.flags() && u32(src.immediate()) == 0xffffffffU)
+ return true;
+
+ return false;
+ });
+
+ // dstp == src2p in memory
+ else if (dstp.is_memory() && dstp == src2p)
+ alu_op_param(a, Inst::kIdAnd, MABS(dstp.memory(), 4), src1p, // and [dstp],src1p
+ [inst](Assembler &a, Operand const &dst, be_parameter const &src)
+ {
+ // optimize all-zero and all-one cases
+ if (!inst.flags() && !src.immediate())
+ {
+ a.mov(dst.as<Mem>(), imm(0));
+ return true;
+ }
+ else if (!inst.flags() && u32(src.immediate()) == 0xffffffffU)
+ return true;
+
+ return false;
+ });
// AND with immediate 0xff
- else if (src2p.is_immediate_value(0xff) && inst.flags() == 0)
+ else if (src2p.is_immediate_value(0xff) && !inst.flags())
{
if (src1p.is_int_register())
- emit_movzx_r32_r8(dst, dstreg, src1p.ireg()); // movzx dstreg,src1p
+ {
+ if (src1p.ireg() & 4)
+ {
+ if (dstreg.id() != src1p.ireg())
+ a.mov(dstreg, Gpd(src1p.ireg())); // mov dstreg,src1p
+ a.and_(dstreg, 0xff); // and dstreg,0xff
+ }
+ else
+ a.movzx(dstreg, GpbLo(src1p.ireg())); // movzx dstreg,src1p
+ }
else if (src1p.is_memory())
- emit_movzx_r32_m8(dst, dstreg, MABS(src1p.memory())); // movzx dstreg,[src1p]
- emit_mov_p32_r32(dst, dstp, dstreg); // mov dstp,dstreg
+ a.movzx(dstreg, MABS(src1p.memory(), 1)); // movzx dstreg,[src1p]
+ emit_mov_p32_r32(a, dstp, dstreg); // mov dstp,dstreg
}
// AND with immediate 0xffff
- else if (src2p.is_immediate_value(0xffff) && inst.flags() == 0)
+ else if (src2p.is_immediate_value(0xffff) && !inst.flags())
{
if (src1p.is_int_register())
- emit_movzx_r32_r16(dst, dstreg, src1p.ireg()); // movzx dstreg,src1p
+ a.movzx(dstreg, Gpw(src1p.ireg())); // movzx dstreg,src1p
else if (src1p.is_memory())
- emit_movzx_r32_m16(dst, dstreg, MABS(src1p.memory())); // movzx dstreg,[src1p]
- emit_mov_p32_r32(dst, dstp, dstreg); // mov dstp,dstreg
+ a.movzx(dstreg, MABS(src1p.memory(), 2)); // movzx dstreg,[src1p]
+ emit_mov_p32_r32(a, dstp, dstreg); // mov dstp,dstreg
}
// general case
else
{
- emit_mov_r32_p32(dst, dstreg, src1p); // mov dstreg,src1p
- emit_and_r32_p32(dst, dstreg, src2p, inst); // and dstreg,src2p
- emit_mov_p32_r32(dst, dstp, dstreg); // mov dstp,dstreg
+ emit_mov_r32_p32(a, dstreg, src1p); // mov dstreg,src1p
+ alu_op_param(a, Inst::kIdAnd, dstreg, src2p, // and dstreg,src2p
+ [inst](Assembler &a, Operand const &dst, be_parameter const &src)
+ {
+ // optimize all-zero and all-one cases
+ if (!inst.flags() && !src.immediate())
+ {
+ a.xor_(dst.as<Gpd>(), dst.as<Gpd>());
+ return true;
+ }
+ else if (!inst.flags() && u32(src.immediate()) == 0xffffffffU)
+ return true;
+
+ return false;
+ });
+ emit_mov_p32_r32(a, dstp, dstreg); // mov dstp,dstreg
}
}
@@ -5204,79 +5585,107 @@ void drcbe_x86::op_and(x86code *&dst, const instruction &inst)
{
// dstp == src1p in memory
if (dstp.is_memory() && dstp == src1p)
- emit_and_m64_p64(dst, MABS(dstp.memory()), src2p, inst); // and [dstp],src2p
+ emit_and_m64_p64(a, MABS(dstp.memory(0), 4), MABS(dstp.memory(4), 4), // and [dstp],src2p
+ src2p, inst);
+
+ // dstp == src2p in memory
+ else if (dstp.is_memory() && dstp == src2p)
+ emit_and_m64_p64(a, MABS(dstp.memory(0), 4), MABS(dstp.memory(4), 4), // and [dstp],src1p
+ src1p, inst);
// AND with immediate 0xff
- else if (src2p.is_immediate_value(0xff) && inst.flags() == 0)
+ else if (src2p.is_immediate_value(0xff) && !inst.flags())
{
if (src1p.is_int_register())
- emit_movzx_r32_r8(dst, dstreg, src1p.ireg()); // movzx dstreg,src1p
+ {
+ if (src1p.ireg() & 4)
+ {
+ if (dstreg.id() != src1p.ireg())
+ a.mov(dstreg, Gpd(src1p.ireg())); // mov dstreg,src1p
+ a.and_(dstreg, 0xff); // and dstreg,0xff
+ }
+ else
+ a.movzx(dstreg, GpbLo(src1p.ireg())); // movzx dstreg,src1p
+ }
else if (src1p.is_memory())
- emit_movzx_r32_m8(dst, dstreg, MABS(src1p.memory())); // movzx dstreg,[src1p]
- emit_mov_p32_r32(dst, dstp, dstreg); // mov dstp,dstreg
+ a.movzx(dstreg, MABS(src1p.memory(), 1)); // movzx dstreg,[src1p]
+ emit_mov_p32_r32(a, dstp, dstreg); // mov dstp,dstreg
if (dstp.is_int_register())
- emit_mov_m32_imm(dst, MABS(m_reghi[dstp.ireg()]), 0); // mov dsthi,0
+ a.mov(MABS(m_reghi[dstp.ireg()], 4), 0); // mov dsthi,0
else if (dstp.is_memory())
- emit_mov_m32_imm(dst, MABS(dstp.memory(4)), 0); // mov dsthi,0
+ a.mov(MABS(dstp.memory(4), 4), 0); // mov dsthi,0
}
// AND with immediate 0xffff
- else if (src2p.is_immediate_value(0xffff) && inst.flags() == 0)
+ else if (src2p.is_immediate_value(0xffff) && !inst.flags())
{
if (src1p.is_int_register())
- emit_movzx_r32_r16(dst, dstreg, src1p.ireg()); // movzx dstreg,src1p
+ a.movzx(dstreg, Gpw(src1p.ireg())); // movzx dstreg,src1p
else if (src1p.is_memory())
- emit_movzx_r32_m16(dst, dstreg, MABS(src1p.memory())); // movzx dstreg,[src1p]
- emit_mov_p32_r32(dst, dstp, dstreg); // mov dstp,dstreg
+ a.movzx(dstreg, MABS(src1p.memory(), 2)); // movzx dstreg,[src1p]
+ emit_mov_p32_r32(a, dstp, dstreg); // mov dstp,dstreg
if (dstp.is_int_register())
- emit_mov_m32_imm(dst, MABS(m_reghi[dstp.ireg()]), 0); // mov dsthi,0
+ a.mov(MABS(m_reghi[dstp.ireg()], 4), 0); // mov dsthi,0
else if (dstp.is_memory())
- emit_mov_m32_imm(dst, MABS(dstp.memory(4)), 0); // mov dsthi,0
+ a.mov(MABS(dstp.memory(4), 4), 0); // mov dsthi,0
}
// AND with immediate 0xffffffff
- else if (src2p.is_immediate_value(0xffffffff) && inst.flags() == 0)
+ else if (src2p.is_immediate_value(0xffffffffU) && !inst.flags())
{
- emit_mov_r32_p32(dst, dstreg, src1p); // mov dstreg,src1p
- emit_mov_p32_r32(dst, dstp, dstreg); // mov dstp,dstreg
+ emit_mov_r32_p32(a, dstreg, src1p); // mov dstreg,src1p
+ emit_mov_p32_r32(a, dstp, dstreg); // mov dstp,dstreg
if (dstp.is_int_register())
- emit_mov_m32_imm(dst, MABS(m_reghi[dstp.ireg()]), 0); // mov dsthi,0
+ a.mov(MABS(m_reghi[dstp.ireg()], 4), 0); // mov dsthi,0
else if (dstp.is_memory())
- emit_mov_m32_imm(dst, MABS(dstp.memory(4)), 0); // mov dsthi,0
+ a.mov(MABS(dstp.memory(4), 4), 0); // mov dsthi,0
}
// AND with immediate 0xffffffff00000000
- else if (src2p.is_immediate_value(0xffffffff00000000U) && inst.flags() == 0)
+ else if (src2p.is_immediate_value(0xffffffff00000000ULL) && !inst.flags())
{
if (src1p != dstp)
{
- emit_mov_r64_p64(dst, REG_NONE, REG_EDX, src1p); // mov dstreg,src1p
- emit_mov_p64_r64(dst, dstp, REG_NONE, REG_EDX); // mov dstp,dstreg
+ emit_mov_r64_p64(a, Gp(), edx, src1p); // mov dstreg,src1p
+ emit_mov_p64_r64(a, dstp, Gp(), edx); // mov dstp,dstreg
}
if (dstp.is_int_register())
- emit_xor_r32_r32(dst, dstp.ireg(), dstp.ireg()); // xor dstlo,dstlo
+ a.xor_(Gpd(dstp.ireg()), Gpd(dstp.ireg())); // xor dstlo,dstlo
else if (dstp.is_memory())
- emit_mov_m32_imm(dst, MABS(dstp.memory()), 0); // mov dstlo,0
+ a.mov(MABS(dstp.memory(0), 4), 0); // mov dstlo,0
}
// AND with immediate <= 0xffffffff
- else if (src2p.is_immediate() && src2p.immediate() <= 0xffffffff && inst.flags() == 0)
+ else if (src2p.is_immediate() && src2p.immediate() <= 0xffffffffU && !inst.flags())
{
- emit_mov_r32_p32(dst, dstreg, src1p); // mov dstreg,src1p
- emit_and_r32_p32(dst, dstreg, src2p, inst); // and dstreg,src2p
- emit_mov_p32_r32(dst, dstp, dstreg); // mov dstp,dstreg
+ emit_mov_r32_p32(a, dstreg, src1p); // mov dstreg,src1p
+ alu_op_param(a, Inst::kIdAnd, dstreg, src2p, // and dstreg,src2p
+ [inst](Assembler &a, Operand const &dst, be_parameter const &src)
+ {
+ // optimize all-zero and all-one cases
+ if (!inst.flags() && !src.immediate())
+ {
+ a.xor_(dst.as<Gpd>(), dst.as<Gpd>());
+ return true;
+ }
+ else if (!inst.flags() && u32(src.immediate()) == 0xffffffffU)
+ return true;
+
+ return false;
+ });
+ emit_mov_p32_r32(a, dstp, dstreg); // mov dstp,dstreg
if (dstp.is_int_register())
- emit_mov_m32_imm(dst, MABS(m_reghi[dstp.ireg()]), 0); // mov dsthi,0
+ a.mov(MABS(m_reghi[dstp.ireg()], 4), 0); // mov dsthi,0
else if (dstp.is_memory())
- emit_mov_m32_imm(dst, MABS(dstp.memory(4)), 0); // mov dsthi,0
+ a.mov(MABS(dstp.memory(4), 4), 0); // mov dsthi,0
}
// general case
else
{
- emit_mov_r64_p64(dst, dstreg, REG_EDX, src1p); // mov dstreg:dstp,[src1p]
- emit_and_r64_p64(dst, dstreg, REG_EDX, src2p, inst); // and dstreg:dstp,src2p
- emit_mov_p64_r64(dst, dstp, dstreg, REG_EDX); // mov dstp,dstreg:eax
+ emit_mov_r64_p64(a, dstreg, edx, src1p); // mov edx:dstreg,[src1p]
+ emit_and_r64_p64(a, dstreg, edx, src2p, inst); // and edx:dstreg,src2p
+ emit_mov_p64_r64(a, dstp, dstreg, edx); // mov dstp,edx:dstreg
}
}
}
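For illustration only (this is not part of the patch): the optimisation callbacks handed to alu_op_param above run only for immediate operands and only skip work when the instruction requests no flags. A value-level sketch of the AND identities they implement follows; the OR and XOR handlers further down rely on the analogous identities (OR with all-ones stores -1, XOR with all-ones becomes a NOT).

    #include <cstdint>

    // value-level equivalent of the AND fast paths when no flags are requested
    inline uint32_t and_imm(uint32_t dst, uint32_t imm)
    {
        if (imm == 0)
            return 0;           // emitted as a plain store of zero, no AND
        if (imm == 0xffffffffU)
            return dst;         // nothing emitted at all
        return dst & imm;       // otherwise the general alu_op_param path
    }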
@@ -5286,7 +5695,7 @@ void drcbe_x86::op_and(x86code *&dst, const instruction &inst)
// op_test - process a TEST opcode
//-------------------------------------------------
-void drcbe_x86::op_test(x86code *&dst, const instruction &inst)
+void drcbe_x86::op_test(Assembler &a, const instruction &inst)
{
// validate instruction
assert(inst.size() == 4 || inst.size() == 8);
@@ -5299,20 +5708,20 @@ void drcbe_x86::op_test(x86code *&dst, const instruction &inst)
normalize_commutative(src1p, src2p);
// pick a target register for the general case
- int src1reg = src1p.select_register(REG_EAX);
+ Gp const src1reg = src1p.select_register(eax);
// 32-bit form
if (inst.size() == 4)
{
// src1p in memory
if (src1p.is_memory())
- emit_test_m32_p32(dst, MABS(src1p.memory()), src2p, inst); // test [src1p],src2p
+ alu_op_param(a, Inst::kIdTest, MABS(src1p.memory(), 4), src2p); // test [src1p],src2p
// general case
else
{
- emit_mov_r32_p32(dst, src1reg, src1p); // mov src1reg,src1p
- emit_test_r32_p32(dst, src1reg, src2p, inst); // test src1reg,src2p
+ emit_mov_r32_p32(a, src1reg, src1p); // mov src1reg,src1p
+ alu_op_param(a, Inst::kIdTest, src1reg, src2p); // test src1reg,src2p
}
}
@@ -5321,13 +5730,15 @@ void drcbe_x86::op_test(x86code *&dst, const instruction &inst)
{
// src1p in memory
if (src1p.is_memory())
- emit_test_m64_p64(dst, MABS(src1p.memory()), src2p, inst); // test [dstp],src2p
+ alu_op_param(a, Inst::kIdTest, Inst::kIdTest, // test [src1p],src2p
+ MABS(src1p.memory(0), 4), MABS(src1p.memory(4), 4), src2p, inst.flags() & FLAG_Z);
// general case
else
{
- emit_mov_r64_p64(dst, src1reg, REG_EDX, src1p); // mov src1reg:dstp,[src1p]
- emit_test_r64_p64(dst, src1reg, REG_EDX, src2p, inst); // test src1reg:dstp,src2p
+ emit_mov_r64_p64(a, src1reg, edx, src1p); // mov edx:src1reg,[src1p]
+ alu_op_param(a, Inst::kIdTest, Inst::kIdTest, // test edx:src1reg,src2p
+ src1reg, edx, src2p, inst.flags() & FLAG_Z);
}
}
}
@@ -5337,7 +5748,7 @@ void drcbe_x86::op_test(x86code *&dst, const instruction &inst)
// op_or - process a OR opcode
//-------------------------------------------------
-void drcbe_x86::op_or(x86code *&dst, const instruction &inst)
+void drcbe_x86::op_or(Assembler &a, const instruction &inst)
{
// validate instruction
assert(inst.size() == 4 || inst.size() == 8);
@@ -5351,21 +5762,62 @@ void drcbe_x86::op_or(x86code *&dst, const instruction &inst)
normalize_commutative(src1p, src2p);
// pick a target register for the general case
- int dstreg = dstp.select_register(REG_EAX, src2p);
+ Gp const dstreg = dstp.select_register(eax, src2p);
// 32-bit form
if (inst.size() == 4)
{
// dstp == src1p in memory
if (dstp.is_memory() && dstp == src1p)
- emit_or_m32_p32(dst, MABS(dstp.memory()), src2p, inst); // or [dstp],src2p
+ alu_op_param(a, Inst::kIdOr, MABS(dstp.memory(), 4), src2p, // or [dstp],src2p
+ [inst](Assembler &a, Operand const &dst, be_parameter const &src)
+ {
+ // optimize all-zero and all-one cases
+ if (!inst.flags() && u32(src.immediate()) == 0xffffffffU)
+ {
+ a.mov(dst.as<Mem>(), imm(-1));
+ return true;
+ }
+ else if (!inst.flags() && !src.immediate())
+ return true;
+
+ return false;
+ });
+ // dstp == src2p in memory
+ else if (dstp.is_memory() && dstp == src2p)
+ alu_op_param(a, Inst::kIdOr, MABS(dstp.memory(), 4), src1p, // or [dstp],src1p
+ [inst](Assembler &a, Operand const &dst, be_parameter const &src)
+ {
+ // optimize all-zero and all-one cases
+ if (!inst.flags() && u32(src.immediate()) == 0xffffffffU)
+ {
+ a.mov(dst.as<Mem>(), imm(-1));
+ return true;
+ }
+ else if (!inst.flags() && !src.immediate())
+ return true;
+ return false;
+ });
// general case
else
{
- emit_mov_r32_p32(dst, dstreg, src1p); // mov dstreg,src1p
- emit_or_r32_p32(dst, dstreg, src2p, inst); // or dstreg,src2p
- emit_mov_p32_r32(dst, dstp, dstreg); // mov dstp,dstreg
+ emit_mov_r32_p32(a, dstreg, src1p); // mov dstreg,src1p
+ alu_op_param(a, Inst::kIdOr, dstreg, src2p, // or dstreg,src2p
+ [inst](Assembler &a, Operand const &dst, be_parameter const &src)
+ {
+ // optimize all-zero and all-one cases
+ if (!inst.flags() && u32(src.immediate()) == 0xffffffffU)
+ {
+ a.mov(dst.as<Gp>(), imm(-1));
+ return true;
+ }
+ else if (!inst.flags() && !src.immediate())
+ return true;
+
+ return false;
+ });
+ emit_mov_p32_r32(a, dstp, dstreg); // mov dstp,dstreg
}
}
@@ -5374,14 +5826,20 @@ void drcbe_x86::op_or(x86code *&dst, const instruction &inst)
{
// dstp == src1p in memory
if (dstp.is_memory() && dstp == src1p)
- emit_or_m64_p64(dst, MABS(dstp.memory()), src2p, inst); // or [dstp],src2p
+ emit_or_m64_p64(a, MABS(dstp.memory(0), 4), MABS(dstp.memory(4), 4), // or [dstp],src2p
+ src2p, inst);
+
+ // dstp == src2p in memory
+ else if (dstp.is_memory() && dstp == src2p)
+ emit_or_m64_p64(a, MABS(dstp.memory(0), 4), MABS(dstp.memory(4), 4), // or [dstp],src1p
+ src1p, inst);
// general case
else
{
- emit_mov_r64_p64(dst, dstreg, REG_EDX, src1p); // mov dstreg:dstp,[src1p]
- emit_or_r64_p64(dst, dstreg, REG_EDX, src2p, inst); // or dstreg:dstp,src2p
- emit_mov_p64_r64(dst, dstp, dstreg, REG_EDX); // mov dstp,dstreg:eax
+ emit_mov_r64_p64(a, dstreg, edx, src1p); // mov edx:dstreg,[src1p]
+ emit_or_r64_p64(a, dstreg, edx, src2p, inst); // or edx:dstreg,src2p
+ emit_mov_p64_r64(a, dstp, dstreg, edx); // mov dstp,edx:dstreg
}
}
}
@@ -5391,7 +5849,7 @@ void drcbe_x86::op_or(x86code *&dst, const instruction &inst)
// op_xor - process a XOR opcode
//-------------------------------------------------
-void drcbe_x86::op_xor(x86code *&dst, const instruction &inst)
+void drcbe_x86::op_xor(Assembler &a, const instruction &inst)
{
// validate instruction
assert(inst.size() == 4 || inst.size() == 8);
@@ -5405,21 +5863,64 @@ void drcbe_x86::op_xor(x86code *&dst, const instruction &inst)
normalize_commutative(src1p, src2p);
// pick a target register for the general case
- int dstreg = dstp.select_register(REG_EAX, src2p);
+ Gp const dstreg = dstp.select_register(eax, src2p);
// 32-bit form
if (inst.size() == 4)
{
// dstp == src1p in memory
if (dstp.is_memory() && dstp == src1p)
- emit_xor_m32_p32(dst, MABS(dstp.memory()), src2p, inst); // xor [dstp],src2p
+ alu_op_param(a, Inst::kIdXor, MABS(dstp.memory(), 4), src2p, // xor [dstp],src2p
+ [inst](Assembler &a, Operand const &dst, be_parameter const &src)
+ {
+ // optimize all-zero and all-one cases
+ if (!inst.flags() && u32(src.immediate()) == 0xffffffffU)
+ {
+ a.not_(dst.as<Mem>());
+ return true;
+ }
+ else if (!inst.flags() && !src.immediate())
+ return true;
+
+ return false;
+ });
+
+ // dstp == src2p in memory
+ else if (dstp.is_memory() && dstp == src2p)
+ alu_op_param(a, Inst::kIdXor, MABS(dstp.memory(), 4), src1p, // xor [dstp],src1p
+ [inst](Assembler &a, Operand const &dst, be_parameter const &src)
+ {
+ // optimize all-zero and all-one cases
+ if (!inst.flags() && u32(src.immediate()) == 0xffffffffU)
+ {
+ a.not_(dst.as<Mem>());
+ return true;
+ }
+ else if (!inst.flags() && !src.immediate())
+ return true;
+
+ return false;
+ });
// general case
else
{
- emit_mov_r32_p32(dst, dstreg, src1p); // mov dstreg,src1p
- emit_xor_r32_p32(dst, dstreg, src2p, inst); // xor dstreg,src2p
- emit_mov_p32_r32(dst, dstp, dstreg); // mov dstp,dstreg
+ emit_mov_r32_p32(a, dstreg, src1p); // mov dstreg,src1p
+ alu_op_param(a, Inst::kIdXor, dstreg, src2p, // xor dstreg,src2p
+ [inst](Assembler &a, Operand const &dst, be_parameter const &src)
+ {
+ // optimize all-zero and all-one cases
+ if (!inst.flags() && u32(src.immediate()) == 0xffffffffU)
+ {
+ a.not_(dst.as<Gp>());
+ return true;
+ }
+ else if (!inst.flags() && !src.immediate())
+ return true;
+
+ return false;
+ });
+ emit_mov_p32_r32(a, dstp, dstreg); // mov dstp,dstreg
}
}
@@ -5428,14 +5929,20 @@ void drcbe_x86::op_xor(x86code *&dst, const instruction &inst)
{
// dstp == src1p in memory
if (dstp.is_memory() && dstp == src1p)
- emit_xor_m64_p64(dst, MABS(dstp.memory()), src2p, inst); // xor [dstp],src2p
+ emit_xor_m64_p64(a, MABS(dstp.memory(0), 4), MABS(dstp.memory(4), 4), // xor [dstp],src2p
+ src2p, inst);
+
+ // dstp == src2p in memory
+ else if (dstp.is_memory() && dstp == src2p)
+ emit_xor_m64_p64(a, MABS(dstp.memory(0), 4), MABS(dstp.memory(4), 4), // xor [dstp],src1p
+ src1p, inst);
// general case
else
{
- emit_mov_r64_p64(dst, dstreg, REG_EDX, src1p); // mov dstreg:dstp,[src1p]
- emit_xor_r64_p64(dst, dstreg, REG_EDX, src2p, inst); // xor dstreg:dstp,src2p
- emit_mov_p64_r64(dst, dstp, dstreg, REG_EDX); // mov dstp,dstreg:eax
+ emit_mov_r64_p64(a, dstreg, edx, src1p); // mov edx:dstreg,[src1p]
+ emit_xor_r64_p64(a, dstreg, edx, src2p, inst); // xor edx:dstreg,src2p
+ emit_mov_p64_r64(a, dstp, dstreg, edx); // mov dstp,edx:dstreg
}
}
}
@@ -5445,7 +5952,7 @@ void drcbe_x86::op_xor(x86code *&dst, const instruction &inst)
// op_lzcnt - process a LZCNT opcode
//-------------------------------------------------
-void drcbe_x86::op_lzcnt(x86code *&dst, const instruction &inst)
+void drcbe_x86::op_lzcnt(Assembler &a, const instruction &inst)
{
// validate instruction
assert(inst.size() == 4 || inst.size() == 8);
@@ -5457,34 +5964,54 @@ void drcbe_x86::op_lzcnt(x86code *&dst, const instruction &inst)
be_parameter srcp(*this, inst.param(1), PTYPE_MRI);
// pick a target register for the general case
- int dstreg = dstp.select_register(REG_EAX);
+ Gp const dstreg = dstp.select_register(eax);
+
+ if (inst.flags())
+ {
+ a.xor_(eax, eax); // reset status flags
+ a.test(eax, eax);
+ }
// 32-bit form
if (inst.size() == 4)
{
- emit_mov_r32_p32(dst, dstreg, srcp); // mov dstreg,src1p
- emit_mov_r32_imm(dst, REG_ECX, 32 ^ 31); // mov ecx,32 ^ 31
- emit_bsr_r32_r32(dst, dstreg, dstreg); // bsr dstreg,dstreg
- emit_cmovcc_r32_r32(dst, x86emit::COND_Z, dstreg, REG_ECX); // cmovz dstreg,ecx
- emit_xor_r32_imm(dst, dstreg, 31); // xor dstreg,31
- emit_mov_p32_r32(dst, dstp, dstreg); // mov dstp,dstreg
+ emit_mov_r32_p32(a, dstreg, srcp); // mov dstreg,src1p
+ a.mov(ecx, 32 ^ 31); // mov ecx,32 ^ 31
+ a.bsr(dstreg, dstreg); // bsr dstreg,dstreg
+ a.cmovz(dstreg, ecx); // cmovz dstreg,ecx
+ a.xor_(dstreg, 31); // xor dstreg,31
+ emit_mov_p32_r32(a, dstp, dstreg); // mov dstp,dstreg
+
+ a.test(dstreg, dstreg);
}
// 64-bit form
else if (inst.size() == 8)
{
- emit_mov_r64_p64(dst, REG_EDX, dstreg, srcp); // mov dstreg:edx,srcp
- emit_bsr_r32_r32(dst, dstreg, dstreg); // bsr dstreg,dstreg
- emit_link skip;
- emit_jcc_short_link(dst, x86emit::COND_NZ, skip); // jnz skip
- emit_mov_r32_imm(dst, REG_ECX, 32 ^ 31); // mov ecx,32 ^ 31
- emit_bsr_r32_r32(dst, dstreg, REG_EDX); // bsr dstreg,edx
- emit_cmovcc_r32_r32(dst, x86emit::COND_Z, dstreg, REG_ECX); // cmovz dstreg,ecx
- emit_add_r32_imm(dst, REG_ECX, 32); // add ecx,32
- track_resolve_link(dst, skip); // skip:
- emit_xor_r32_r32(dst, REG_EDX, REG_EDX); // xor edx,edx
- emit_xor_r32_imm(dst, dstreg, 31); // xor dstreg,31
- emit_mov_p64_r64(dst, dstp, dstreg, REG_EDX); // mov dstp,edx:dstreg
+ emit_mov_r64_p64(a, dstreg, edx, srcp); // mov edx:dstreg,srcp
+
+ Label skip = a.newLabel();
+ Label end = a.newLabel();
+
+ a.bsr(edx, edx);
+ a.short_().jz(skip);
+ a.xor_(edx, 31 ^ 63);
+ a.mov(dstreg, edx);
+ a.short_().jmp(end);
+
+ a.bind(skip);
+ a.mov(edx, 64 ^ 63);
+ a.bsr(dstreg, dstreg);
+ a.cmovz(dstreg, edx);
+
+ a.bind(end);
+
+ a.xor_(dstreg, 63);
+ a.mov(edx, 0);
+
+ emit_mov_p64_r64(a, dstp, dstreg, edx); // mov dstp,edx:dstreg
+
+ a.test(dstreg, dstreg);
}
}
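A hedged sketch of what the new 64-bit path computes, with lzcount32 standing in for the BSR plus CMOVZ fallback used above (illustration only, not code from the patch): scan the high half first, fall back to the low half, and map an all-zero input to 64.

    #include <cstdint>

    inline unsigned lzcount32(uint32_t v)
    {
        // BSR with the CMOVZ fallback: an all-zero input yields 32
        for (int bit = 31; bit >= 0; bit--)
            if ((v >> bit) & 1)
                return 31 - bit;
        return 32;
    }

    inline unsigned lzcount64(uint32_t lo, uint32_t hi)
    {
        unsigned const n = lzcount32(hi);               // high half first
        return (n < 32) ? n : (32 + lzcount32(lo));     // otherwise 32 plus the low-half count
    }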
@@ -5493,7 +6020,7 @@ void drcbe_x86::op_lzcnt(x86code *&dst, const instruction &inst)
// op_tzcnt - process a TZCNT opcode
//-------------------------------------------------
-void drcbe_x86::op_tzcnt(x86code *&dst, const instruction &inst)
+void drcbe_x86::op_tzcnt(Assembler &a, const instruction &inst)
{
// validate instruction
assert(inst.size() == 4 || inst.size() == 8);
@@ -5504,32 +6031,45 @@ void drcbe_x86::op_tzcnt(x86code *&dst, const instruction &inst)
be_parameter dstp(*this, inst.param(0), PTYPE_MR);
be_parameter srcp(*this, inst.param(1), PTYPE_MRI);
- int dstreg = dstp.select_register(REG_EAX);
+ Gp const dstreg = dstp.select_register(eax);
+
+ if (inst.flags())
+ {
+ a.xor_(eax, eax); // reset status flags
+ a.test(eax, eax);
+ }
// 32-bit form
if (inst.size() == 4)
{
- emit_mov_r32_p32(dst, dstreg, srcp); // mov dstreg,src1p
- emit_mov_r32_imm(dst, REG_ECX, 32); // mov ecx,32
- emit_bsf_r32_r32(dst, dstreg, dstreg); // bsf dstreg,dstreg
- emit_cmovcc_r32_r32(dst, x86emit::COND_Z, dstreg, REG_ECX); // cmovz dstreg,ecx
- emit_mov_p32_r32(dst, dstp, dstreg); // mov dstp,dstreg
+ emit_mov_r32_p32(a, dstreg, srcp); // mov dstreg,src1p
+ a.mov(ecx, 32); // mov ecx,32
+ a.bsf(dstreg, dstreg); // bsf dstreg,dstreg
+ a.cmovz(dstreg, ecx); // cmovz dstreg,ecx
+ emit_mov_p32_r32(a, dstp, dstreg); // mov dstp,dstreg
+
+ a.mov(ecx, dstreg);
+ a.xor_(ecx, 32);
}
// 64-bit form
else if (inst.size() == 8)
{
- emit_link skip;
- emit_mov_r64_p64(dst, REG_EDX, dstreg, srcp); // mov dstreg:edx,srcp
- emit_bsf_r32_r32(dst, dstreg, dstreg); // bsf dstreg,dstreg
- emit_jcc_short_link(dst, x86emit::COND_NZ, skip); // jnz skip
- emit_mov_r32_imm(dst, REG_ECX, 32); // mov ecx,32
- emit_bsf_r32_r32(dst, dstreg, REG_EDX); // bsf dstreg,edx
- emit_cmovcc_r32_r32(dst, x86emit::COND_Z, dstreg, REG_ECX); // cmovz dstreg,ecx
- emit_add_r32_imm(dst, dstreg, 32); // add dstreg,32
- track_resolve_link(dst, skip); // skip:
- emit_xor_r32_r32(dst, REG_EDX, REG_EDX); // xor edx,edx
- emit_mov_p64_r64(dst, dstp, dstreg, REG_EDX); // mov dstp,edx:dstreg
+ Label skip = a.newLabel();
+ emit_mov_r64_p64(a, dstreg, edx, srcp); // mov edx:dstreg,srcp
+ a.bsf(dstreg, dstreg); // bsf dstreg,dstreg
+ a.short_().jnz(skip); // jnz skip
+ a.mov(ecx, 32); // mov ecx,32
+ a.bsf(dstreg, edx); // bsf dstreg,edx
+ a.cmovz(dstreg, ecx); // cmovz dstreg,ecx
+ a.add(dstreg, 32); // add dstreg,32
+ a.bind(skip); // skip:
+ reset_last_upper_lower_reg();
+ a.xor_(edx, edx); // xor edx,edx
+ emit_mov_p64_r64(a, dstp, dstreg, edx); // mov dstp,edx:dstreg
+
+ a.mov(ecx, dstreg);
+ a.xor_(ecx, 64);
}
}
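The 64-bit TZCNT path is the mirror image, scanning the low half first (a sketch under the same assumptions as the lzcount example above). The trailing mov/xor pair exists only to set flags: the result is 64 exactly when the whole input was zero, so xor ecx,64 leaves the Z flag set in that case.

    #include <cstdint>

    inline unsigned tzcount32(uint32_t v)
    {
        // BSF with the CMOVZ fallback: an all-zero input yields 32
        for (unsigned bit = 0; bit < 32; ++bit)
            if ((v >> bit) & 1)
                return bit;
        return 32;
    }

    inline unsigned tzcount64(uint32_t lo, uint32_t hi)
    {
        unsigned const n = tzcount32(lo);               // low half first
        return (n < 32) ? n : (32 + tzcount32(hi));     // otherwise 32 plus the high-half count
    }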
@@ -5538,7 +6078,7 @@ void drcbe_x86::op_tzcnt(x86code *&dst, const instruction &inst)
// op_bswap - process a BSWAP opcode
//-------------------------------------------------
-void drcbe_x86::op_bswap(x86code *&dst, const instruction &inst)
+void drcbe_x86::op_bswap(Assembler &a, const instruction &inst)
{
// validate instruction
assert(inst.size() == 4 || inst.size() == 8);
@@ -5550,36 +6090,36 @@ void drcbe_x86::op_bswap(x86code *&dst, const instruction &inst)
be_parameter srcp(*this, inst.param(1), PTYPE_MRI);
// pick a target register for the general case
- int dstreg = dstp.select_register(REG_EAX);
+ Gp const dstreg = dstp.select_register(eax);
// 32-bit form
if (inst.size() == 4)
{
- emit_mov_r32_p32(dst, dstreg, srcp); // mov dstreg,src1p
- emit_bswap_r32(dst, dstreg); // bswap dstreg
+ emit_mov_r32_p32(a, dstreg, srcp); // mov dstreg,src1p
+ a.bswap(dstreg); // bswap dstreg
if (inst.flags() != 0)
- emit_test_r32_r32(dst, dstreg, dstreg); // test dstreg,dstreg
- emit_mov_p32_r32(dst, dstp, dstreg); // mov dstp,dstreg
+ a.test(dstreg, dstreg); // test dstreg,dstreg
+ emit_mov_p32_r32(a, dstp, dstreg); // mov dstp,dstreg
}
// 64-bit form
else if (inst.size() == 8)
{
- emit_mov_r64_p64(dst, REG_EDX, dstreg, srcp); // mov dstreg:edx,srcp
- emit_bswap_r32(dst, dstreg); // bswap dstreg
- emit_bswap_r32(dst, REG_EDX); // bswap edx
- emit_mov_p64_r64(dst, dstp, dstreg, REG_EDX); // mov dstp,edx:dstreg
+ emit_mov_r64_p64(a, edx, dstreg, srcp); // mov dstreg:edx,srcp
+ a.bswap(dstreg); // bswap dstreg
+ a.bswap(edx); // bswap edx
+ emit_mov_p64_r64(a, dstp, dstreg, edx); // mov dstp,edx:dstreg
if (inst.flags() == FLAG_Z)
- emit_or_r32_r32(dst, REG_EDX, dstreg); // or edx,eax
+ a.or_(edx, dstreg); // or edx,dstreg
else if (inst.flags() == FLAG_S)
- emit_test_r32_r32(dst, REG_EDX, REG_EDX); // test edx,edx
+ a.test(edx, edx); // test edx,edx
else
{
- emit_movzx_r32_r16(dst, REG_ECX, dstreg); // movzx ecx,dstreg
- emit_or_r32_r32(dst, REG_EDX, REG_ECX); // or edx,ecx
- emit_mov_r32_r32(dst, REG_ECX, dstreg); // mov ecx,dstreg
- emit_shr_r32_imm(dst, REG_ECX, 16); // shr ecx,16
- emit_or_r32_r32(dst, REG_EDX, REG_ECX); // or edx,ecx
+ a.movzx(ecx, dstreg.r16()); // movzx ecx,dstreg
+ a.or_(edx, ecx); // or edx,ecx
+ a.mov(ecx, dstreg); // mov ecx,dstreg
+ a.shr(ecx, 16); // shr ecx,16
+ a.or_(edx, ecx); // or edx,ecx
}
}
}
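The 64-bit BSWAP form swaps the two halves and byte-reverses each one; a plain C++ sketch of the same transformation (illustration only, not part of the patch):

    #include <cstdint>

    inline uint32_t bswap32(uint32_t v)
    {
        return (v >> 24) | ((v >> 8) & 0x0000ff00U) | ((v << 8) & 0x00ff0000U) | (v << 24);
    }

    inline uint64_t bswap64(uint64_t v)
    {
        // the new low half comes from the old high half and vice versa
        return (uint64_t(bswap32(uint32_t(v))) << 32) | bswap32(uint32_t(v >> 32));
    }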
@@ -5589,7 +6129,7 @@ void drcbe_x86::op_bswap(x86code *&dst, const instruction &inst)
// op_shl - process a SHL opcode
//-------------------------------------------------
-void drcbe_x86::op_shl(x86code *&dst, const instruction &inst)
+void drcbe_x86::op_shl(Assembler &a, const instruction &inst)
{
// validate instruction
assert(inst.size() == 4 || inst.size() == 8);
@@ -5602,21 +6142,31 @@ void drcbe_x86::op_shl(x86code *&dst, const instruction &inst)
be_parameter src2p(*this, inst.param(2), PTYPE_MRI);
// pick a target register for the general case
- int dstreg = dstp.select_register(REG_EAX, src2p);
+ Gp const dstreg = dstp.select_register(eax, src2p);
// 32-bit form
if (inst.size() == 4)
{
// dstp == src1p in memory
if (dstp.is_memory() && dstp == src1p)
- emit_shl_m32_p32(dst, MABS(dstp.memory()), src2p, inst); // shl [dstp],src2p
+ shift_op_param(a, Inst::kIdShl, inst.size(), MABS(dstp.memory(), 4), src2p, // shl [dstp],src2p
+ [inst](Assembler &a, Operand const &dst, be_parameter const &src)
+ {
+ // optimize zero case
+ return (!inst.flags() && !src.immediate());
+ }, true);
// general case
else
{
- emit_mov_r32_p32(dst, dstreg, src1p); // mov dstreg,src1p
- emit_shl_r32_p32(dst, dstreg, src2p, inst); // shl dstreg,src2p
- emit_mov_p32_r32(dst, dstp, dstreg); // mov dstp,dstreg
+ emit_mov_r32_p32(a, dstreg, src1p); // mov dstreg,src1p
+ shift_op_param(a, Inst::kIdShl, inst.size(), dstreg, src2p, // shl dstreg,src2p
+ [inst](Assembler &a, Operand const &dst, be_parameter const &src)
+ {
+ // optimize zero case
+ return (!inst.flags() && !src.immediate());
+ }, true);
+ emit_mov_p32_r32(a, dstp, dstreg); // mov dstp,dstreg
}
}
@@ -5624,9 +6174,9 @@ void drcbe_x86::op_shl(x86code *&dst, const instruction &inst)
else if (inst.size() == 8)
{
// general case
- emit_mov_r64_p64(dst, dstreg, REG_EDX, src1p); // mov dstreg:dstp,[src1p]
- emit_shl_r64_p64(dst, dstreg, REG_EDX, src2p, inst); // shl dstreg:dstp,src2p
- emit_mov_p64_r64(dst, dstp, dstreg, REG_EDX); // mov dstp,dstreg:eax
+ emit_mov_r64_p64(a, dstreg, edx, src1p); // mov edx:dstreg,[src1p]
+ emit_shl_r64_p64(a, dstreg, edx, src2p, inst); // shl edx:dstreg,src2p
+ emit_mov_p64_r64(a, dstp, dstreg, edx); // mov dstp,edx:dstreg
}
}
@@ -5635,7 +6185,7 @@ void drcbe_x86::op_shl(x86code *&dst, const instruction &inst)
// op_shr - process a SHR opcode
//-------------------------------------------------
-void drcbe_x86::op_shr(x86code *&dst, const instruction &inst)
+void drcbe_x86::op_shr(Assembler &a, const instruction &inst)
{
// validate instruction
assert(inst.size() == 4 || inst.size() == 8);
@@ -5648,21 +6198,31 @@ void drcbe_x86::op_shr(x86code *&dst, const instruction &inst)
be_parameter src2p(*this, inst.param(2), PTYPE_MRI);
// pick a target register for the general case
- int dstreg = dstp.select_register(REG_EAX, src2p);
+ Gp const dstreg = dstp.select_register(eax, src2p);
// 32-bit form
if (inst.size() == 4)
{
// dstp == src1p in memory
if (dstp.is_memory() && dstp == src1p)
- emit_shr_m32_p32(dst, MABS(dstp.memory()), src2p, inst); // shr [dstp],src2p
+ shift_op_param(a, Inst::kIdShr, inst.size(), MABS(dstp.memory(), 4), src2p, // shr [dstp],src2p
+ [inst](Assembler &a, Operand const &dst, be_parameter const &src)
+ {
+ // optimize zero case
+ return (!inst.flags() && !src.immediate());
+ }, true);
// general case
else
{
- emit_mov_r32_p32(dst, dstreg, src1p); // mov dstreg,src1p
- emit_shr_r32_p32(dst, dstreg, src2p, inst); // shr dstreg,src2p
- emit_mov_p32_r32(dst, dstp, dstreg); // mov dstp,dstreg
+ emit_mov_r32_p32(a, dstreg, src1p); // mov dstreg,src1p
+ shift_op_param(a, Inst::kIdShr, inst.size(), dstreg, src2p, // shr dstreg,src2p
+ [inst](Assembler &a, Operand const &dst, be_parameter const &src)
+ {
+ // optimize zero case
+ return (!inst.flags() && !src.immediate());
+ }, true);
+ emit_mov_p32_r32(a, dstp, dstreg); // mov dstp,dstreg
}
}
@@ -5670,9 +6230,9 @@ void drcbe_x86::op_shr(x86code *&dst, const instruction &inst)
else if (inst.size() == 8)
{
// general case
- emit_mov_r64_p64(dst, dstreg, REG_EDX, src1p); // mov dstreg:dstp,[src1p]
- emit_shr_r64_p64(dst, dstreg, REG_EDX, src2p, inst); // shr dstreg:dstp,src2p
- emit_mov_p64_r64(dst, dstp, dstreg, REG_EDX); // mov dstp,dstreg:eax
+ emit_mov_r64_p64(a, dstreg, edx, src1p); // mov edx:dstreg,[src1p]
+ emit_shr_r64_p64(a, dstreg, edx, src2p, inst); // shr edx:dstreg,src2p
+ emit_mov_p64_r64(a, dstp, dstreg, edx); // mov dstp,edx:dstreg
}
}
@@ -5681,7 +6241,7 @@ void drcbe_x86::op_shr(x86code *&dst, const instruction &inst)
// op_sar - process a SAR opcode
//-------------------------------------------------
-void drcbe_x86::op_sar(x86code *&dst, const instruction &inst)
+void drcbe_x86::op_sar(Assembler &a, const instruction &inst)
{
// validate instruction
assert(inst.size() == 4 || inst.size() == 8);
@@ -5694,21 +6254,31 @@ void drcbe_x86::op_sar(x86code *&dst, const instruction &inst)
be_parameter src2p(*this, inst.param(2), PTYPE_MRI);
// pick a target register for the general case
- int dstreg = dstp.select_register(REG_EAX, src2p);
+ Gp const dstreg = dstp.select_register(eax, src2p);
// 32-bit form
if (inst.size() == 4)
{
// dstp == src1p in memory
if (dstp.is_memory() && dstp == src1p)
- emit_sar_m32_p32(dst, MABS(dstp.memory()), src2p, inst); // sar [dstp],src2p
+ shift_op_param(a, Inst::kIdSar, inst.size(), MABS(dstp.memory(), 4), src2p, // sar [dstp],src2p
+ [inst](Assembler &a, Operand const &dst, be_parameter const &src)
+ {
+ // optimize zero case
+ return (!inst.flags() && !src.immediate());
+ }, true);
// general case
else
{
- emit_mov_r32_p32(dst, dstreg, src1p); // mov dstreg,src1p
- emit_sar_r32_p32(dst, dstreg, src2p, inst); // sar dstreg,src2p
- emit_mov_p32_r32(dst, dstp, dstreg); // mov dstp,dstreg
+ emit_mov_r32_p32(a, dstreg, src1p); // mov dstreg,src1p
+ shift_op_param(a, Inst::kIdSar, inst.size(), dstreg, src2p, // sar dstreg,src2p
+ [inst](Assembler &a, Operand const &dst, be_parameter const &src)
+ {
+ // optimize zero case
+ return (!inst.flags() && !src.immediate());
+ }, true);
+ emit_mov_p32_r32(a, dstp, dstreg); // mov dstp,dstreg
}
}
@@ -5716,9 +6286,9 @@ void drcbe_x86::op_sar(x86code *&dst, const instruction &inst)
else if (inst.size() == 8)
{
// general case
- emit_mov_r64_p64(dst, dstreg, REG_EDX, src1p); // mov dstreg:dstp,[src1p]
- emit_sar_r64_p64(dst, dstreg, REG_EDX, src2p, inst); // sar dstreg:dstp,src2p
- emit_mov_p64_r64(dst, dstp, dstreg, REG_EDX); // mov dstp,dstreg:eax
+ emit_mov_r64_p64(a, dstreg, edx, src1p); // mov edx:dstreg,[src1p]
+ emit_sar_r64_p64(a, dstreg, edx, src2p, inst); // sar edx:dstreg,src2p
+ emit_mov_p64_r64(a, dstp, dstreg, edx); // mov dstp,edx:dstreg
}
}
@@ -5727,7 +6297,7 @@ void drcbe_x86::op_sar(x86code *&dst, const instruction &inst)
// op_rol - process a rol opcode
//-------------------------------------------------
-void drcbe_x86::op_rol(x86code *&dst, const instruction &inst)
+void drcbe_x86::op_rol(Assembler &a, const instruction &inst)
{
// validate instruction
assert(inst.size() == 4 || inst.size() == 8);
@@ -5740,21 +6310,31 @@ void drcbe_x86::op_rol(x86code *&dst, const instruction &inst)
be_parameter src2p(*this, inst.param(2), PTYPE_MRI);
// pick a target register for the general case
- int dstreg = dstp.select_register(REG_EAX, src2p);
+ Gp const dstreg = dstp.select_register(eax, src2p);
// 32-bit form
if (inst.size() == 4)
{
// dstp == src1p in memory
if (dstp.is_memory() && dstp == src1p)
- emit_rol_m32_p32(dst, MABS(dstp.memory()), src2p, inst); // rol [dstp],src2p
+ shift_op_param(a, Inst::kIdRol, inst.size(), MABS(dstp.memory(), 4), src2p, // rol [dstp],src2p
+ [inst](Assembler &a, Operand const &dst, be_parameter const &src)
+ {
+ // optimize zero case
+ return (!inst.flags() && !src.immediate());
+ }, true);
// general case
else
{
- emit_mov_r32_p32(dst, dstreg, src1p); // mov dstreg,src1p
- emit_rol_r32_p32(dst, dstreg, src2p, inst); // rol dstreg,src2p
- emit_mov_p32_r32(dst, dstp, dstreg); // mov dstp,dstreg
+ emit_mov_r32_p32(a, dstreg, src1p); // mov dstreg,src1p
+ shift_op_param(a, Inst::kIdRol, inst.size(), dstreg, src2p, // rol dstreg,src2p
+ [inst](Assembler &a, Operand const &dst, be_parameter const &src)
+ {
+ // optimize zero case
+ return (!inst.flags() && !src.immediate());
+ }, true);
+ emit_mov_p32_r32(a, dstp, dstreg); // mov dstp,dstreg
}
}
@@ -5762,9 +6342,9 @@ void drcbe_x86::op_rol(x86code *&dst, const instruction &inst)
else if (inst.size() == 8)
{
// general case
- emit_mov_r64_p64(dst, dstreg, REG_EDX, src1p); // mov dstreg:dstp,[src1p]
- emit_rol_r64_p64(dst, dstreg, REG_EDX, src2p, inst); // rol dstreg:dstp,src2p
- emit_mov_p64_r64(dst, dstp, dstreg, REG_EDX); // mov dstp,dstreg:eax
+ emit_mov_r64_p64(a, dstreg, edx, src1p); // mov edx:dstreg,[src1p]
+ emit_rol_r64_p64(a, dstreg, edx, src2p, inst); // rol edx:dstreg,src2p
+ emit_mov_p64_r64(a, dstp, dstreg, edx); // mov dstp,edx:dstreg
}
}
@@ -5773,7 +6353,7 @@ void drcbe_x86::op_rol(x86code *&dst, const instruction &inst)
// op_ror - process a ROR opcode
//-------------------------------------------------
-void drcbe_x86::op_ror(x86code *&dst, const instruction &inst)
+void drcbe_x86::op_ror(Assembler &a, const instruction &inst)
{
// validate instruction
assert(inst.size() == 4 || inst.size() == 8);
@@ -5786,21 +6366,31 @@ void drcbe_x86::op_ror(x86code *&dst, const instruction &inst)
be_parameter src2p(*this, inst.param(2), PTYPE_MRI);
// pick a target register for the general case
- int dstreg = dstp.select_register(REG_EAX, src2p);
+ Gp const dstreg = dstp.select_register(eax, src2p);
// 32-bit form
if (inst.size() == 4)
{
// dstp == src1p in memory
if (dstp.is_memory() && dstp == src1p)
- emit_ror_m32_p32(dst, MABS(dstp.memory()), src2p, inst); // ror [dstp],src2p
+ shift_op_param(a, Inst::kIdRor, inst.size(), MABS(dstp.memory(), 4), src2p, // ror [dstp],src2p
+ [inst](Assembler &a, Operand const &dst, be_parameter const &src)
+ {
+ // optimize zero case
+ return (!inst.flags() && !src.immediate());
+ }, true);
// general case
else
{
- emit_mov_r32_p32(dst, dstreg, src1p); // mov dstreg,src1p
- emit_ror_r32_p32(dst, dstreg, src2p, inst); // ror dstreg,src2p
- emit_mov_p32_r32(dst, dstp, dstreg); // mov dstp,dstreg
+ emit_mov_r32_p32(a, dstreg, src1p); // mov dstreg,src1p
+ shift_op_param(a, Inst::kIdRor, inst.size(), dstreg, src2p, // ror dstreg,src2p
+ [inst](Assembler &a, Operand const &dst, be_parameter const &src)
+ {
+ // optimize zero case
+ return (!inst.flags() && !src.immediate());
+ }, true);
+ emit_mov_p32_r32(a, dstp, dstreg); // mov dstp,dstreg
}
}
@@ -5808,9 +6398,9 @@ void drcbe_x86::op_ror(x86code *&dst, const instruction &inst)
else if (inst.size() == 8)
{
// general case
- emit_mov_r64_p64(dst, dstreg, REG_EDX, src1p); // mov dstreg:dstp,[src1p]
- emit_ror_r64_p64(dst, dstreg, REG_EDX, src2p, inst); // ror dstreg:dstp,src2p
- emit_mov_p64_r64(dst, dstp, dstreg, REG_EDX); // mov dstp,dstreg:eax
+ emit_mov_r64_p64(a, dstreg, edx, src1p); // mov edx:dstreg,[src1p]
+ emit_ror_r64_p64(a, dstreg, edx, src2p, inst); // ror edx:dstreg,src2p
+ emit_mov_p64_r64(a, dstp, dstreg, edx); // mov dstp,edx:dstreg
}
}
@@ -5819,7 +6409,7 @@ void drcbe_x86::op_ror(x86code *&dst, const instruction &inst)
// op_rolc - process a ROLC opcode
//-------------------------------------------------
-void drcbe_x86::op_rolc(x86code *&dst, const instruction &inst)
+void drcbe_x86::op_rolc(Assembler &a, const instruction &inst)
{
// validate instruction
assert(inst.size() == 4 || inst.size() == 8);
@@ -5832,21 +6422,31 @@ void drcbe_x86::op_rolc(x86code *&dst, const instruction &inst)
be_parameter src2p(*this, inst.param(2), PTYPE_MRI);
// pick a target register for the general case
- int dstreg = dstp.select_register(REG_EAX, src2p);
+ Gp const dstreg = dstp.select_register(eax, src2p);
// 32-bit form
if (inst.size() == 4)
{
// dstp == src1p in memory
if (dstp.is_memory() && dstp == src1p)
- emit_rcl_m32_p32(dst, MABS(dstp.memory()), src2p, inst); // rcl [dstp],src2p
+ shift_op_param(a, Inst::kIdRcl, inst.size(), MABS(dstp.memory(), 4), src2p, // rcl [dstp],src2p
+ [inst](Assembler &a, Operand const &dst, be_parameter const &src)
+ {
+ // optimize zero case
+ return (!inst.flags() && !src.immediate());
+ }, true);
// general case
else
{
- emit_mov_r32_p32_keepflags(dst, dstreg, src1p); // mov dstreg,src1p
- emit_rcl_r32_p32(dst, dstreg, src2p, inst); // rcl dstreg,src2p
- emit_mov_p32_r32(dst, dstp, dstreg); // mov dstp,dstreg
+ emit_mov_r32_p32_keepflags(a, dstreg, src1p); // mov dstreg,src1p
+ shift_op_param(a, Inst::kIdRcl, inst.size(), dstreg, src2p, // rcl dstreg,src2p
+ [inst](Assembler &a, Operand const &dst, be_parameter const &src)
+ {
+ // optimize zero case
+ return (!inst.flags() && !src.immediate());
+ }, true);
+ emit_mov_p32_r32(a, dstp, dstreg); // mov dstp,dstreg
}
}
@@ -5854,9 +6454,9 @@ void drcbe_x86::op_rolc(x86code *&dst, const instruction &inst)
else if (inst.size() == 8)
{
// general case
- emit_mov_r64_p64_keepflags(dst, dstreg, REG_EDX, src1p); // mov dstreg:dstp,[src1p]
- emit_rcl_r64_p64(dst, dstreg, REG_EDX, src2p, inst); // rcl dstreg:dstp,src2p
- emit_mov_p64_r64(dst, dstp, dstreg, REG_EDX); // mov dstp,dstreg:eax
+ emit_mov_r64_p64_keepflags(a, dstreg, edx, src1p); // mov edx:dstreg,[src1p]
+ emit_rcl_r64_p64(a, dstreg, edx, src2p, inst); // rcl edx:dstreg,src2p
+ emit_mov_p64_r64(a, dstp, dstreg, edx); // mov dstp,edx:dstreg
}
}
@@ -5865,7 +6465,7 @@ void drcbe_x86::op_rolc(x86code *&dst, const instruction &inst)
// op_rorc - process a RORC opcode
//-------------------------------------------------
-void drcbe_x86::op_rorc(x86code *&dst, const instruction &inst)
+void drcbe_x86::op_rorc(Assembler &a, const instruction &inst)
{
// validate instruction
assert(inst.size() == 4 || inst.size() == 8);
@@ -5878,21 +6478,31 @@ void drcbe_x86::op_rorc(x86code *&dst, const instruction &inst)
be_parameter src2p(*this, inst.param(2), PTYPE_MRI);
// pick a target register for the general case
- int dstreg = dstp.select_register(REG_EAX, src2p);
+ Gp const dstreg = dstp.select_register(eax, src2p);
// 32-bit form
if (inst.size() == 4)
{
// dstp == src1p in memory
if (dstp.is_memory() && dstp == src1p)
- emit_rcr_m32_p32(dst, MABS(dstp.memory()), src2p, inst); // rcr [dstp],src2p
+ shift_op_param(a, Inst::kIdRcr, inst.size(), MABS(dstp.memory(), 4), src2p, // rcr [dstp],src2p
+ [inst](Assembler &a, Operand const &dst, be_parameter const &src)
+ {
+ // optimize zero case
+ return (!inst.flags() && !src.immediate());
+ }, true);
// general case
else
{
- emit_mov_r32_p32_keepflags(dst, dstreg, src1p); // mov dstreg,src1p
- emit_rcr_r32_p32(dst, dstreg, src2p, inst); // rcr dstreg,src2p
- emit_mov_p32_r32(dst, dstp, dstreg); // mov dstp,dstreg
+ emit_mov_r32_p32_keepflags(a, dstreg, src1p); // mov dstreg,src1p
+ shift_op_param(a, Inst::kIdRcr, inst.size(), dstreg, src2p, // rcr dstreg,src2p
+ [inst](Assembler &a, Operand const &dst, be_parameter const &src)
+ {
+ // optimize zero case
+ return (!inst.flags() && !src.immediate());
+ }, true);
+ emit_mov_p32_r32(a, dstp, dstreg); // mov dstp,dstreg
}
}
@@ -5900,9 +6510,9 @@ void drcbe_x86::op_rorc(x86code *&dst, const instruction &inst)
else if (inst.size() == 8)
{
// general case
- emit_mov_r64_p64_keepflags(dst, dstreg, REG_EDX, src1p); // mov dstreg:dstp,[src1p]
- emit_rcr_r64_p64(dst, dstreg, REG_EDX, src2p, inst); // rcr dstreg:dstp,src2p
- emit_mov_p64_r64(dst, dstp, dstreg, REG_EDX); // mov dstp,dstreg:eax
+ emit_mov_r64_p64_keepflags(a, dstreg, edx, src1p); // mov edx:dstreg,[src1p]
+ emit_rcr_r64_p64(a, dstreg, edx, src2p, inst); // rcr edx:dstreg,src2p
+ emit_mov_p64_r64(a, dstp, dstreg, edx); // mov dstp,edx:dstreg
}
}
@@ -5916,7 +6526,7 @@ void drcbe_x86::op_rorc(x86code *&dst, const instruction &inst)
// op_fload - process a FLOAD opcode
//-------------------------------------------------
-void drcbe_x86::op_fload(x86code *&dst, const instruction &inst)
+void drcbe_x86::op_fload(Assembler &a, const instruction &inst)
{
// validate instruction
assert(inst.size() == 4 || inst.size() == 8);
@@ -5928,28 +6538,27 @@ void drcbe_x86::op_fload(x86code *&dst, const instruction &inst)
be_parameter basep(*this, inst.param(1), PTYPE_M);
be_parameter indp(*this, inst.param(2), PTYPE_MRI);
- // immediate index
if (indp.is_immediate())
{
- emit_mov_r32_m32(dst, REG_EAX, MABS(basep.memory(4*indp.immediate()))); // mov eax,[basep + 4*indp]
+ // immediate index
+ a.mov(eax, MABS(basep.memory(inst.size()*indp.immediate())));
if (inst.size() == 8)
- emit_mov_r32_m32(dst, REG_EDX, MABS(basep.memory(4 + 4*indp.immediate()))); // mov edx,[basep + 4*indp + 4]
+ a.mov(edx, MABS(basep.memory(4 + inst.size()*indp.immediate())));
}
-
- // other index
else
{
- int indreg = indp.select_register(REG_ECX);
- emit_mov_r32_p32(dst, indreg, indp);
- emit_mov_r32_m32(dst, REG_EAX, MABSI(basep.memory(), indreg, 4)); // mov eax,[basep + 4*indp]
+ // other index
+ Gp const indreg = indp.select_register(ecx);
+ emit_mov_r32_p32_keepflags(a, indreg, indp);
+ a.mov(eax, ptr(uintptr_t(basep.memory(0)), indreg, (inst.size() == 8) ? 3 : 2));
if (inst.size() == 8)
- emit_mov_r32_m32(dst, REG_EDX, MABSI(basep.memory(4), indreg, 4)); // mov edx,[basep + 4*indp + 4]
+ a.mov(edx, ptr(uintptr_t(basep.memory(4)), indreg, (inst.size() == 8) ? 3 : 2));
}
// general case
- emit_mov_m32_r32(dst, MABS(dstp.memory()), REG_EAX); // mov [dstp],eax
+ a.mov(MABS(dstp.memory(0)), eax);
if (inst.size() == 8)
- emit_mov_m32_r32(dst, MABS(dstp.memory(4)), REG_EDX); // mov [dstp + 4],edx
+ a.mov(MABS(dstp.memory(4)), edx);
}
@@ -5957,7 +6566,7 @@ void drcbe_x86::op_fload(x86code *&dst, const instruction &inst)
// op_fstore - process a FSTORE opcode
//-------------------------------------------------
-void drcbe_x86::op_fstore(x86code *&dst, const instruction &inst)
+void drcbe_x86::op_fstore(Assembler &a, const instruction &inst)
{
// validate instruction
assert(inst.size() == 4 || inst.size() == 8);
@@ -5969,27 +6578,25 @@ void drcbe_x86::op_fstore(x86code *&dst, const instruction &inst)
be_parameter indp(*this, inst.param(1), PTYPE_MRI);
be_parameter srcp(*this, inst.param(2), PTYPE_MF);
- // general case
- emit_mov_r32_m32(dst, REG_EAX, MABS(srcp.memory())); // mov eax,[srcp]
+ a.mov(eax, MABS(srcp.memory(0)));
if (inst.size() == 8)
- emit_mov_r32_m32(dst, REG_EDX, MABS(srcp.memory(4))); // mov edx,[srcp + 4]
+ a.mov(edx, MABS(srcp.memory(4)));
- // immediate index
if (indp.is_immediate())
{
- emit_mov_m32_r32(dst, MABS(basep.memory(4*indp.immediate())), REG_EAX); // mov [basep + 4*indp],eax
+ // immediate index
+ a.mov(MABS(basep.memory(inst.size()*indp.immediate())), eax);
if (inst.size() == 8)
- emit_mov_m32_r32(dst, MABS(basep.memory(4 + 4*indp.immediate())), REG_EDX); // mov [basep + 4*indp + 4],edx
+ a.mov(MABS(basep.memory(4 + inst.size()*indp.immediate())), edx);
}
-
- // other index
else
{
- int indreg = indp.select_register(REG_ECX);
- emit_mov_r32_p32(dst, indreg, indp);
- emit_mov_m32_r32(dst, MABSI(basep.memory(), indreg, 4), REG_EAX); // mov [basep + 4*indp],eax
+ // other index
+ Gp const indreg = indp.select_register(ecx);
+ emit_mov_r32_p32_keepflags(a, indreg, indp);
+ a.mov(ptr(uintptr_t(basep.memory(0)), indreg, (inst.size() == 8) ? 3 : 2), eax);
if (inst.size() == 8)
- emit_mov_m32_r32(dst, MABSI(basep.memory(4), indreg, 4), REG_EDX); // mov [basep + 4*indp + 4],edx
+ a.mov(ptr(uintptr_t(basep.memory(4)), indreg, (inst.size() == 8) ? 3 : 2), edx);
}
}
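The indexed FLOAD/FSTORE paths now scale the index register through the addressing mode, using a shift of 2 for 4-byte and 3 for 8-byte elements. A small sketch of the effective address being formed (illustration only):

    #include <cstdint>

    inline uintptr_t element_address(uintptr_t base, uint32_t index, unsigned element_size)
    {
        unsigned const shift = (element_size == 8) ? 3 : 2;   // matches ptr(base, indreg, shift)
        return base + (uintptr_t(index) << shift);
    }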
@@ -5998,7 +6605,7 @@ void drcbe_x86::op_fstore(x86code *&dst, const instruction &inst)
// op_fread - process a FREAD opcode
//-------------------------------------------------
-void drcbe_x86::op_fread(x86code *&dst, const instruction &inst)
+void drcbe_x86::op_fread(Assembler &a, const instruction &inst)
{
// validate instruction
assert(inst.size() == 4 || inst.size() == 8);
@@ -6008,23 +6615,27 @@ void drcbe_x86::op_fread(x86code *&dst, const instruction &inst)
// normalize parameters
be_parameter dstp(*this, inst.param(0), PTYPE_MF);
be_parameter addrp(*this, inst.param(1), PTYPE_MRI);
- const parameter &spacep = inst.param(2);
+ parameter const &spacep = inst.param(2);
assert(spacep.is_size_space());
assert((1 << spacep.size()) == inst.size());
// set up a call to the read dword/qword handler
- emit_mov_m32_p32(dst, MBD(REG_ESP, 4), addrp); // mov [esp+4],addrp
- emit_mov_m32_imm(dst, MBD(REG_ESP, 0), (uintptr_t)m_space[spacep.space()]); // mov [esp],space
- if (inst.size() == 4)
- emit_call(dst, (x86code *)m_accessors[spacep.space()].read_dword); // call read_dword
- else if (inst.size() == 8)
- emit_call(dst, (x86code *)m_accessors[spacep.space()].read_qword); // call read_qword
+ auto const &accessors = m_memory_accessors[spacep.space()];
+ auto const &accessor = (inst.size() == 4) ? accessors.read_dword : accessors.read_qword;
+ emit_mov_m32_p32(a, dword_ptr(esp, USE_THISCALL ? 0 : 4), addrp);
+ if (USE_THISCALL)
+ a.mov(ecx, imm(accessor.obj));
+ else
+ a.mov(dword_ptr(esp, 0), imm(accessor.obj));
+ a.call(imm(accessor.func));
+ if (USE_THISCALL)
+ a.sub(esp, 4);
// store result
if (inst.size() == 4)
- emit_mov_p32_r32(dst, dstp, REG_EAX); // mov dstp,eax
+ emit_mov_p32_r32(a, dstp, eax);
else if (inst.size() == 8)
- emit_mov_p64_r64(dst, dstp, REG_EAX, REG_EDX); // mov dstp,edx:eax
+ emit_mov_p64_r64(a, dstp, eax, edx);
}
@@ -6032,7 +6643,7 @@ void drcbe_x86::op_fread(x86code *&dst, const instruction &inst)
// op_fwrite - process a FWRITE opcode
//-------------------------------------------------
-void drcbe_x86::op_fwrite(x86code *&dst, const instruction &inst)
+void drcbe_x86::op_fwrite(Assembler &a, const instruction &inst)
{
// validate instruction
assert(inst.size() == 4 || inst.size() == 8);
@@ -6042,21 +6653,25 @@ void drcbe_x86::op_fwrite(x86code *&dst, const instruction &inst)
// normalize parameters
be_parameter addrp(*this, inst.param(0), PTYPE_MRI);
be_parameter srcp(*this, inst.param(1), PTYPE_MF);
- const parameter &spacep = inst.param(2);
+ parameter const &spacep = inst.param(2);
assert(spacep.is_size_space());
assert((1 << spacep.size()) == inst.size());
// set up a call to the write dword/qword handler
+ auto const &accessors = m_memory_accessors[spacep.space()];
+ auto const &accessor = (inst.size() == 4) ? accessors.write_dword : accessors.write_qword;
if (inst.size() == 4)
- emit_mov_m32_p32(dst, MBD(REG_ESP, 8), srcp); // mov [esp+8],srcp
- else if (inst.size() == 8)
- emit_mov_m64_p64(dst, MBD(REG_ESP, 8), srcp); // mov [esp+8],srcp
- emit_mov_m32_p32(dst, MBD(REG_ESP, 4), addrp); // mov [esp+4],addrp
- emit_mov_m32_imm(dst, MBD(REG_ESP, 0), (uintptr_t)m_space[spacep.space()]); // mov [esp],space
- if (inst.size() == 4)
- emit_call(dst, (x86code *)m_accessors[spacep.space()].write_dword); // call write_dword
+ emit_mov_m32_p32(a, dword_ptr(esp, USE_THISCALL ? 4 : 8), srcp);
else if (inst.size() == 8)
- emit_call(dst, (x86code *)m_accessors[spacep.space()].write_qword); // call write_qword
+ emit_mov_m64_p64(a, qword_ptr(esp, USE_THISCALL ? 4 : 8), srcp);
+ emit_mov_m32_p32(a, dword_ptr(esp, USE_THISCALL ? 0 : 4), addrp);
+ if (USE_THISCALL)
+ a.mov(ecx, imm(accessor.obj));
+ else
+ a.mov(dword_ptr(esp, 0), imm(accessor.obj));
+ a.call(imm(accessor.func));
+ if (USE_THISCALL)
+ a.sub(esp, (inst.size() == 4) ? 8 : 12);
}
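The stack bookkeeping behind USE_THISCALL in the read and write handlers, described as behaviour rather than backend API: under Win32 __thiscall the object pointer travels in ECX and the callee pops its stack arguments, so the generated code lowers ESP again after the call to keep the pre-reserved argument area intact; under the plain convention the object pointer goes on the stack and the caller's frame needs no adjustment. A sketch of the byte counts being restored (assumed helper names, for illustration only):

    // bytes the callee popped and the backend must re-reserve after the call
    constexpr unsigned readback_bytes(bool thiscall_abi)
    {
        return thiscall_abi ? 4 : 0;                 // address only (matches sub esp,4)
    }

    constexpr unsigned writeback_bytes(bool thiscall_abi, unsigned value_size)
    {
        return thiscall_abi ? (4 + value_size) : 0;  // address plus the value (8 or 12)
    }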
@@ -6064,7 +6679,7 @@ void drcbe_x86::op_fwrite(x86code *&dst, const instruction &inst)
// op_fmov - process a FMOV opcode
//-------------------------------------------------
-void drcbe_x86::op_fmov(x86code *&dst, const instruction &inst)
+void drcbe_x86::op_fmov(Assembler &a, const instruction &inst)
{
// validate instruction
assert(inst.size() == 4 || inst.size() == 8);
@@ -6076,21 +6691,27 @@ void drcbe_x86::op_fmov(x86code *&dst, const instruction &inst)
be_parameter srcp(*this, inst.param(1), PTYPE_MF);
// always start with a jmp
- emit_link skip = { nullptr };
+ Label skip;
if (inst.condition() != uml::COND_ALWAYS)
- emit_jcc_short_link(dst, X86_NOT_CONDITION(inst.condition()), skip); // jcc skip
+ {
+ skip = a.newLabel();
+ a.short_().j(X86_NOT_CONDITION(inst.condition()), skip);
+ }
// general case
- emit_mov_r32_m32(dst, REG_EAX, MABS(srcp.memory())); // mov eax,[srcp]
+ a.mov(eax, MABS(srcp.memory(0)));
if (inst.size() == 8)
- emit_mov_r32_m32(dst, REG_EDX, MABS(srcp.memory(4))); // mov edx,[srcp + 4]
- emit_mov_m32_r32(dst, MABS(dstp.memory()), REG_EAX); // mov [dstp],eax
+ a.mov(edx, MABS(srcp.memory(4)));
+ a.mov(MABS(dstp.memory(0)), eax);
if (inst.size() == 8)
- emit_mov_m32_r32(dst, MABS(dstp.memory(4)), REG_EDX); // mov [dstp + 4],edx
+ a.mov(MABS(dstp.memory(4)), edx);
// resolve the jump
- if (skip.target != nullptr)
- track_resolve_link(dst, skip); // skip:
+ if (inst.condition() != uml::COND_ALWAYS)
+ {
+ a.bind(skip);
+ reset_last_upper_lower_reg();
+ }
}
@@ -6098,7 +6719,7 @@ void drcbe_x86::op_fmov(x86code *&dst, const instruction &inst)
// op_ftoint - process a FTOINT opcode
//-------------------------------------------------
-void drcbe_x86::op_ftoint(x86code *&dst, const instruction &inst)
+void drcbe_x86::op_ftoint(Assembler &a, const instruction &inst)
{
// validate instruction
assert(inst.size() == 4 || inst.size() == 8);
@@ -6108,20 +6729,20 @@ void drcbe_x86::op_ftoint(x86code *&dst, const instruction &inst)
// normalize parameters
be_parameter dstp(*this, inst.param(0), PTYPE_MR);
be_parameter srcp(*this, inst.param(1), PTYPE_MF);
- const parameter &sizep = inst.param(2);
+ parameter const &sizep = inst.param(2);
assert(sizep.is_size());
- const parameter &roundp = inst.param(3);
+ parameter const &roundp = inst.param(3);
assert(roundp.is_rounding());
// set rounding mode if necessary
if (roundp.rounding() != ROUND_DEFAULT && (!m_sse3 || roundp.rounding() != ROUND_TRUNC))
{
- emit_fstcw_m16(dst, MABS(&m_fmodesave)); // fstcw [fmodesave]
- emit_fldcw_m16(dst, MABS(&fp_control[roundp.rounding()])); // fldcw fpcontrol[roundp]
+ a.fstcw(MABS(&m_fmodesave)); // fstcw [fmodesave]
+ a.fldcw(MABS(&fp_control[roundp.rounding()])); // fldcw fpcontrol[roundp]
}
// general case
- emit_fld_p(dst, inst.size(), srcp); // fld srcp
+ emit_fld_p(a, inst.size(), srcp); // fld srcp
// 4-byte integer case
if (sizep.size() == SIZE_DWORD)
@@ -6129,17 +6750,17 @@ void drcbe_x86::op_ftoint(x86code *&dst, const instruction &inst)
if (dstp.is_memory())
{
if (!m_sse3 || roundp.rounding() != ROUND_TRUNC)
- emit_fistp_m32(dst, MABS(dstp.memory())); // fistp [dstp]
+ a.fistp(MABS(dstp.memory(), 4)); // fistp [dstp]
else
- emit_fisttp_m32(dst, MABS(dstp.memory())); // fisttp [dstp]
+ a.fisttp(MABS(dstp.memory(), 4)); // fisttp [dstp]
}
else if (dstp.is_int_register())
{
if (!m_sse3 || roundp.rounding() != ROUND_TRUNC)
- emit_fistp_m32(dst, MABS(m_reglo[dstp.ireg()])); // fistp reglo[dstp]
+ a.fistp(MABS(m_reglo[dstp.ireg()], 4)); // fistp reglo[dstp]
else
- emit_fisttp_m32(dst, MABS(m_reglo[dstp.ireg()])); // fisttp reglo[dstp]
- emit_mov_r32_m32(dst, dstp.ireg(), MABS(m_reglo[dstp.ireg()])); // mov dstp,reglo[dstp]
+ a.fisttp(MABS(m_reglo[dstp.ireg()], 4)); // fisttp reglo[dstp]
+ a.mov(Gpd(dstp.ireg()), MABS(m_reglo[dstp.ireg()])); // mov dstp,reglo[dstp]
}
}
@@ -6149,23 +6770,23 @@ void drcbe_x86::op_ftoint(x86code *&dst, const instruction &inst)
if (dstp.is_memory())
{
if (!m_sse3 || roundp.rounding() != ROUND_TRUNC)
- emit_fistp_m64(dst, MABS(dstp.memory())); // fistp [dstp]
+ a.fistp(MABS(dstp.memory(), 8)); // fistp [dstp]
else
- emit_fisttp_m64(dst, MABS(dstp.memory())); // fisttp [dstp]
+ a.fisttp(MABS(dstp.memory(), 8)); // fisttp [dstp]
}
else if (dstp.is_int_register())
{
if (!m_sse3 || roundp.rounding() != ROUND_TRUNC)
- emit_fistp_m64(dst, MABS(m_reglo[dstp.ireg()])); // fistp reglo[dstp]
+ a.fistp(MABS(m_reglo[dstp.ireg()], 8)); // fistp reglo[dstp]
else
- emit_fisttp_m64(dst, MABS(m_reglo[dstp.ireg()])); // fisttp reglo[dstp]
- emit_mov_r32_m32(dst, dstp.ireg(), MABS(m_reglo[dstp.ireg()])); // mov dstp,reglo[dstp]
+ a.fisttp(MABS(m_reglo[dstp.ireg()], 8)); // fisttp reglo[dstp]
+ a.mov(Gpd(dstp.ireg()), MABS(m_reglo[dstp.ireg()])); // mov dstp,reglo[dstp]
}
}
// restore control word and proceed
if (roundp.rounding() != ROUND_DEFAULT && (!m_sse3 || roundp.rounding() != ROUND_TRUNC))
- emit_fldcw_m16(dst, MABS(&m_fmodesave)); // fldcw [fmodesave]
+ a.fldcw(MABS(&m_fmodesave)); // fldcw [fmodesave]
}
@@ -6173,7 +6794,7 @@ void drcbe_x86::op_ftoint(x86code *&dst, const instruction &inst)
// op_ffrint - process a FFRINT opcode
//-------------------------------------------------
-void drcbe_x86::op_ffrint(x86code *&dst, const instruction &inst)
+void drcbe_x86::op_ffrint(Assembler &a, const instruction &inst)
{
// validate instruction
assert(inst.size() == 4 || inst.size() == 8);
@@ -6183,7 +6804,7 @@ void drcbe_x86::op_ffrint(x86code *&dst, const instruction &inst)
// normalize parameters
be_parameter dstp(*this, inst.param(0), PTYPE_MF);
be_parameter srcp(*this, inst.param(1), PTYPE_MRI);
- const parameter &sizep = inst.param(2);
+ parameter const &sizep = inst.param(2);
assert(sizep.is_size());
// 4-byte integer case
@@ -6191,15 +6812,15 @@ void drcbe_x86::op_ffrint(x86code *&dst, const instruction &inst)
{
if (srcp.is_immediate())
{
- emit_mov_m32_imm(dst, MABS(&m_fptemp), srcp.immediate()); // mov [fptemp],srcp
- emit_fild_m32(dst, MABS(&m_fptemp)); // fild [fptemp]
+ a.mov(MABS(&m_fptemp, 4), srcp.immediate()); // mov [fptemp],srcp
+ a.fild(MABS(&m_fptemp, 4)); // fild [fptemp]
}
else if (srcp.is_memory())
- emit_fild_m32(dst, MABS(srcp.memory())); // fild [srcp]
+ a.fild(MABS(srcp.memory(), 4)); // fild [srcp]
else if (srcp.is_int_register())
{
- emit_mov_m32_r32(dst, MABS(m_reglo[srcp.ireg()]), srcp.ireg()); // mov reglo[srcp],srcp
- emit_fild_m32(dst, MABS(m_reglo[srcp.ireg()])); // fild reglo[srcp]
+ a.mov(MABS(m_reglo[srcp.ireg()]), Gpd(srcp.ireg())); // mov reglo[srcp],srcp
+ a.fild(MABS(m_reglo[srcp.ireg()], 4)); // fild reglo[srcp]
}
}
@@ -6208,21 +6829,21 @@ void drcbe_x86::op_ffrint(x86code *&dst, const instruction &inst)
{
if (srcp.is_immediate())
{
- emit_mov_m32_imm(dst, MABS(&m_fptemp), srcp.immediate()); // mov [fptemp],srcp
- emit_mov_m32_imm(dst, MABS((uint8_t *)&m_fptemp + 4), srcp.immediate()); // mov [fptemp+4],srcp
- emit_fild_m64(dst, MABS(&m_fptemp)); // fild [fptemp]
+ a.mov(MABS(&m_fptemp, 4), srcp.immediate()); // mov [fptemp],srcp
+ a.mov(MABS((uint8_t *)&m_fptemp + 4, 4), srcp.immediate() >> 32); // mov [fptemp+4],srcp >> 32
+ a.fild(MABS(&m_fptemp, 8)); // fild [fptemp]
}
else if (srcp.is_memory())
- emit_fild_m64(dst, MABS(srcp.memory())); // fild [srcp]
+ a.fild(MABS(srcp.memory(), 8)); // fild [srcp]
else if (srcp.is_int_register())
{
- emit_mov_m32_r32(dst, MABS(m_reglo[srcp.ireg()]), srcp.ireg()); // mov reglo[srcp],srcp
- emit_fild_m64(dst, MABS(m_reglo[srcp.ireg()])); // fild reglo[srcp]
+ a.mov(MABS(m_reglo[srcp.ireg()]), Gpd(srcp.ireg())); // mov reglo[srcp],srcp
+ a.fild(MABS(m_reglo[srcp.ireg()], 8)); // fild reglo[srcp]
}
}
// store the result and be done
- emit_fstp_p(dst, inst.size(), dstp); // fstp [dstp]
+ emit_fstp_p(a, inst.size(), dstp); // fstp [dstp]
}
@@ -6230,7 +6851,7 @@ void drcbe_x86::op_ffrint(x86code *&dst, const instruction &inst)
// op_ffrflt - process a FFRFLT opcode
//-------------------------------------------------
-void drcbe_x86::op_ffrflt(x86code *&dst, const instruction &inst)
+void drcbe_x86::op_ffrflt(Assembler &a, const instruction &inst)
{
// validate instruction
assert(inst.size() == 4 || inst.size() == 8);
@@ -6240,15 +6861,15 @@ void drcbe_x86::op_ffrflt(x86code *&dst, const instruction &inst)
// normalize parameters
be_parameter dstp(*this, inst.param(0), PTYPE_MF);
be_parameter srcp(*this, inst.param(1), PTYPE_MF);
- const parameter &sizep = inst.param(2);
+ parameter const &sizep = inst.param(2);
assert(sizep.is_size());
// general case
if (sizep.size() == SIZE_DWORD)
- emit_fld_m32(dst, MABS(srcp.memory())); // fld [srcp]
+ a.fld(MABS(srcp.memory(), 4)); // fld [srcp]
else if (sizep.size() == SIZE_QWORD)
- emit_fld_m64(dst, MABS(srcp.memory())); // fld [srcp]
- emit_fstp_p(dst, inst.size(), dstp); // fstp dstp
+ a.fld(MABS(srcp.memory(), 8)); // fld [srcp]
+ emit_fstp_p(a, inst.size(), dstp); // fstp dstp
}
@@ -6256,7 +6877,7 @@ void drcbe_x86::op_ffrflt(x86code *&dst, const instruction &inst)
// op_frnds - process a FRNDS opcode
//-------------------------------------------------
-void drcbe_x86::op_frnds(x86code *&dst, const instruction &inst)
+void drcbe_x86::op_frnds(Assembler &a, const instruction &inst)
{
// validate instruction
assert(inst.size() == 8);
@@ -6268,10 +6889,10 @@ void drcbe_x86::op_frnds(x86code *&dst, const instruction &inst)
be_parameter srcp(*this, inst.param(1), PTYPE_MF);
// general case
- emit_fld_p(dst, inst.size(), srcp); // fld srcp
- emit_fstp_m32(dst, MABS(&m_fptemp)); // fstp [fptemp]
- emit_fld_m32(dst, MABS(&m_fptemp)); // fld [fptemp]
- emit_fstp_p(dst, inst.size(), dstp); // fstp [dstp]
+ emit_fld_p(a, inst.size(), srcp); // fld srcp
+ a.fstp(MABS(&m_fptemp, 4)); // fstp [fptemp]
+ a.fld(MABS(&m_fptemp, 4)); // fld [fptemp]
+ emit_fstp_p(a, inst.size(), dstp); // fstp [dstp]
}
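FRNDS rounds a double-precision value to single precision by spilling it through a 32-bit slot and reloading it; in C++ terms the effect is roughly the following (sketch only, ignoring the active x87 rounding mode):

    inline double round_to_single(double value)
    {
        float const narrowed = static_cast<float>(value);   // fstp dword [fptemp]
        return static_cast<double>(narrowed);                // fld dword [fptemp]
    }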
@@ -6279,7 +6900,7 @@ void drcbe_x86::op_frnds(x86code *&dst, const instruction &inst)
// op_fadd - process a FADD opcode
//-------------------------------------------------
-void drcbe_x86::op_fadd(x86code *&dst, const instruction &inst)
+void drcbe_x86::op_fadd(Assembler &a, const instruction &inst)
{
// validate instruction
assert(inst.size() == 4 || inst.size() == 8);
@@ -6293,10 +6914,10 @@ void drcbe_x86::op_fadd(x86code *&dst, const instruction &inst)
normalize_commutative(src1p, src2p);
// general case
- emit_fld_p(dst, inst.size(), src1p); // fld src1p
- emit_fld_p(dst, inst.size(), src2p); // fld src2p
- emit_faddp(dst); // faddp
- emit_fstp_p(dst, inst.size(), dstp); // fstp dstp
+ emit_fld_p(a, inst.size(), src1p); // fld src1p
+ emit_fld_p(a, inst.size(), src2p); // fld src2p
+ a.faddp(); // faddp
+ emit_fstp_p(a, inst.size(), dstp); // fstp dstp
}
@@ -6304,7 +6925,7 @@ void drcbe_x86::op_fadd(x86code *&dst, const instruction &inst)
// op_fsub - process a FSUB opcode
//-------------------------------------------------
-void drcbe_x86::op_fsub(x86code *&dst, const instruction &inst)
+void drcbe_x86::op_fsub(Assembler &a, const instruction &inst)
{
// validate instruction
assert(inst.size() == 4 || inst.size() == 8);
@@ -6317,10 +6938,10 @@ void drcbe_x86::op_fsub(x86code *&dst, const instruction &inst)
be_parameter src2p(*this, inst.param(2), PTYPE_MF);
// general case
- emit_fld_p(dst, inst.size(), src1p); // fld src1p
- emit_fld_p(dst, inst.size(), src2p); // fld src2p
- emit_fsubp(dst); // fsubp
- emit_fstp_p(dst, inst.size(), dstp); // fstp dstp
+ emit_fld_p(a, inst.size(), src1p); // fld src1p
+ emit_fld_p(a, inst.size(), src2p); // fld src2p
+ a.fsubp(); // fsubp
+ emit_fstp_p(a, inst.size(), dstp); // fstp dstp
}
@@ -6328,7 +6949,7 @@ void drcbe_x86::op_fsub(x86code *&dst, const instruction &inst)
// op_fcmp - process a FCMP opcode
//-------------------------------------------------
-void drcbe_x86::op_fcmp(x86code *&dst, const instruction &inst)
+void drcbe_x86::op_fcmp(Assembler &a, const instruction &inst)
{
// validate instruction
assert(inst.size() == 4 || inst.size() == 8);
@@ -6340,11 +6961,11 @@ void drcbe_x86::op_fcmp(x86code *&dst, const instruction &inst)
be_parameter src2p(*this, inst.param(1), PTYPE_MF);
// general case
- emit_fld_p(dst, inst.size(), src2p); // fld src2p
- emit_fld_p(dst, inst.size(), src1p); // fld src1p
- emit_fcompp(dst); // fcompp
- emit_fstsw_ax(dst); // fnstsw ax
- emit_sahf(dst); // sahf
+ emit_fld_p(a, inst.size(), src2p); // fld src2p
+ emit_fld_p(a, inst.size(), src1p); // fld src1p
+ a.fcompp(); // fcompp
+ a.fnstsw(ax); // fnstsw ax
+ a.sahf(); // sahf
}
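FCMP uses the classic x87 flag transfer: fcompp compares st(0) (src1p, loaded last) against st(1) (src2p) and sets the condition bits C0/C2/C3 in the FPU status word, fnstsw ax copies that status word into AX, and sahf moves AH into the low byte of EFLAGS so C0 lands in CF, C2 in PF and C3 in ZF, where the UML conditions can test them. A small model of that bit shuffling (hypothetical helper, not backend code):

    #include <cstdint>

    // Model of fnstsw ax + sahf: map the x87 condition bits into EFLAGS.
    // In the status word C0 is bit 8, C2 is bit 10 and C3 is bit 14; sahf
    // copies AH (status word bits 8..15) into CF (bit 0), PF (bit 2) and
    // ZF (bit 6).
    uint32_t x87_status_to_eflags(uint16_t fpu_status)
    {
        uint32_t const ah = (fpu_status >> 8) & 0xff;
        uint32_t eflags = 0;
        if (ah & 0x01) eflags |= 1 << 0;   // C0 -> CF (below)
        if (ah & 0x04) eflags |= 1 << 2;   // C2 -> PF (unordered)
        if (ah & 0x40) eflags |= 1 << 6;   // C3 -> ZF (equal)
        return eflags;
    }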
@@ -6352,7 +6973,7 @@ void drcbe_x86::op_fcmp(x86code *&dst, const instruction &inst)
// op_fmul - process a FMUL opcode
//-------------------------------------------------
-void drcbe_x86::op_fmul(x86code *&dst, const instruction &inst)
+void drcbe_x86::op_fmul(Assembler &a, const instruction &inst)
{
// validate instruction
assert(inst.size() == 4 || inst.size() == 8);
@@ -6366,10 +6987,10 @@ void drcbe_x86::op_fmul(x86code *&dst, const instruction &inst)
normalize_commutative(src1p, src2p);
// general case
- emit_fld_p(dst, inst.size(), src1p); // fld src1p
- emit_fld_p(dst, inst.size(), src2p); // fld src2p
- emit_fmulp(dst); // fmulp
- emit_fstp_p(dst, inst.size(), dstp); // fstp dstp
+ emit_fld_p(a, inst.size(), src1p); // fld src1p
+ emit_fld_p(a, inst.size(), src2p); // fld src2p
+ a.fmulp(); // fmulp
+ emit_fstp_p(a, inst.size(), dstp); // fstp dstp
}
@@ -6377,7 +6998,7 @@ void drcbe_x86::op_fmul(x86code *&dst, const instruction &inst)
// op_fdiv - process a FDIV opcode
//-------------------------------------------------
-void drcbe_x86::op_fdiv(x86code *&dst, const instruction &inst)
+void drcbe_x86::op_fdiv(Assembler &a, const instruction &inst)
{
// validate instruction
assert(inst.size() == 4 || inst.size() == 8);
@@ -6390,10 +7011,10 @@ void drcbe_x86::op_fdiv(x86code *&dst, const instruction &inst)
be_parameter src2p(*this, inst.param(2), PTYPE_MF);
// general case
- emit_fld_p(dst, inst.size(), src1p); // fld src1p
- emit_fld_p(dst, inst.size(), src2p); // fld src2p
- emit_fdivp(dst); // fdivp
- emit_fstp_p(dst, inst.size(), dstp); // fstp dstp
+ emit_fld_p(a, inst.size(), src1p); // fld src1p
+ emit_fld_p(a, inst.size(), src2p); // fld src2p
+ a.fdivp(); // fdivp
+ emit_fstp_p(a, inst.size(), dstp); // fstp dstp
}
@@ -6401,7 +7022,7 @@ void drcbe_x86::op_fdiv(x86code *&dst, const instruction &inst)
// op_fneg - process a FNEG opcode
//-------------------------------------------------
-void drcbe_x86::op_fneg(x86code *&dst, const instruction &inst)
+void drcbe_x86::op_fneg(Assembler &a, const instruction &inst)
{
// validate instruction
assert(inst.size() == 4 || inst.size() == 8);
@@ -6413,9 +7034,9 @@ void drcbe_x86::op_fneg(x86code *&dst, const instruction &inst)
be_parameter srcp(*this, inst.param(1), PTYPE_MF);
// general case
- emit_fld_p(dst, inst.size(), srcp); // fld srcp
- emit_fchs(dst); // fchs
- emit_fstp_p(dst, inst.size(), dstp); // fstp dstp
+ emit_fld_p(a, inst.size(), srcp); // fld srcp
+ a.fchs(); // fchs
+ emit_fstp_p(a, inst.size(), dstp); // fstp dstp
}
@@ -6423,7 +7044,7 @@ void drcbe_x86::op_fneg(x86code *&dst, const instruction &inst)
// op_fabs - process a FABS opcode
//-------------------------------------------------
-void drcbe_x86::op_fabs(x86code *&dst, const instruction &inst)
+void drcbe_x86::op_fabs(Assembler &a, const instruction &inst)
{
// validate instruction
assert(inst.size() == 4 || inst.size() == 8);
@@ -6435,9 +7056,9 @@ void drcbe_x86::op_fabs(x86code *&dst, const instruction &inst)
be_parameter srcp(*this, inst.param(1), PTYPE_MF);
// general case
- emit_fld_p(dst, inst.size(), srcp); // fld srcp
- emit_fabs(dst); // fabs
- emit_fstp_p(dst, inst.size(), dstp); // fstp dstp
+ emit_fld_p(a, inst.size(), srcp); // fld srcp
+ a.fabs(); // fabs
+ emit_fstp_p(a, inst.size(), dstp); // fstp dstp
}
@@ -6445,7 +7066,7 @@ void drcbe_x86::op_fabs(x86code *&dst, const instruction &inst)
// op_fsqrt - process a FSQRT opcode
//-------------------------------------------------
-void drcbe_x86::op_fsqrt(x86code *&dst, const instruction &inst)
+void drcbe_x86::op_fsqrt(Assembler &a, const instruction &inst)
{
// validate instruction
assert(inst.size() == 4 || inst.size() == 8);
@@ -6457,9 +7078,9 @@ void drcbe_x86::op_fsqrt(x86code *&dst, const instruction &inst)
be_parameter srcp(*this, inst.param(1), PTYPE_MF);
// general case
- emit_fld_p(dst, inst.size(), srcp); // fld srcp
- emit_fsqrt(dst); // fsqrt
- emit_fstp_p(dst, inst.size(), dstp); // fstp dstp
+ emit_fld_p(a, inst.size(), srcp); // fld srcp
+ a.fsqrt(); // fsqrt
+ emit_fstp_p(a, inst.size(), dstp); // fstp dstp
}
@@ -6467,7 +7088,7 @@ void drcbe_x86::op_fsqrt(x86code *&dst, const instruction &inst)
// op_frecip - process a FRECIP opcode
//-------------------------------------------------
-void drcbe_x86::op_frecip(x86code *&dst, const instruction &inst)
+void drcbe_x86::op_frecip(Assembler &a, const instruction &inst)
{
// validate instruction
assert(inst.size() == 4 || inst.size() == 8);
@@ -6479,10 +7100,10 @@ void drcbe_x86::op_frecip(x86code *&dst, const instruction &inst)
be_parameter srcp(*this, inst.param(1), PTYPE_MF);
// general case
- emit_fld1(dst); // fld1
- emit_fld_p(dst, inst.size(), srcp); // fld srcp
- emit_fdivp(dst); // fdivp
- emit_fstp_p(dst, inst.size(), dstp); // fstp dstp
+ a.fld1(); // fld1
+ emit_fld_p(a, inst.size(), srcp); // fld srcp
+ a.fdivp(); // fdivp
+ emit_fstp_p(a, inst.size(), dstp); // fstp dstp
}
@@ -6490,7 +7111,7 @@ void drcbe_x86::op_frecip(x86code *&dst, const instruction &inst)
// op_frsqrt - process a FRSQRT opcode
//-------------------------------------------------
-void drcbe_x86::op_frsqrt(x86code *&dst, const instruction &inst)
+void drcbe_x86::op_frsqrt(Assembler &a, const instruction &inst)
{
// validate instruction
assert(inst.size() == 4 || inst.size() == 8);
@@ -6502,11 +7123,11 @@ void drcbe_x86::op_frsqrt(x86code *&dst, const instruction &inst)
be_parameter srcp(*this, inst.param(1), PTYPE_MF);
// general case
- emit_fld1(dst); // fld1
- emit_fld_p(dst, inst.size(), srcp); // fld srcp
- emit_fsqrt(dst); // fsqrt
- emit_fdivp(dst); // fdivp
- emit_fstp_p(dst, inst.size(), dstp); // fstp dstp
+ a.fld1(); // fld1
+ emit_fld_p(a, inst.size(), srcp); // fld srcp
+ a.fsqrt(); // fsqrt
+ a.fdivp(); // fdivp
+ emit_fstp_p(a, inst.size(), dstp); // fstp dstp
}
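FRECIP and FRSQRT are emitted as exact x87 operations: fld1 pushes the constant 1.0, which is then divided by the (optionally square-rooted) source with fdivp, rather than using the fast approximate SSE rcpss/rsqrtss instructions. Behavioural models of the two sequences (hypothetical names):

    #include <cmath>

    // What the fld1 / fld srcp / [fsqrt] / fdivp sequences above compute.
    double frecip_model(double x) { return 1.0 / x; }
    double frsqrt_model(double x) { return 1.0 / std::sqrt(x); }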
@@ -6514,7 +7135,7 @@ void drcbe_x86::op_frsqrt(x86code *&dst, const instruction &inst)
// op_fcopyi - process a FCOPYI opcode
//-------------------------------------------------
-void drcbe_x86::op_fcopyi(x86code *&dst, const instruction &inst)
+void drcbe_x86::op_fcopyi(Assembler &a, const instruction &inst)
{
// validate instruction
assert(inst.size() == 4 || inst.size() == 8);
@@ -6530,12 +7151,12 @@ void drcbe_x86::op_fcopyi(x86code *&dst, const instruction &inst)
{
if (srcp.is_memory())
{
- emit_mov_r32_m32(dst, REG_EAX, MABS(srcp.memory())); // mov eax,[srcp]
- emit_mov_m32_r32(dst, MABS(dstp.memory()), REG_EAX); // mov [dstp],eax
+ a.mov(eax, MABS(srcp.memory())); // mov eax,[srcp]
+ a.mov(MABS(dstp.memory()), eax); // mov [dstp],eax
}
else if (srcp.is_int_register())
{
- emit_mov_m32_r32(dst, MABS(dstp.memory()), srcp.ireg()); // mov [dstp],srcp
+ a.mov(MABS(dstp.memory()), Gpd(srcp.ireg())); // mov [dstp],srcp
}
}
@@ -6544,17 +7165,17 @@ void drcbe_x86::op_fcopyi(x86code *&dst, const instruction &inst)
{
if (srcp.is_memory())
{
- emit_mov_r32_m32(dst, REG_EAX, MABS(srcp.memory())); // mov eax,[srcp]
- emit_mov_r32_m32(dst, REG_EDX, MABS(srcp.memory(4))); // mov edx,[srcp+4]
+ a.mov(eax, MABS(srcp.memory(0))); // mov eax,[srcp]
+ a.mov(edx, MABS(srcp.memory(4))); // mov edx,[srcp+4]
}
else if (srcp.is_int_register())
{
- emit_mov_r32_m32(dst, REG_EDX, MABS(m_reghi[srcp.ireg()])); // mov edx,[reghi[srcp]]
- emit_mov_r32_r32(dst, REG_EAX, srcp.ireg()); // mov eax,srcp
+ a.mov(edx, MABS(m_reghi[srcp.ireg()])); // mov edx,[reghi[srcp]]
+ a.mov(eax, Gpd(srcp.ireg())); // mov eax,srcp
}
- emit_mov_m32_r32(dst, MABS(dstp.memory()), REG_EAX); // mov [dstp],eax
- emit_mov_m32_r32(dst, MABS(dstp.memory(4)), REG_EDX); // mov [dstp+4],edx
+ a.mov(MABS(dstp.memory(0)), eax); // mov [dstp],eax
+ a.mov(MABS(dstp.memory(4)), edx); // mov [dstp+4],edx
}
}
@@ -6563,7 +7184,7 @@ void drcbe_x86::op_fcopyi(x86code *&dst, const instruction &inst)
// op_icopyf - process a ICOPYF opcode
//-------------------------------------------------
-void drcbe_x86::op_icopyf(x86code *&dst, const instruction &inst)
+void drcbe_x86::op_icopyf(Assembler &a, const instruction &inst)
{
// validate instruction
assert(inst.size() == 4 || inst.size() == 8);
@@ -6577,172 +7198,50 @@ void drcbe_x86::op_icopyf(x86code *&dst, const instruction &inst)
// 32-bit case
if (inst.size() == 4)
{
- emit_mov_r32_m32(dst, REG_EAX, MABS(srcp.memory())); // mov eax,[srcp]
+ a.mov(eax, MABS(srcp.memory())); // mov eax,[srcp]
if (dstp.is_memory())
{
- emit_mov_m32_r32(dst, MABS(dstp.memory()), REG_EAX); // mov [dstp],eax
+ a.mov(MABS(dstp.memory()), eax); // mov [dstp],eax
}
else if (dstp.is_int_register())
{
- emit_mov_r32_r32(dst, dstp.ireg(), REG_EAX); // mov dstp,eax
+ a.mov(Gpd(dstp.ireg()), eax); // mov dstp,eax
}
}
// 64-bit case
else if (inst.size() == 8)
{
- emit_mov_r32_m32(dst, REG_EAX, MABS(srcp.memory())); // mov eax,[srcp]
- emit_mov_r32_m32(dst, REG_EDX, MABS(srcp.memory(4))); // mov edx,[srcp+4]
+ a.mov(eax, MABS(srcp.memory(0))); // mov eax,[srcp]
+ a.mov(edx, MABS(srcp.memory(4))); // mov edx,[srcp+4]
if (dstp.is_memory())
{
- emit_mov_m32_r32(dst, MABS(dstp.memory()), REG_EAX); // mov [dstp],eax
- emit_mov_m32_r32(dst, MABS(dstp.memory(4)), REG_EDX); // mov [dstp+4],edx
+ a.mov(MABS(dstp.memory(0)), eax); // mov [dstp],eax
+ a.mov(MABS(dstp.memory(4)), edx); // mov [dstp+4],edx
}
else
{
- emit_mov_m32_r32(dst, MABS(m_reghi[dstp.ireg()]), REG_EDX); // mov [reghi[dstp]],edx
- emit_mov_r32_r32(dst, dstp.ireg(), REG_EAX); // mov dstp,eax
+ a.mov(MABS(m_reghi[dstp.ireg()]), edx); // mov [reghi[dstp]],edx
+ a.mov(Gpd(dstp.ireg()), eax); // mov dstp,eax
}
}
}
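FCOPYI and ICOPYF move the raw bit pattern between integer and floating-point operands without any numeric conversion, which is why the generated code is nothing but 32-bit mov pairs (low half plus, for 64-bit operands, the high half held in m_reghi). A portable model of the semantics, shown for the 64-bit case (hypothetical helpers; std::bit_cast requires C++20):

    #include <bit>
    #include <cstdint>

    // FCOPYI: reinterpret integer bits as a float value; ICOPYF is the inverse.
    double   fcopyi_model(uint64_t bits)  { return std::bit_cast<double>(bits); }
    uint64_t icopyf_model(double value)   { return std::bit_cast<uint64_t>(value); }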
+} // anonymous namespace
-//**************************************************************************
-// MISCELLAENOUS FUNCTIONS
-//**************************************************************************
-
-//-------------------------------------------------
-// dmulu - perform a double-wide unsigned multiply
-//-------------------------------------------------
-
-int drcbe_x86::dmulu(uint64_t &dstlo, uint64_t &dsthi, uint64_t src1, uint64_t src2, bool flags)
-{
- // shortcut if we don't care about the high bits or the flags
- if (&dstlo == &dsthi && flags == false)
- {
- dstlo = src1 * src2;
- return 0;
- }
-
- // fetch source values
- uint64_t a = src1;
- uint64_t b = src2;
- if (a == 0 || b == 0)
- {
- dsthi = dstlo = 0;
- return FLAG_Z;
- }
-
- // compute high and low parts first
- uint64_t lo = (uint64_t)(uint32_t)(a >> 0) * (uint64_t)(uint32_t)(b >> 0);
- uint64_t hi = (uint64_t)(uint32_t)(a >> 32) * (uint64_t)(uint32_t)(b >> 32);
-
- // compute middle parts
- uint64_t prevlo = lo;
- uint64_t temp = (uint64_t)(uint32_t)(a >> 32) * (uint64_t)(uint32_t)(b >> 0);
- lo += temp << 32;
- hi += (temp >> 32) + (lo < prevlo);
-
- prevlo = lo;
- temp = (uint64_t)(uint32_t)(a >> 0) * (uint64_t)(uint32_t)(b >> 32);
- lo += temp << 32;
- hi += (temp >> 32) + (lo < prevlo);
-
- // store the results
- dsthi = hi;
- dstlo = lo;
- return ((hi >> 60) & FLAG_S) | ((dsthi != 0) << 1);
-}
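The removed dmulu helper computed a full 64x64-to-128-bit unsigned product in portable C by splitting each operand into 32-bit halves and summing the four partial products with explicit carry propagation; dmuls below adds sign handling on top of the same decomposition. The decomposition can be checked against a compiler-provided 128-bit type where one exists (a sketch under that assumption, not MAME code):

    #include <cassert>
    #include <cstdint>

    // Verify the partial-product decomposition used by the removed dmulu
    // helper against unsigned __int128 (a GCC/Clang extension, not MSVC).
    void check_dmulu_decomposition(uint64_t a, uint64_t b)
    {
        uint64_t lo = uint64_t(uint32_t(a)) * uint32_t(b);
        uint64_t hi = uint64_t(uint32_t(a >> 32)) * uint32_t(b >> 32);

        uint64_t prevlo = lo;
        uint64_t mid = uint64_t(uint32_t(a >> 32)) * uint32_t(b);
        lo += mid << 32;
        hi += (mid >> 32) + (lo < prevlo);

        prevlo = lo;
        mid = uint64_t(uint32_t(a)) * uint32_t(b >> 32);
        lo += mid << 32;
        hi += (mid >> 32) + (lo < prevlo);

        unsigned __int128 const full = (unsigned __int128)a * b;
        assert(lo == uint64_t(full) && hi == uint64_t(full >> 64));
    }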
-
-
-//-------------------------------------------------
-// dmuls - perform a double-wide signed multiply
-//-------------------------------------------------
-
-int drcbe_x86::dmuls(uint64_t &dstlo, uint64_t &dsthi, int64_t src1, int64_t src2, bool flags)
+std::unique_ptr<drcbe_interface> make_drcbe_x86(
+ drcuml_state &drcuml,
+ device_t &device,
+ drc_cache &cache,
+ uint32_t flags,
+ int modes,
+ int addrbits,
+ int ignorebits)
{
- uint64_t lo, hi, prevlo;
- uint64_t a, b, temp;
-
- // shortcut if we don't care about the high bits or the flags
- if (&dstlo == &dsthi && flags == false)
- {
- dstlo = src1 * src2;
- return 0;
- }
-
- // fetch absolute source values
- a = src1; if ((int64_t)a < 0) a = -a;
- b = src2; if ((int64_t)b < 0) b = -b;
- if (a == 0 || b == 0)
- {
- dsthi = dstlo = 0;
- return FLAG_Z;
- }
-
- // compute high and low parts first
- lo = (uint64_t)(uint32_t)(a >> 0) * (uint64_t)(uint32_t)(b >> 0);
- hi = (uint64_t)(uint32_t)(a >> 32) * (uint64_t)(uint32_t)(b >> 32);
-
- // compute middle parts
- prevlo = lo;
- temp = (uint64_t)(uint32_t)(a >> 32) * (uint64_t)(uint32_t)(b >> 0);
- lo += temp << 32;
- hi += (temp >> 32) + (lo < prevlo);
-
- prevlo = lo;
- temp = (uint64_t)(uint32_t)(a >> 0) * (uint64_t)(uint32_t)(b >> 32);
- lo += temp << 32;
- hi += (temp >> 32) + (lo < prevlo);
-
- // adjust for signage
- if ((int64_t)(src1 ^ src2) < 0)
- {
- hi = ~hi + (lo == 0);
- lo = ~lo + 1;
- }
-
- // store the results
- dsthi = hi;
- dstlo = lo;
- return ((hi >> 60) & FLAG_S) | ((dsthi != ((int64_t)lo >> 63)) << 1);
-}
-
-
-//-------------------------------------------------
-// ddivu - perform a double-wide unsigned divide
-//-------------------------------------------------
-
-int drcbe_x86::ddivu(uint64_t &dstlo, uint64_t &dsthi, uint64_t src1, uint64_t src2)
-{
- // do nothing if src2 == 0
- if (src2 == 0)
- return FLAG_V;
-
- dstlo = src1 / src2;
- if (&dstlo != &dsthi)
- dsthi = src1 % src2;
- return ((dstlo == 0) << 2) | ((dstlo >> 60) & FLAG_S);
-}
-
-
-//-------------------------------------------------
-// ddivs - perform a double-wide signed divide
-//-------------------------------------------------
-
-int drcbe_x86::ddivs(uint64_t &dstlo, uint64_t &dsthi, int64_t src1, int64_t src2)
-{
- // do nothing if src2 == 0
- if (src2 == 0)
- return FLAG_V;
-
- dstlo = src1 / src2;
- if (&dstlo != &dsthi)
- dsthi = src1 % src2;
- return ((dstlo == 0) << 2) | ((dstlo >> 60) & FLAG_S);
+ return std::unique_ptr<drcbe_interface>(new drcbe_x86(drcuml, device, cache, flags, modes, addrbits, ignorebits));
}
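With the concrete drcbe_x86 class now hidden in the anonymous namespace, this factory is the only way for callers to obtain the 32-bit x86 backend. A hypothetical call site, mirroring the parameters of the signature above (the surrounding variable names are illustrative only):

    // Hypothetical call site: a frontend requesting the 32-bit x86 backend.
    std::unique_ptr<drc::drcbe_interface> backend =
            drc::make_drcbe_x86(drcuml, device, cache, flags, modes, addrbits, ignorebits);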
} // namespace drc