// license:BSD-3-Clause // copyright-holders:Aaron Giles /*************************************************************************** drcbex86.c 32-bit x86 back-end for the universal machine language. **************************************************************************** Future improvements/changes: * Optimize to avoid unnecessary reloads - especially EDX for 64-bit operations - also FCMP/FLAGS has unnecessary PUSHF/POP EAX * Identify common pairs and optimize output * Convert SUB a,0,b to NEG * Optimize, e.g., and [r5],i0,$FF to use ebx as temporary register (avoid initial move) if i0 is not needed going forward **************************************************************************** --------------- ABI/conventions --------------- Registers: EAX - volatile, function return value EBX - non-volatile ECX - volatile EDX - volatile, function return value (upper 32 bits) ESI - non-volatile EDI - non-volatile EBP - non-volatile FP stack - volatile --------------- Execution model --------------- Registers: EAX - scratch register EBX - maps to I0 (low 32 bits) ECX - scratch register EDX - scratch register ESI - maps to I1 (low 32 bits) EDI - maps to I2 (low 32 bits) EBP - maps to I3 (low 32 bits) FP stack - scratch registers Entry point: Assumes 1 parameter passed, which is the codeptr of the code to execute once the environment is set up. Exit point: Assumes exit value is in EAX. Entry stack: [esp] - return [esp+4] - input parameter (entry handle) Runtime stack: [esp] - param 0 [esp+4] - param 1 [esp+8] - param 2 [esp+12] - param 3 [esp+16] - param 4 [esp+20] - alignment [esp+24] - alignment [esp+28] - saved ebp [esp+32] - saved edi [esp+36] - saved esi [esp+40] - saved ebx [esp+44] - ret [esp+48] - input parameter (entry handle) **************************************************************************/ #include "emu.h" #include "drcbex86.h" #include "drcbeut.h" #include "x86log.h" #include "debug/debugcpu.h" #include "emuopts.h" #include "mfpresolve.h" #include "asmjit/src/asmjit/asmjit.h" #include #include #include namespace drc { namespace { using namespace uml; using namespace asmjit; using namespace asmjit::x86; //************************************************************************** // DEBUGGING //************************************************************************** #define LOG_HASHJMPS (0) //************************************************************************** // CONSTANTS //************************************************************************** #ifdef _WIN32 constexpr bool USE_THISCALL = true; #else constexpr bool USE_THISCALL = false; #endif const uint32_t PTYPE_M = 1 << parameter::PTYPE_MEMORY; const uint32_t PTYPE_I = 1 << parameter::PTYPE_IMMEDIATE; const uint32_t PTYPE_R = 1 << parameter::PTYPE_INT_REGISTER; const uint32_t PTYPE_F = 1 << parameter::PTYPE_FLOAT_REGISTER; //const uint32_t PTYPE_MI = PTYPE_M | PTYPE_I; //const uint32_t PTYPE_RI = PTYPE_R | PTYPE_I; const uint32_t PTYPE_MR = PTYPE_M | PTYPE_R; const uint32_t PTYPE_MRI = PTYPE_M | PTYPE_R | PTYPE_I; const uint32_t PTYPE_MF = PTYPE_M | PTYPE_F; // size-to-mask table //const uint64_t size_to_mask[] = { 0, 0xff, 0xffff, 0, 0xffffffff, 0, 0, 0, 0xffffffffffffffffU }; // register mapping tables const Gp::Id int_register_map[REG_I_COUNT] = { Gp::kIdBx, Gp::kIdSi, Gp::kIdDi, Gp::kIdBp }; // flags mapping tables uint8_t flags_map[0x1000]; uint32_t flags_unmap[0x20]; // condition mapping table const CondCode condition_map[uml::COND_MAX - uml::COND_Z] = { CondCode::kZ, // COND_Z = 0x80, requires Z 
CondCode::kNZ, // COND_NZ, requires Z CondCode::kS, // COND_S, requires S CondCode::kNS, // COND_NS, requires S CondCode::kC, // COND_C, requires C CondCode::kNC, // COND_NC, requires C CondCode::kO, // COND_V, requires V CondCode::kNO, // COND_NV, requires V CondCode::kP, // COND_U, requires U CondCode::kNP, // COND_NU, requires U CondCode::kA, // COND_A, requires CZ CondCode::kBE, // COND_BE, requires CZ CondCode::kG, // COND_G, requires SVZ CondCode::kLE, // COND_LE, requires SVZ CondCode::kL, // COND_L, requires SV CondCode::kGE, // COND_GE, requires SV }; // FPU control register mapping const uint16_t fp_control[4] = { 0x0e3f, // ROUND_TRUNC 0x023f, // ROUND_ROUND 0x0a3f, // ROUND_CEIL 0x063f // ROUND_FLOOR }; //************************************************************************** // MACROS //************************************************************************** #define X86_CONDITION(condition) (condition_map[condition - uml::COND_Z]) #define X86_NOT_CONDITION(condition) negateCond(condition_map[condition - uml::COND_Z]) #define assert_no_condition(inst) assert((inst).condition() == uml::COND_ALWAYS) #define assert_any_condition(inst) assert((inst).condition() == uml::COND_ALWAYS || ((inst).condition() >= uml::COND_Z && (inst).condition() < uml::COND_MAX)) #define assert_no_flags(inst) assert((inst).flags() == 0) #define assert_flags(inst, valid) assert(((inst).flags() & ~(valid)) == 0) //************************************************************************** // MISCELLAENOUS FUNCTIONS //************************************************************************** void calculate_status_flags(Assembler &a, Operand const &dst, u8 flags) { // calculate status flags in a way that does not modify any other status flags uint32_t flagmask = 0; if (flags & FLAG_C) flagmask |= 0x001; if (flags & FLAG_V) flagmask |= 0x800; if (flags & FLAG_Z) flagmask |= 0x040; if (flags & FLAG_S) flagmask |= 0x080; if (flags & FLAG_U) flagmask |= 0x004; if ((flags & (FLAG_Z | FLAG_S)) == flags) { Gp tempreg = dst.isMem() ? eax : dst.as().id() == ebx.id() ? eax : ebx; Gp tempreg2 = dst.isMem() ? edx : dst.as().id() == ecx.id() ? edx : ecx; if (dst.isMem()) { a.push(tempreg2); a.mov(tempreg2, dst.as()); } a.push(tempreg); a.pushfd(); a.pop(tempreg); a.and_(tempreg, ~flagmask); a.add(dst.isMem() ? 
tempreg2.as() : dst.as(), 0); a.pushfd(); a.and_(dword_ptr(esp), flagmask); a.or_(dword_ptr(esp), tempreg); a.popfd(); a.pop(tempreg); if (dst.isMem()) a.pop(tempreg2); } else { fatalerror("drcbe_x86::calculate_status_flags: unknown flag combination requested: %02x\n", flags); } } //------------------------------------------------- // dmulu - perform a double-wide unsigned multiply //------------------------------------------------- template int dmulu(uint64_t &dstlo, uint64_t &dsthi, uint64_t src1, uint64_t src2, bool flags) { // shortcut if we don't care about the high bits or the flags if (&dstlo == &dsthi && !flags) { dstlo = src1 * src2; return 0; } if (!src1 || !src2) { dsthi = dstlo = 0; return FLAG_Z; } // compute high and low parts first uint64_t lo = uint64_t(uint32_t(src1 >> 0)) * uint64_t(uint32_t(src2 >> 0)); uint64_t hi = uint64_t(uint32_t(src1 >> 32)) * uint64_t(uint32_t(src2 >> 32)); // compute middle parts uint64_t prevlo = lo; uint64_t temp = uint64_t(uint32_t(src1 >> 32)) * uint64_t(uint32_t(src2 >> 0)); lo += temp << 32; hi += (temp >> 32) + (lo < prevlo); prevlo = lo; temp = uint64_t(uint32_t(src1 >> 0)) * uint64_t(uint32_t(src2 >> 32)); lo += temp << 32; hi += (temp >> 32) + (lo < prevlo); // store the results dsthi = hi; dstlo = lo; if (HalfmulFlags) return ((lo >> 60) & FLAG_S) | (hi ? FLAG_V : 0) | (!lo ? FLAG_Z : 0); else return ((hi >> 60) & FLAG_S) | (hi ? FLAG_V : 0) | ((!hi && !lo) ? FLAG_Z : 0); } //------------------------------------------------- // dmuls - perform a double-wide signed multiply //------------------------------------------------- template int dmuls(uint64_t &dstlo, uint64_t &dsthi, int64_t src1, int64_t src2, bool flags) { uint64_t lo, hi, prevlo; uint64_t a, b, temp; // shortcut if we don't care about the high bits or the flags if (&dstlo == &dsthi && !flags) { dstlo = src1 * src2; return 0; } if (!src1 || !src2) { dsthi = dstlo = 0; return FLAG_Z; } // fetch absolute source values a = src1; if (int64_t(a) < 0) a = -a; b = src2; if (int64_t(b) < 0) b = -b; // compute high and low parts first lo = uint64_t(uint32_t(a >> 0)) * uint64_t(uint32_t(b >> 0)); hi = uint64_t(uint32_t(a >> 32)) * uint64_t(uint32_t(b >> 32)); // compute middle parts prevlo = lo; temp = uint64_t(uint32_t(a >> 32)) * uint64_t(uint32_t(b >> 0)); lo += temp << 32; hi += (temp >> 32) + (lo < prevlo); prevlo = lo; temp = uint64_t(uint32_t(a >> 0)) * uint64_t(uint32_t(b >> 32)); lo += temp << 32; hi += (temp >> 32) + (lo < prevlo); // adjust for signage if (int64_t(src1 ^ src2) < 0) { hi = ~hi + (lo == 0); lo = ~lo + 1; } // store the results dsthi = hi; dstlo = lo; if (HalfmulFlags) return ((lo >> 60) & FLAG_S) | ((hi != (int64_t(lo) >> 63)) ? FLAG_V : 0) | (!lo ? FLAG_Z : 0); else return ((hi >> 60) & FLAG_S) | ((hi != (int64_t(lo) >> 63)) ? FLAG_V : 0) | ((!hi && !lo) ? 
FLAG_Z : 0); } //------------------------------------------------- // ddivu - perform a double-wide unsigned divide //------------------------------------------------- int ddivu(uint64_t &dstlo, uint64_t &dsthi, uint64_t src1, uint64_t src2) { // do nothing if src2 == 0 if (src2 == 0) return FLAG_V; dstlo = src1 / src2; if (&dstlo != &dsthi) dsthi = src1 % src2; return ((dstlo == 0) << 2) | ((dstlo >> 60) & FLAG_S); } //------------------------------------------------- // ddivs - perform a double-wide signed divide //------------------------------------------------- int ddivs(uint64_t &dstlo, uint64_t &dsthi, int64_t src1, int64_t src2) { // do nothing if src2 == 0 if (src2 == 0) return FLAG_V; dstlo = src1 / src2; if (&dstlo != &dsthi) dsthi = src1 % src2; return ((dstlo == 0) << 2) | ((dstlo >> 60) & FLAG_S); } //************************************************************************** // TYPE DEFINITIONS //************************************************************************** class ThrowableErrorHandler : public ErrorHandler { public: void handleError(Error err, const char *message, BaseEmitter *origin) override { throw emu_fatalerror("asmjit error %d: %s", err, message); } }; class drcbe_x86 : public drcbe_interface { using x86_entry_point_func = uint32_t (*)(x86code *entry); public: // construction/destruction drcbe_x86(drcuml_state &drcuml, device_t &device, drc_cache &cache, uint32_t flags, int modes, int addrbits, int ignorebits); virtual ~drcbe_x86(); // required overrides virtual void reset() override; virtual int execute(uml::code_handle &entry) override; virtual void generate(drcuml_block &block, const uml::instruction *instlist, uint32_t numinst) override; virtual bool hash_exists(uint32_t mode, uint32_t pc) const noexcept override; virtual void get_info(drcbe_info &info) const noexcept override; virtual bool logging() const noexcept override { return m_log != nullptr; } private: // HACK: leftover from x86emit static inline constexpr int REG_MAX = 16; // a be_parameter is similar to a uml::parameter but maps to native registers/memory class be_parameter { public: // parameter types enum be_parameter_type { PTYPE_NONE = 0, // invalid PTYPE_IMMEDIATE, // immediate; value = sign-extended to 64 bits PTYPE_INT_REGISTER, // integer register; value = 0-REG_MAX PTYPE_FLOAT_REGISTER, // floating point register; value = 0-REG_MAX PTYPE_MEMORY, // memory; value = pointer to memory PTYPE_MAX }; // represents the value of a parameter typedef uint64_t be_parameter_value; // construction be_parameter() : m_type(PTYPE_NONE), m_value(0) { } be_parameter(uint64_t val) : m_type(PTYPE_IMMEDIATE), m_value(val) { } be_parameter(drcbe_x86 &drcbe, const uml::parameter ¶m, uint32_t allowed); be_parameter(const be_parameter ¶m) = default; // creators for types that don't safely default static be_parameter make_ireg(int regnum) { assert(regnum >= 0 && regnum < REG_MAX); return be_parameter(PTYPE_INT_REGISTER, regnum); } static be_parameter make_freg(int regnum) { assert(regnum >= 0 && regnum < REG_MAX); return be_parameter(PTYPE_FLOAT_REGISTER, regnum); } static be_parameter make_memory(void *base) { return be_parameter(PTYPE_MEMORY, reinterpret_cast(base)); } static be_parameter make_memory(const void *base) { return be_parameter(PTYPE_MEMORY, reinterpret_cast(const_cast(base))); } // operators bool operator==(be_parameter const &rhs) const { return (m_type == rhs.m_type && m_value == rhs.m_value); } bool operator!=(be_parameter const &rhs) const { return (m_type != rhs.m_type || m_value != 
rhs.m_value); } // getters be_parameter_type type() const { return m_type; } uint64_t immediate() const { assert(m_type == PTYPE_IMMEDIATE); return m_value; } uint32_t ireg() const { assert(m_type == PTYPE_INT_REGISTER); assert(m_value < REG_MAX); return m_value; } uint32_t freg() const { assert(m_type == PTYPE_FLOAT_REGISTER); assert(m_value < REG_MAX); return m_value; } void *memory(uint32_t offset = 0) const { assert(m_type == PTYPE_MEMORY); return reinterpret_cast(m_value + offset); } // type queries bool is_immediate() const { return (m_type == PTYPE_IMMEDIATE); } bool is_int_register() const { return (m_type == PTYPE_INT_REGISTER); } bool is_float_register() const { return (m_type == PTYPE_FLOAT_REGISTER); } bool is_memory() const { return (m_type == PTYPE_MEMORY); } // other queries bool is_immediate_value(uint64_t value) const { return (m_type == PTYPE_IMMEDIATE && m_value == value); } // helpers Gpd select_register(Gpd const &defreg) const; Xmm select_register(Xmm defreg) const; template T select_register(T defreg, be_parameter const &checkparam) const; template T select_register(T defreg, be_parameter const &checkparam, be_parameter const &checkparam2) const; private: // private constructor be_parameter(be_parameter_type type, be_parameter_value value) : m_type(type), m_value(value) { } // internals be_parameter_type m_type; // parameter type be_parameter_value m_value; // parameter value }; // helpers Mem MABS(void const *base, u32 const size = 0) const { return Mem(uintptr_t(base), size); } void normalize_commutative(be_parameter &inner, be_parameter &outer); void emit_combine_z_flags(Assembler &a); void emit_combine_zs_flags(Assembler &a); void emit_combine_z_shl_flags(Assembler &a); void reset_last_upper_lower_reg(); void set_last_lower_reg(Assembler &a, be_parameter const ¶m, Gp const ®lo); void set_last_upper_reg(Assembler &a, be_parameter const ¶m, Gp const ®hi); bool can_skip_lower_load(Assembler &a, uint32_t *memref, Gp const ®lo); bool can_skip_upper_load(Assembler &a, uint32_t *memref, Gp const ®hi); [[noreturn]] void end_of_block() const; static void debug_log_hashjmp(int mode, offs_t pc); void generate_one(Assembler &a, const uml::instruction &inst); // code generators void op_handle(Assembler &a, const uml::instruction &inst); void op_hash(Assembler &a, const uml::instruction &inst); void op_label(Assembler &a, const uml::instruction &inst); void op_comment(Assembler &a, const uml::instruction &inst); void op_mapvar(Assembler &a, const uml::instruction &inst); void op_nop(Assembler &a, const uml::instruction &inst); void op_break(Assembler &a, const uml::instruction &inst); void op_debug(Assembler &a, const uml::instruction &inst); void op_exit(Assembler &a, const uml::instruction &inst); void op_hashjmp(Assembler &a, const uml::instruction &inst); void op_jmp(Assembler &a, const uml::instruction &inst); void op_exh(Assembler &a, const uml::instruction &inst); void op_callh(Assembler &a, const uml::instruction &inst); void op_ret(Assembler &a, const uml::instruction &inst); void op_callc(Assembler &a, const uml::instruction &inst); void op_recover(Assembler &a, const uml::instruction &inst); void op_setfmod(Assembler &a, const uml::instruction &inst); void op_getfmod(Assembler &a, const uml::instruction &inst); void op_getexp(Assembler &a, const uml::instruction &inst); void op_getflgs(Assembler &a, const uml::instruction &inst); void op_setflgs(Assembler &a, const uml::instruction &inst); void op_save(Assembler &a, const uml::instruction &inst); void 
op_restore(Assembler &a, const uml::instruction &inst); void op_load(Assembler &a, const uml::instruction &inst); void op_loads(Assembler &a, const uml::instruction &inst); void op_store(Assembler &a, const uml::instruction &inst); void op_read(Assembler &a, const uml::instruction &inst); void op_readm(Assembler &a, const uml::instruction &inst); void op_write(Assembler &a, const uml::instruction &inst); void op_writem(Assembler &a, const uml::instruction &inst); void op_carry(Assembler &a, const uml::instruction &inst); void op_set(Assembler &a, const uml::instruction &inst); void op_mov(Assembler &a, const uml::instruction &inst); void op_sext(Assembler &a, const uml::instruction &inst); void op_roland(Assembler &a, const uml::instruction &inst); void op_rolins(Assembler &a, const uml::instruction &inst); void op_add(Assembler &a, const uml::instruction &inst); void op_addc(Assembler &a, const uml::instruction &inst); void op_sub(Assembler &a, const uml::instruction &inst); void op_subc(Assembler &a, const uml::instruction &inst); void op_cmp(Assembler &a, const uml::instruction &inst); void op_mulu(Assembler &a, const uml::instruction &inst); void op_mululw(Assembler &a, const uml::instruction &inst); void op_muls(Assembler &a, const uml::instruction &inst); void op_mulslw(Assembler &a, const uml::instruction &inst); void op_divu(Assembler &a, const uml::instruction &inst); void op_divs(Assembler &a, const uml::instruction &inst); void op_and(Assembler &a, const uml::instruction &inst); void op_test(Assembler &a, const uml::instruction &inst); void op_or(Assembler &a, const uml::instruction &inst); void op_xor(Assembler &a, const uml::instruction &inst); void op_lzcnt(Assembler &a, const uml::instruction &inst); void op_tzcnt(Assembler &a, const uml::instruction &inst); void op_bswap(Assembler &a, const uml::instruction &inst); void op_shl(Assembler &a, const uml::instruction &inst); void op_shr(Assembler &a, const uml::instruction &inst); void op_sar(Assembler &a, const uml::instruction &inst); void op_ror(Assembler &a, const uml::instruction &inst); void op_rol(Assembler &a, const uml::instruction &inst); void op_rorc(Assembler &a, const uml::instruction &inst); void op_rolc(Assembler &a, const uml::instruction &inst); void op_fload(Assembler &a, const uml::instruction &inst); void op_fstore(Assembler &a, const uml::instruction &inst); void op_fread(Assembler &a, const uml::instruction &inst); void op_fwrite(Assembler &a, const uml::instruction &inst); void op_fmov(Assembler &a, const uml::instruction &inst); void op_ftoint(Assembler &a, const uml::instruction &inst); void op_ffrint(Assembler &a, const uml::instruction &inst); void op_ffrflt(Assembler &a, const uml::instruction &inst); void op_frnds(Assembler &a, const uml::instruction &inst); void op_fadd(Assembler &a, const uml::instruction &inst); void op_fsub(Assembler &a, const uml::instruction &inst); void op_fcmp(Assembler &a, const uml::instruction &inst); void op_fmul(Assembler &a, const uml::instruction &inst); void op_fdiv(Assembler &a, const uml::instruction &inst); void op_fneg(Assembler &a, const uml::instruction &inst); void op_fabs(Assembler &a, const uml::instruction &inst); void op_fsqrt(Assembler &a, const uml::instruction &inst); void op_frecip(Assembler &a, const uml::instruction &inst); void op_frsqrt(Assembler &a, const uml::instruction &inst); void op_fcopyi(Assembler &a, const uml::instruction &inst); void op_icopyf(Assembler &a, const uml::instruction &inst); // 32-bit code emission helpers void 
emit_mov_r32_p32(Assembler &a, Gp const &reg, be_parameter const &param);
	void emit_mov_r32_p32_keepflags(Assembler &a, Gp const &reg, be_parameter const &param);
	void emit_mov_m32_p32(Assembler &a, Mem memref, be_parameter const &param);
// license:MIT
// copyright-holders:Ole Christian Eidheim, Miodrag Milanovic
#ifndef MAME_LIB_UTIL_SERVER_HTTP_IMPL_HPP
#define MAME_LIB_UTIL_SERVER_HTTP_IMPL_HPP

#if defined(_MSC_VER)
#pragma warning(disable:4503)
#endif

#include "server_http.hpp"
#include "asio.h"
#include <asio/system_timer.hpp>
#include "path_to_regex.hpp"

#include <map>
#include <unordered_map>
#include <thread>
#include <functional>
#include <iostream>
#include <sstream>
#include <regex>

namespace webpp {
	template <class socket_type>
	class Server;

	template <class socket_type>
	class ServerBase {
	public:
		virtual ~ServerBase() {}

		class Response : public webpp::Response {
			friend class ServerBase<socket_type>;

			asio::streambuf m_streambuf;
			std::shared_ptr<socket_type> m_socket;

			std::ostream m_ostream;
			std::stringstream m_header;

			explicit Response(const std::shared_ptr<socket_type> &socket) : m_socket(socket), m_ostream(&m_streambuf) {}

			static std::string statusToString(int status)
			{
				switch (status) {
					default:
					case 200: return "HTTP/1.0 200 OK\r\n";
					case 201: return "HTTP/1.0 201 Created\r\n";
					case 202: return "HTTP/1.0 202 Accepted\r\n";
					case 204: return "HTTP/1.0 204 No Content\r\n";
					case 300: return "HTTP/1.0 300 Multiple Choices\r\n";
					case 301: return "HTTP/1.0 301 Moved Permanently\r\n";
					case 302: return "HTTP/1.0 302 Moved Temporarily\r\n";
					case 304: return "HTTP/1.0 304 Not Modified\r\n";
					case 400: return "HTTP/1.0 400 Bad Request\r\n";
					case 401: return "HTTP/1.0 401 Unauthorized\r\n";
					case 403: return "HTTP/1.0 403 Forbidden\r\n";
					case 404: return "HTTP/1.0 404 Not Found\r\n";
					case 500: return "HTTP/1.0 500 Internal Server Error\r\n";
					case 501: return "HTTP/1.0 501 Not Implemented\r\n";
					case 502: return "HTTP/1.0 502 Bad Gateway\r\n";
					case 504: return "HTTP/1.0 503 Service Unavailable\r\n";
				}
			}
		public:
			virtual Response& status(int number) { m_ostream << statusToString(number); return *this; }
			virtual void type(std::string str) { m_header << "Content-Type: "<< str << "\r\n"; }
			virtual void send(std::string str) { m_ostream << m_header.str() << "Content-Length: " << str.length() << "\r\n\r\n" << str; }
			virtual size_t size() const { return m_streambuf.size(); }
			std::shared_ptr<socket_type> socket() { return m_socket; }

			/// If true, force the server to close the connection after the response has been sent.
			///
			/// This is useful when implementing an HTTP/1.0 server sending content
			/// without specifying the content length.
			bool close_connection_after_response = false;
			virtual ~Response() {}
		};
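
		// Illustrative sketch (not part of the original header): how a handler would
		// compose a reply with the Response helpers above. Only status(), type() and
		// send() from the public API are used; the route and JSON payload are made up,
		// and on_get() is introduced further down in this class.
		//
		//     server.on_get("/status", [](auto response, auto /*request*/) {
		//         response->status(200);                // writes the status line
		//         response->type("application/json");   // buffered into m_header
		//         response->send("{ \"ok\": true }");    // emits headers, Content-Length and body
		//     });
		//
		// type() only buffers the header line; send() is what flushes the buffered
		// headers followed by the body into the response stream.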

		class Content : public std::istream {
			friend class ServerBase<socket_type>;
		public:
			size_t size() const {
				return streambuf.size();
			}
			std::string string() const {
				std::stringstream ss;
				ss << rdbuf();
				return ss.str();
			}
		private:
			asio::streambuf &streambuf;
			explicit Content(asio::streambuf &streambuf): std::istream(&streambuf), streambuf(streambuf) {}
		};

		class Request : public webpp::Request {
			friend class ServerBase<socket_type>;
			friend class Server<socket_type>;
		public:
			Content content;

			virtual ~Request() {}
		private:
			Request(const socket_type &socket): content(streambuf) {
				try {
					remote_endpoint_address=socket.lowest_layer().remote_endpoint().address().to_string();
					remote_endpoint_port=socket.lowest_layer().remote_endpoint().port();
				}
				catch(...) {}
			}
			asio::streambuf streambuf;
		};

		class Config {
			friend class ServerBase<socket_type>;

			Config(unsigned short port) : port(port) {}
		public:
			/// Port number to use. Defaults to 80 for HTTP and 443 for HTTPS.
			unsigned short port;
			/// Number of threads that the server will use when start() is called. Defaults to 1 thread.
			size_t thread_pool_size=1;
			/// Timeout on request handling. Defaults to 5 seconds.
			size_t timeout_request=5;
			/// Timeout on content handling. Defaults to 300 seconds.
			size_t timeout_content=300;
			/// IPv4 address in dotted decimal form or IPv6 address in hexadecimal notation.
			/// If empty, the address will be any address.
			std::string address;
			/// Set to false to avoid binding the socket to an address that is already in use. Defaults to true.
			bool reuse_address=true;
		};
		///Set before calling start().
		Config m_config;
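
		// Illustrative sketch (not part of the original header): typical Config usage.
		// The fields are plain members assigned on m_config before start(); the port,
		// address and timeout values below are placeholders.
		//
		//     webpp::http_server server;               // defaults to port 80
		//     server.m_config.port = 8080;             // listen on 8080 instead
		//     server.m_config.address = "127.0.0.1";   // bind to loopback only
		//     server.m_config.timeout_request = 10;    // seconds to wait for the request head
		//     server.start();                          // blocks unless an external io_context is used
		//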
		private:
		  class regex_orderable : public std::regex {
			  std::string str;
		  public:
			  regex_orderable(std::regex reg, const std::string &regex_str) : std::regex(reg), str(regex_str) {}
			  bool operator<(const regex_orderable &rhs) const {
				  return str<rhs.str;
			  }
			  std::string getstr() const { return str; }
		  };
		using http_handler = std::function<void(std::shared_ptr<Response>, std::shared_ptr<Request>)>;

	public:
		template<class T> void on_get(std::string regex, T&& func) { std::lock_guard<std::mutex> lock(m_resource_mutex); path2regex::Keys keys; auto reg = path2regex::path_to_regex(regex, keys); m_resource[regex_orderable(reg,regex)]["GET"] = std::make_tuple(std::move(keys), func); }
		template<class T> void on_get(T&& func) { std::lock_guard<std::mutex> lock(m_resource_mutex); m_default_resource["GET"] = func; }
		template<class T> void on_post(std::string regex, T&& func) { std::lock_guard<std::mutex> lock(m_resource_mutex); path2regex::Keys keys; auto reg = path2regex::path_to_regex(regex, keys); m_resource[regex_orderable(reg, regex)]["POST"] = std::make_tuple(std::move(keys), func); }
		template<class T> void on_post(T&& func) { std::lock_guard<std::mutex> lock(m_resource_mutex); m_default_resource["POST"] = func; }
		template<class T> void on_put(std::string regex, T&& func) { std::lock_guard<std::mutex> lock(m_resource_mutex);  path2regex::Keys keys; auto reg = path2regex::path_to_regex(regex, keys); m_resource[regex_orderable(reg, regex)]["PUT"] = std::make_tuple(std::move(keys), func); }
		template<class T> void on_put(T&& func) { std::lock_guard<std::mutex> lock(m_resource_mutex);  m_default_resource["PUT"] = func; }
		template<class T> void on_patch(std::string regex, T&& func) { std::lock_guard<std::mutex> lock(m_resource_mutex); path2regex::Keys keys; auto reg = path2regex::path_to_regex(regex, keys); m_resource[regex_orderable(reg, regex)]["PATCH"] = std::make_tuple(std::move(keys), func); }
		template<class T> void on_patch(T&& func) { std::lock_guard<std::mutex> lock(m_resource_mutex); m_default_resource["PATCH"] = func; }
		template<class T> void on_delete(std::string regex, T&& func) { std::lock_guard<std::mutex> lock(m_resource_mutex); path2regex::Keys keys; auto reg = path2regex::path_to_regex(regex, keys); m_resource[regex_orderable(reg, regex)]["DELETE"] = std::make_tuple(std::move(keys), func); }
		template<class T> void on_delete(T&& func) { std::lock_guard<std::mutex> lock(m_resource_mutex); m_default_resource["DELETE"] = func; }

		void remove_handler(std::string regex)
		{
			std::lock_guard<std::mutex> lock(m_resource_mutex);
			for (auto it = m_resource.begin(); it != m_resource.end(); ++it)
			{
				if (it->first.getstr() == regex)
				{
					m_resource.erase(it);
					break;
				}
			}
		}
		void clear()
		{
			std::lock_guard<std::mutex> lock(m_resource_mutex);
			m_resource.clear();
		}

		std::function<void(std::shared_ptr<typename ServerBase<socket_type>::Request>, const std::error_code&)> on_error;

		std::function<void(std::shared_ptr<socket_type> socket, std::shared_ptr<typename ServerBase<socket_type>::Request>)> on_upgrade;
	private:
		/// Warning: do not access (read or write) m_resource without holding m_resource_mutex
		std::map<regex_orderable, std::map<std::string, std::tuple<path2regex::Keys, http_handler>>>  m_resource;
		std::mutex m_resource_mutex;

		std::map<std::string, http_handler> m_default_resource;

	public:
		virtual void start() {
			if(!m_io_context)
				m_io_context=std::make_shared<asio::io_context>();

			if(m_io_context->stopped())
				m_io_context.reset();

			asio::ip::tcp::endpoint endpoint;
			if(m_config.address.size()>0)
				endpoint=asio::ip::tcp::endpoint(asio::ip::make_address(m_config.address), m_config.port);
			else
				endpoint=asio::ip::tcp::endpoint(asio::ip::tcp::v4(), m_config.port);

			if(!acceptor)
				acceptor= std::make_unique<asio::ip::tcp::acceptor>(*m_io_context);
			acceptor->open(endpoint.protocol());
			acceptor->set_option(asio::socket_base::reuse_address(m_config.reuse_address));
			acceptor->bind(endpoint);
			acceptor->listen();

			accept();

			if (!m_external_context)
				m_io_context->run();
		}

		void stop() const
		{
			acceptor->close();
			if (!m_external_context)
				m_io_context->stop();
		}

		///Use this function if you need to recursively send parts of a longer message
		void send(const std::shared_ptr<Response> &response, const std::function<void(const std::error_code&)>& callback=nullptr) const {
			asio::async_write(*response->socket(), response->m_streambuf, [response, callback](const std::error_code& ec, size_t /*bytes_transferred*/) {
				if(callback)
					callback(ec);
			});
		}
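
		// Illustrative note (not part of the original header): write_response() below
		// calls this automatically through the Response deleter, so handlers normally
		// never call it. Doing so by hand only makes sense when the streambuf is filled
		// in several rounds; the error handling here is a placeholder.
		//
		//     server.send(response, [](const std::error_code &ec) {
		//         if (ec) {
		//             // the async_write failed; the client has most likely disconnected
		//         }
		//     });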

		void set_io_context(std::shared_ptr<asio::io_context> new_io_context)
		{
			m_io_context = new_io_context;
			m_external_context = true;
		}
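
		// Illustrative sketch (not part of the original header): sharing an io_context
		// the caller already owns. With an external context start() only sets up the
		// acceptor and posts the first accept; it neither calls run() nor stop()s the
		// context, so the caller keeps driving it. The names below are placeholders.
		//
		//     auto ctx = std::make_shared<asio::io_context>();
		//     webpp::http_server server;
		//     server.m_config.port = 8080;
		//     server.set_io_context(ctx);   // marks the context as external
		//     server.start();               // returns after queuing the first accept on ctx
		//     ctx->run();                   // caller pumps the shared context
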
	protected:
		std::shared_ptr<asio::io_context> m_io_context;
		bool m_external_context;
		std::unique_ptr<asio::ip::tcp::acceptor> acceptor;
		std::vector<std::thread> threads;

		ServerBase(unsigned short port) : m_config(port), m_external_context(false) {}

		virtual void accept()=0;

		std::shared_ptr<asio::system_timer> get_timeout_timer(const std::shared_ptr<socket_type> &socket, long seconds) {
			if(seconds==0)
				return nullptr;
			auto timer = std::make_shared<asio::system_timer>(*m_io_context);
			timer->expires_at(std::chrono::system_clock::now() + std::chrono::seconds(seconds));
			timer->async_wait([socket](const std::error_code& ec){
				if(!ec) {
					std::error_code newec = ec;
					socket->lowest_layer().shutdown(asio::ip::tcp::socket::shutdown_both, newec);
					socket->lowest_layer().close();
				}
			});
			return timer;
		}
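
		// Illustrative note (not part of the original header): the returned timer is the
		// whole timeout mechanism. If it fires first it shuts down and closes the socket,
		// which makes the pending async_read/async_write complete with an error; if the
		// I/O completes first, the caller invokes timer->cancel() so the socket stays
		// open. A timeout of 0 disables the mechanism (nullptr is returned).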

		void read_request_and_content(const std::shared_ptr<socket_type> &socket) {
			//Create new streambuf (Request::streambuf) for async_read_until()
			//shared_ptr is used to pass temporary objects to the asynchronous functions
			std::shared_ptr<Request> request(new Request(*socket));

			//Set timeout on the following asio::async-read or write function
			auto timer = get_timeout_timer(socket, m_config.timeout_request);

			asio::async_read_until(*socket, request->streambuf, "\r\n\r\n",
					[this, socket, request, timer](const std::error_code& ec, size_t bytes_transferred) {
				if(timer)
					timer->cancel();
				if(!ec) {
					//request->streambuf.size() is not necessarily the same as bytes_transferred, from Boost-docs:
					//"After a successful async_read_until operation, the streambuf may contain additional data beyond the delimiter"
					//The chosen solution is to extract lines from the stream directly when parsing the header. What is left of the
					//streambuf (maybe some bytes of the content) is appended to in the async_read-function below (for retrieving content).
					size_t num_additional_bytes=request->streambuf.size()-bytes_transferred;

					if (!parse_request(request))
						return;

					//If content, read that as well
					auto it = request->header.find("Content-Length");
					if (it != request->header.end()) {
						unsigned long long content_length;
						try {
							content_length = stoull(it->second);
						}
						catch (const std::exception &) {
							if (on_error)
								on_error(request, std::error_code(EPROTO, std::generic_category()));
							return;
						}
						if (content_length > num_additional_bytes) {
							//Set timeout on the following asio::async-read or write function
							auto timer2 = get_timeout_timer(socket, m_config.timeout_content);
							asio::async_read(*socket, request->streambuf,
								asio::transfer_exactly(size_t(content_length) - num_additional_bytes),
								[this, socket, request, timer2]
							(const std::error_code& ec, size_t /*bytes_transferred*/) {
								if (timer2)
									timer2->cancel();
								if (!ec)
									find_resource(socket, request);
								else if (on_error)
									on_error(request, ec);
							});
						}
						else {
							find_resource(socket, request);
						}
					}
					else {
						find_resource(socket, request);
					}
				}
				else if (on_error)
					on_error(request, ec);
			});
		}

		bool parse_request(const std::shared_ptr<Request> &request) const {
			std::string line;
			getline(request->content, line);
			size_t method_end;
			if((method_end=line.find(' '))!=std::string::npos) {
				size_t path_end;
				if((path_end=line.find(' ', method_end+1))!=std::string::npos) {
					request->method=line.substr(0, method_end);
					request->path=line.substr(method_end+1, path_end-method_end-1);

					size_t protocol_end;
					if((protocol_end=line.find('/', path_end+1))!=std::string::npos) {
						if(line.compare(path_end+1, protocol_end-path_end-1, "HTTP")!=0)
							return false;
						request->http_version=line.substr(protocol_end+1, line.size()-protocol_end-2);
					}
					else
						return false;

					getline(request->content, line);
					size_t param_end;
					while((param_end=line.find(':'))!=std::string::npos) {
						size_t value_start=param_end+1;
						if((value_start)<line.size()) {
							if(line[value_start]==' ')
								value_start++;
							if(value_start<line.size())
								request->header.emplace(line.substr(0, param_end), line.substr(value_start, line.size() - value_start - 1));
						}

						getline(request->content, line);
					}
				}
				else
					return false;
			}
			else
				return false;
			return true;
		}
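
		// Illustrative walk-through (not part of the original header) of what the parser
		// above extracts from a typical request head; the values are invented for the
		// example.
		//
		//     "GET /machine/puckman HTTP/1.1\r\n"
		//     "Host: localhost:8080\r\n"
		//     "Connection: keep-alive\r\n"
		//     "\r\n"
		//
		// becomes request->method = "GET", request->path = "/machine/puckman",
		// request->http_version = "1.1", plus the header entries
		// "Host" -> "localhost:8080" and "Connection" -> "keep-alive". The header values
		// lose their trailing '\r' because of the "line.size() - value_start - 1" length
		// used above.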

		void find_resource(const std::shared_ptr<socket_type> &socket, const std::shared_ptr<Request> &request) {
			std::lock_guard<std::mutex> lock(m_resource_mutex);
			//Upgrade connection
			if(on_upgrade) {
				auto it=request->header.find("Upgrade");
				if(it!=request->header.end()) {
					on_upgrade(socket, request);
					return;
				}
			}
			//Find path- and method-match, and call write_response
			for(auto& regex_method : m_resource) {
				auto it = regex_method.second.find(request->method);
				if (it != regex_method.second.end()) {
						std::smatch sm_res;
						if (std::regex_match(request->path, sm_res, regex_method.first)) {
							request->keys = std::get<0>(it->second);
							for (size_t i = 0; i < request->keys.size(); i++) {
								request->params.insert(std::pair<std::string, std::string>(request->keys[i].name, sm_res[i + 1]));
							}
							write_response(socket, request, std::get<1>(it->second));
							return;
						}
				}
			}
			auto it=m_default_resource.find(request->method);
			if(it!=m_default_resource.end()) {
				write_response(socket, request, it->second);
			}
		}
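
		// Illustrative note (not part of the original header): this is where the
		// regex/keys pair built in on_get() is consumed. For a handler registered
		// (hypothetically) as on_get("/machine/:name", ...), path2regex yields one key
		// ("name") and a regex with one capture group, so for request->path ==
		// "/machine/puckman" the loop above inserts {"name", "puckman"} into
		// request->params before the handler runs. Requests matching no registered
		// regex fall through to the m_default_resource handler for their method, if any.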

		void write_response(const std::shared_ptr<socket_type> &socket, const std::shared_ptr<Request> &request, http_handler& resource_function) {
			//Set timeout on the following asio::async-read or write function
			auto timer = get_timeout_timer(socket, m_config.timeout_content);

			auto response=std::shared_ptr<Response>(new Response(socket), [this, request, timer](Response *response_ptr) {
				auto response=std::shared_ptr<Response>(response_ptr);
				send(response, [this, response, request, timer](const std::error_code& ec) {
					if (timer)
						timer->cancel();
					if (!ec) {
						float http_version;
						try {
							http_version = stof(request->http_version);
						}
						catch (const std::exception &) {
							if (on_error)
								on_error(request, std::error_code(EPROTO, std::generic_category()));
							return;
						}

						if (response->close_connection_after_response)
							return;

						auto range = request->header.equal_range("Connection");
						case_insensitive_equals check;
						for (auto it = range.first; it != range.second; ++it) {
							if (check(it->second, "close"))
								return;
						}
						if (http_version > 1.05)
							read_request_and_content(response->socket());
					}
					else if (on_error)
						on_error(request, ec);
				});
			});

			try {
				resource_function(response, request);
			}
			catch(const std::exception &) {
				if (on_error)
					on_error(request, std::error_code(EPROTO, std::generic_category()));
			}
		}
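
		// Illustrative sketch (not part of the original header): the custom deleter above
		// is what actually transmits the reply, so a handler can defer its answer by
		// keeping the shared_ptr<Response> alive until the data is ready. The timer below
		// is a placeholder for whatever asynchronous work produces the response.
		//
		//     server.on_get("/slow", [&some_timer](auto response, auto request) {
		//         some_timer.async_wait([response](const std::error_code &) {
		//             response->status(200);
		//             response->send("finally done");
		//         });   // nothing is written until this lambda (the last owner) goes away
		//     });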
	};

	template<class socket_type>
	class Server : public ServerBase<socket_type> {
	public:
		Server(unsigned short port, size_t num_threads, long timeout_request, long timeout_send_or_receive)
			: ServerBase<socket_type>(port, num_threads, timeout_request, timeout_send_or_receive)
		{
		}
	};

	using HTTP = asio::ip::tcp::socket;

	template<>
	class Server<HTTP> : public ServerBase<HTTP> {
	public:
		Server() : ServerBase<HTTP>::ServerBase(80) {}
	protected:
		void accept() override {
			//Create new socket for this connection
			//Shared_ptr is used to pass temporary objects to the asynchronous functions
			auto socket = std::make_shared<HTTP>(*m_io_context);

			acceptor->async_accept(*socket, [this, socket](const std::error_code& ec){
				//Immediately start accepting a new connection (if io_context hasn't been stopped)
				if (ec != asio::error::operation_aborted)
					accept();

				if(!ec) {
					asio::ip::tcp::no_delay option(true);
					socket->set_option(option);

					read_request_and_content(socket);
				} else if (on_error)
					on_error(std::shared_ptr<Request>(new Request(*socket)), ec);
			});
		}
	};

	class http_server : public Server<HTTP> {
	public:
		http_server() : Server<HTTP>::Server() {}
	};
}
#endif  /* MAME_LIB_UTIL_SERVER_HTTP_IMPL_HPP */
pacesizep.size() == SIZE_DWORD) { if (USE_THISCALL) a.mov(ecx, imm(accessors.write_dword.obj)); else a.mov(dword_ptr(esp, 0), imm(accessors.write_dword.obj)); a.call(imm(accessors.write_dword.func)); if (USE_THISCALL) a.sub(esp, 8); } else if (spacesizep.size() == SIZE_QWORD) { if (USE_THISCALL) a.mov(ecx, imm(accessors.write_qword.obj)); else a.mov(dword_ptr(esp, 0), imm(accessors.write_qword.obj)); a.call(imm(accessors.write_qword.func)); if (USE_THISCALL) a.sub(esp, 12); } } //------------------------------------------------- // op_writem - process a WRITEM opcode //------------------------------------------------- void drcbe_x86::op_writem(Assembler &a, const instruction &inst) { // validate instruction assert(inst.size() == 4 || inst.size() == 8); assert_no_condition(inst); assert_no_flags(inst); // normalize parameters be_parameter addrp(*this, inst.param(0), PTYPE_MRI); be_parameter srcp(*this, inst.param(1), PTYPE_MRI); be_parameter maskp(*this, inst.param(2), PTYPE_MRI); parameter const &spacesizep = inst.param(3); assert(spacesizep.is_size_space()); // set up a call to the write byte handler auto const &accessors = m_memory_accessors[spacesizep.space()]; if (spacesizep.size() != SIZE_QWORD) { emit_mov_m32_p32(a, dword_ptr(esp, USE_THISCALL ? 8 : 12), maskp); emit_mov_m32_p32(a, dword_ptr(esp, USE_THISCALL ? 4 : 8), srcp); } else { emit_mov_m64_p64(a, qword_ptr(esp, USE_THISCALL ? 12 : 16), maskp); emit_mov_m64_p64(a, qword_ptr(esp, USE_THISCALL ? 4 : 8), srcp); } emit_mov_m32_p32(a, dword_ptr(esp, USE_THISCALL ? 0 : 4), addrp); if (spacesizep.size() == SIZE_BYTE) { if (USE_THISCALL) a.mov(ecx, imm(accessors.write_byte_masked.obj)); else a.mov(dword_ptr(esp, 0), imm(accessors.write_byte_masked.obj)); a.call(imm(accessors.write_byte_masked.func)); if (USE_THISCALL) a.sub(esp, 12); } else if (spacesizep.size() == SIZE_WORD) { if (USE_THISCALL) a.mov(ecx, imm(accessors.write_word_masked.obj)); else a.mov(dword_ptr(esp, 0), imm(accessors.write_word_masked.obj)); a.call(imm(accessors.write_word_masked.func)); if (USE_THISCALL) a.sub(esp, 12); } else if (spacesizep.size() == SIZE_DWORD) { if (USE_THISCALL) a.mov(ecx, imm(accessors.write_dword_masked.obj)); else a.mov(dword_ptr(esp, 0), imm(accessors.write_dword_masked.obj)); a.call(imm(accessors.write_dword_masked.func)); if (USE_THISCALL) a.sub(esp, 12); } else if (spacesizep.size() == SIZE_QWORD) { if (USE_THISCALL) a.mov(ecx, imm(accessors.write_qword_masked.obj)); else a.mov(dword_ptr(esp, 0), imm(accessors.write_qword_masked.obj)); a.call(imm(accessors.write_qword_masked.func)); if (USE_THISCALL) a.sub(esp, 20); } } //------------------------------------------------- // op_carry - process a CARRY opcode //------------------------------------------------- void drcbe_x86::op_carry(Assembler &a, const instruction &inst) { // validate instruction assert(inst.size() == 4 || inst.size() == 8); assert_no_condition(inst); assert_flags(inst, FLAG_C); // normalize parameters be_parameter srcp(*this, inst.param(0), PTYPE_MRI); be_parameter bitp(*this, inst.param(1), PTYPE_MRI); // degenerate case: source is immediate if (srcp.is_immediate() && bitp.is_immediate()) { if (srcp.immediate() & ((uint64_t)1 << (bitp.immediate() & (inst.size() * 8 - 1)))) a.stc(); else a.clc(); return; } // load non-immediate bit numbers into a register if (!bitp.is_immediate()) { emit_mov_r32_p32(a, ecx, bitp); a.and_(ecx, inst.size() * 8 - 1); } // 32-bit form if (inst.size() == 4) { if (srcp.is_immediate()) emit_mov_r32_p32(a, edx, srcp); if (bitp.is_immediate()) 
{ if (srcp.is_memory()) a.bt(MABS(srcp.memory(), 4), (bitp.immediate() & (inst.size() * 8 - 1))); else if (srcp.is_int_register()) a.bt(Gpd(srcp.ireg()), (bitp.immediate() & (inst.size() * 8 - 1))); else if (srcp.is_immediate()) a.bt(edx, (bitp.immediate() & (inst.size() * 8 - 1))); } else { if (srcp.is_memory()) a.bt(MABS(srcp.memory()), ecx); else if (srcp.is_int_register()) a.bt(Gpd(srcp.ireg()), ecx); else if (srcp.is_immediate()) a.bt(edx, ecx); } } // 64-bit form else { if (srcp.is_immediate()) emit_mov_r64_p64(a, ebx, edx, srcp); if (bitp.is_immediate()) { const uint32_t bitshift = bitp.immediate() & (inst.size() * 8 - 1); if (bitshift < 32) { if (srcp.is_memory()) a.bt(MABS(srcp.memory(), 4), bitshift); else if (srcp.is_int_register()) a.bt(Gpd(srcp.ireg()), bitshift); else if (srcp.is_immediate()) a.bt(ebx, bitshift); } else if (bitshift >= 32) { if (srcp.is_memory()) a.bt(MABS((uint8_t*)srcp.memory() + 4, 4), bitshift - 32); else if (srcp.is_int_register()) a.bt(MABS(m_reghi[srcp.ireg()], 4), bitshift - 32); else if (srcp.is_immediate()) a.bt(edx, bitshift); } } else { Label end = a.newLabel(); Label higher = a.newLabel(); a.cmp(ecx, 32); a.short_().jge(higher); if (srcp.is_memory()) { a.bt(MABS(srcp.memory(), 4), ecx); } else if (srcp.is_int_register()) { a.mov(MABS(m_reglo[srcp.ireg()], 4), Gpd(srcp.ireg())); // mov [srcp.lo],srcp a.bt(MABS(m_reglo[srcp.ireg()], 4), ecx); // bt [srcp],ecx } else if (srcp.is_immediate()) { a.bt(ebx, ecx); } a.short_().jmp(end); a.bind(higher); a.sub(ecx, 32); if (srcp.is_memory()) { a.bt(MABS((uint8_t*)srcp.memory() + 4, 4), ecx); } else if (srcp.is_int_register()) { a.bt(MABS(m_reghi[srcp.ireg()], 4), ecx); } else if (srcp.is_immediate()) { a.bt(edx, ecx); } a.bind(end); } } } //------------------------------------------------- // op_set - process a SET opcode //------------------------------------------------- void drcbe_x86::op_set(Assembler &a, const instruction &inst) { // validate instruction assert(inst.size() == 4 || inst.size() == 8); assert_any_condition(inst); assert_no_flags(inst); // normalize parameters be_parameter dstp(*this, inst.param(0), PTYPE_MR); // pick a target register for the general case Gp const dstreg = dstp.select_register(eax); // set to AL a.set(X86_CONDITION(inst.condition()), al); // setcc al a.movzx(dstreg, al); // movzx dstreg,al // store low 32 bits emit_mov_p32_r32(a, dstp, dstreg); // mov dstp,dstreg // 64-bit form stores upper 32 bits if (inst.size() == 8) { // general case if (dstp.is_memory()) a.mov(MABS(dstp.memory(4), 4), 0); // mov [dstp+4],0 else if (dstp.is_int_register()) a.mov(MABS(m_reghi[dstp.ireg()], 4), 0); // mov [reghi],0 } } //------------------------------------------------- // op_mov - process a MOV opcode //------------------------------------------------- void drcbe_x86::op_mov(Assembler &a, const instruction &inst) { // validate instruction assert(inst.size() == 4 || inst.size() == 8); assert_any_condition(inst); assert_no_flags(inst); // normalize parameters be_parameter dstp(*this, inst.param(0), PTYPE_MR); be_parameter srcp(*this, inst.param(1), PTYPE_MRI); // pick a target register for the general case Gp const dstreg = dstp.select_register(eax); // add a conditional branch unless a conditional move is possible Label skip; if (inst.condition() != uml::COND_ALWAYS && ((inst.size() == 8) || !(dstp.is_int_register() && !srcp.is_immediate()))) { skip = a.newLabel(); a.short_().j(X86_NOT_CONDITION(inst.condition()), skip); // jcc skip } // 32-bit form if (inst.size() == 4) { // register 
to memory if (dstp.is_memory() && srcp.is_int_register()) a.mov(MABS(dstp.memory()), Gpd(srcp.ireg())); // mov [dstp],srcp // immediate to memory else if (dstp.is_memory() && srcp.is_immediate()) a.mov(MABS(dstp.memory(), 4), srcp.immediate()); // mov [dstp],srcp // conditional memory to register else if (inst.condition() != uml::COND_ALWAYS && dstp.is_int_register() && srcp.is_memory()) a.cmov(X86_CONDITION(inst.condition()), Gpd(dstp.ireg()), MABS(srcp.memory())); // cmovcc dstp,[srcp] // conditional register to register else if (inst.condition() != uml::COND_ALWAYS && dstp.is_int_register() && srcp.is_int_register()) a.cmov(X86_CONDITION(inst.condition()), Gpd(dstp.ireg()), Gpd(srcp.ireg())); // cmovcc dstp,srcp // general case else { emit_mov_r32_p32_keepflags(a, dstreg, srcp); // mov dstreg,srcp emit_mov_p32_r32(a, dstp, dstreg); // mov dstp,dstreg } } // 64-bit form else if (inst.size() == 8) { // register to memory if (dstp.is_memory() && srcp.is_int_register()) { a.mov(eax, MABS(m_reghi[srcp.ireg()])); // mov eax,reghi[srcp] a.mov(MABS(dstp.memory(0)), Gpd(srcp.ireg())); // mov [dstp],srcp a.mov(MABS(dstp.memory(4)), eax); // mov [dstp+4],eax } // immediate to memory else if (dstp.is_memory() && srcp.is_immediate()) { a.mov(MABS(dstp.memory(0), 4), srcp.immediate()); // mov [dstp],srcp a.mov(MABS(dstp.memory(4), 4), srcp.immediate() >> 32); // mov [dstp+4],srcp >> 32 } // general case else { emit_mov_r64_p64_keepflags(a, dstreg, edx, srcp); // mov edx:dstreg,srcp emit_mov_p64_r64(a, dstp, dstreg, edx); // mov dstp,edx:dstreg } } // bind the label if (inst.condition() != uml::COND_ALWAYS && ((inst.size() == 8) || !(dstp.is_int_register() && !srcp.is_immediate()))) { a.bind(skip); reset_last_upper_lower_reg(); } } //------------------------------------------------- // op_sext - process a SEXT opcode //------------------------------------------------- void drcbe_x86::op_sext(Assembler &a, const instruction &inst) { // validate instruction assert(inst.size() == 4 || inst.size() == 8); assert_no_condition(inst); assert_flags(inst, FLAG_S | FLAG_Z); // normalize parameters be_parameter dstp(*this, inst.param(0), PTYPE_MR); be_parameter srcp(*this, inst.param(1), PTYPE_MRI); parameter const &sizep = inst.param(2); assert(sizep.is_size()); // pick a target register for the general case Gp const dstreg = eax; // convert 8-bit source registers to EAX if (sizep.size() == SIZE_BYTE && srcp.is_int_register() && (srcp.ireg() & 4)) { a.mov(eax, Gpd(srcp.ireg())); // mov eax,srcp srcp = be_parameter::make_ireg(eax.id()); } // general case if (srcp.is_memory()) { if (sizep.size() == SIZE_BYTE) a.movsx(dstreg, MABS(srcp.memory(), 1)); // movsx dstreg,[srcp] else if (sizep.size() == SIZE_WORD) a.movsx(dstreg, MABS(srcp.memory(), 2)); // movsx dstreg,[srcp] else if (sizep.size() == SIZE_DWORD) a.mov(dstreg, MABS(srcp.memory())); // mov dstreg,[srcp] } else if (srcp.is_int_register()) { if (sizep.size() == SIZE_BYTE) a.movsx(dstreg, GpbLo(srcp.ireg())); // movsx dstreg,srcp else if (sizep.size() == SIZE_WORD) a.movsx(dstreg, Gpw(srcp.ireg())); // movsx dstreg,srcp else if (sizep.size() == SIZE_DWORD && dstreg.id() != srcp.ireg()) a.mov(dstreg, Gpd(srcp.ireg())); // mov dstreg,srcp } else if (srcp.is_immediate()) { if (sizep.size() == SIZE_BYTE) a.mov(dstreg, (int8_t)srcp.immediate()); else if (sizep.size() == SIZE_WORD) a.mov(dstreg, (int16_t)srcp.immediate()); else if (sizep.size() == SIZE_DWORD) a.mov(dstreg, (int32_t)srcp.immediate()); } if (inst.flags() != 0) a.test(dstreg, dstreg); // test 
dstreg,dstreg // 32-bit form: store the low 32 bits if (inst.size() == 4) emit_mov_p32_r32(a, dstp, dstreg); // mov dstp,dstreg // 64-bit form: sign extend to 64 bits and store edx:eax else if (inst.size() == 8) { a.cdq(); // cdq emit_mov_p64_r64(a, dstp, dstreg, edx); // mov dstp,edx:eax } } //------------------------------------------------- // op_roland - process an ROLAND opcode //------------------------------------------------- void drcbe_x86::op_roland(Assembler &a, const instruction &inst) { // validate instruction assert(inst.size() == 4 || inst.size() == 8); assert_no_condition(inst); assert_flags(inst, FLAG_S | FLAG_Z); // normalize parameters be_parameter dstp(*this, inst.param(0), PTYPE_MR); be_parameter srcp(*this, inst.param(1), PTYPE_MRI); be_parameter shiftp(*this, inst.param(2), PTYPE_MRI); be_parameter maskp(*this, inst.param(3), PTYPE_MRI); // pick a target register for the general case Gp const dstreg = dstp.select_register(eax, shiftp, maskp); // 32-bit form if (inst.size() == 4) { emit_mov_r32_p32(a, dstreg, srcp); // mov dstreg,srcp shift_op_param(a, Inst::kIdRol, inst.size(), dstreg, shiftp, // rol dstreg,shiftp [inst](Assembler &a, Operand const &dst, be_parameter const &src) { // optimize zero case return (!inst.flags() && !src.immediate()); }, false); alu_op_param(a, Inst::kIdAnd, dstreg, maskp, // and dstreg,maskp [inst](Assembler &a, Operand const &dst, be_parameter const &src) { // optimize all-zero and all-one cases if (!inst.flags() && !src.immediate()) { a.xor_(dst.as(), dst.as()); return true; } else if (!inst.flags() && u32(src.immediate()) == 0xffffffffU) return true; return false; }); emit_mov_p32_r32(a, dstp, dstreg); // mov dstp,dstreg } // 64-bit form else if (inst.size() == 8) { emit_mov_r64_p64(a, dstreg, edx, srcp); // mov edx:dstreg,srcp emit_rol_r64_p64(a, dstreg, edx, shiftp, inst); // rol edx:dstreg,shiftp emit_and_r64_p64(a, dstreg, edx, maskp, inst); // and edx:dstreg,maskp emit_mov_p64_r64(a, dstp, dstreg, edx); // mov dstp,edx:dstreg } } //------------------------------------------------- // op_rolins - process an ROLINS opcode //------------------------------------------------- void drcbe_x86::op_rolins(Assembler &a, const instruction &inst) { // validate instruction assert(inst.size() == 4 || inst.size() == 8); assert_no_condition(inst); assert_flags(inst, FLAG_S | FLAG_Z); // normalize parameters be_parameter dstp(*this, inst.param(0), PTYPE_MR); be_parameter srcp(*this, inst.param(1), PTYPE_MRI); be_parameter shiftp(*this, inst.param(2), PTYPE_MRI); be_parameter maskp(*this, inst.param(3), PTYPE_MRI); // pick a target register for the general case Gp const dstreg = dstp.select_register(ecx, shiftp, maskp); if (inst.size() == 4) { // 32-bit form emit_mov_r32_p32(a, eax, srcp); // mov eax,srcp shift_op_param(a, Inst::kIdRol, inst.size(), eax, shiftp, // rol eax,shiftp [inst](Assembler &a, Operand const &dst, be_parameter const &src) { // optimize zero case return (!inst.flags() && !src.immediate()); }, false); emit_mov_r32_p32(a, dstreg, dstp); // mov dstreg,dstp if (maskp.is_immediate()) { a.and_(eax, maskp.immediate()); // and eax,maskp a.and_(dstreg, ~maskp.immediate()); // and dstreg,~maskp } else { emit_mov_r32_p32(a, edx, maskp); // mov edx,maskp a.and_(eax, edx); // and eax,edx a.not_(edx); // not edx a.and_(dstreg, edx); // and dstreg,edx } a.or_(dstreg, eax); // or dstreg,eax emit_mov_p32_r32(a, dstp, dstreg); // mov dstp,dstreg if (inst.flags()) a.test(dstreg, dstreg); } else if (inst.size() == 8) { // 64-bit form 
emit_mov_r64_p64(a, eax, edx, srcp); // mov edx:eax,srcp emit_rol_r64_p64(a, eax, edx, shiftp, inst); // rol edx:eax,shiftp if (maskp.is_immediate()) { a.and_(eax, maskp.immediate()); // and eax,maskp a.and_(edx, maskp.immediate() >> 32); // and edx,maskp >> 32 if (dstp.is_int_register()) { a.and_(Gpd(dstp.ireg()), ~maskp.immediate()); // and dstp.lo,~maskp a.and_(MABS(m_reghi[dstp.ireg()], 4), ~maskp.immediate() >> 32); // and dstp.hi,~maskp >> 32 a.or_(Gpd(dstp.ireg()), eax); // or dstp.lo,eax a.or_(MABS(m_reghi[dstp.ireg()]), edx); // or dstp.hi,edx } else { a.and_(MABS(dstp.memory(0), 4), ~maskp.immediate()); // and dstp.lo,~maskp a.and_(MABS(dstp.memory(4), 4), ~maskp.immediate() >> 32); // and dstp.hi,~maskp >> 32 a.or_(MABS(dstp.memory(0)), eax); // or dstp.lo,eax a.or_(MABS(dstp.memory(4)), edx); // or dstp.hi,edx } } else { a.mov(ptr(esp, -8), ebx); // mov [esp-8],ebx emit_mov_r64_p64(a, ebx, ecx, maskp); // mov ecx:ebx,maskp a.and_(eax, ebx); // and eax,ebx a.and_(edx, ecx); // and edx,ecx a.not_(ebx); // not ebx a.not_(ecx); // not ecx if (dstp.is_int_register()) { if (dstp.ireg() == Gp::kIdBx) a.and_(ptr(esp, -8), ebx); // and dstp.lo,ebx else a.and_(Gpd(dstp.ireg()), ebx); // and dstp.lo,ebx a.and_(MABS(m_reghi[dstp.ireg()]), ecx); // and dstp.hi,ecx if (dstp.ireg() == Gp::kIdBx) a.or_(ptr(esp, -8), eax); // or dstp.lo,eax else a.or_(Gpd(dstp.ireg()), eax); // or dstp.lo,eax a.or_(MABS(m_reghi[dstp.ireg()]), edx); // or dstp.hi,edx } else { a.and_(MABS(dstp.memory(0)), ebx); // and dstp.lo,ebx a.and_(MABS(dstp.memory(4)), ecx); // and dstp.hi,ecx a.or_(MABS(dstp.memory(0)), eax); // or dstp.lo,eax a.or_(MABS(dstp.memory(4)), edx); // or dstp.hi,edx } a.mov(ebx, ptr(esp, -8)); // mov ebx,[esp-8] if (inst.flags()) { if (dstp.is_int_register()) calculate_status_flags(a, Gpd(dstp.ireg()), FLAG_Z); else calculate_status_flags(a, MABS(dstp.memory(0)), FLAG_Z); a.pushfd(); if (dstp.is_int_register()) calculate_status_flags(a, MABS(m_reghi[dstp.ireg()]), FLAG_S | FLAG_Z); else calculate_status_flags(a, MABS(dstp.memory(4)), FLAG_S | FLAG_Z); emit_combine_z_flags(a); } } } } //------------------------------------------------- // op_add - process a ADD opcode //------------------------------------------------- void drcbe_x86::op_add(Assembler &a, const instruction &inst) { // validate instruction assert(inst.size() == 4 || inst.size() == 8); assert_no_condition(inst); assert_flags(inst, FLAG_C | FLAG_V | FLAG_Z | FLAG_S); // normalize parameters be_parameter dstp(*this, inst.param(0), PTYPE_MR); be_parameter src1p(*this, inst.param(1), PTYPE_MRI); be_parameter src2p(*this, inst.param(2), PTYPE_MRI); normalize_commutative(src1p, src2p); // pick a target register for the general case Gp const dstreg = dstp.select_register(eax, src2p); // 32-bit form if (inst.size() == 4) { // dstp == src1p in memory if (dstp.is_memory() && dstp == src1p) alu_op_param(a, Inst::kIdAdd, MABS(dstp.memory(), 4), src2p, // add [dstp],src2p [inst](Assembler &a, Operand const &dst, be_parameter const &src) { // optimize zero case return (!inst.flags() && !src.immediate()); }); // reg = reg + imm else if (dstp.is_int_register() && src1p.is_int_register() && src2p.is_immediate() && !inst.flags()) a.lea(Gpd(dstp.ireg()), ptr(Gpd(src1p.ireg()), src2p.immediate())); // lea dstp,[src1p+src2p] // reg = reg + reg else if (dstp.is_int_register() && src1p.is_int_register() && src2p.is_int_register() && !inst.flags()) a.lea(Gpd(dstp.ireg()), ptr(Gpd(src1p.ireg()), Gpd(src2p.ireg()))); // lea dstp,[src1p+src2p] // general 
case else { emit_mov_r32_p32(a, dstreg, src1p); // mov dstreg,src1p alu_op_param(a, Inst::kIdAdd, dstreg, src2p, // add dstreg,src2p [inst](Assembler &a, Operand const &dst, be_parameter const &src) { // optimize zero case return (!inst.flags() && !src.immediate()); }); emit_mov_p32_r32(a, dstp, dstreg); // mov dstp,dstreg } } // 64-bit form else if (inst.size() == 8) { // dstp == src1p in memory if (dstp.is_memory() && dstp == src1p) alu_op_param(a, Inst::kIdAdd, Inst::kIdAdc, // add [dstp],src2p MABS(dstp.memory(0), 4), MABS(dstp.memory(4), 4), src2p, inst.flags() & FLAG_Z); // general case else { emit_mov_r64_p64(a, dstreg, edx, src1p); // mov edx:dstreg,[src1p] alu_op_param(a, Inst::kIdAdd, Inst::kIdAdc, // add edx:dstreg,src2p dstreg, edx, src2p, inst.flags() & FLAG_Z); emit_mov_p64_r64(a, dstp, dstreg, edx); // mov dstp,edx:dstreg } } } //------------------------------------------------- // op_addc - process a ADDC opcode //------------------------------------------------- void drcbe_x86::op_addc(Assembler &a, const instruction &inst) { // validate instruction assert(inst.size() == 4 || inst.size() == 8); assert_no_condition(inst); assert_flags(inst, FLAG_C | FLAG_V | FLAG_Z | FLAG_S); // normalize parameters be_parameter dstp(*this, inst.param(0), PTYPE_MR); be_parameter src1p(*this, inst.param(1), PTYPE_MRI); be_parameter src2p(*this, inst.param(2), PTYPE_MRI); normalize_commutative(src1p, src2p); // pick a target register for the general case Gp const dstreg = dstp.select_register(eax, src2p); // 32-bit form if (inst.size() == 4) { // dstp == src1p in memory if (dstp.is_memory() && dstp == src1p) alu_op_param(a, Inst::kIdAdc, MABS(dstp.memory(), 4), src2p); // adc [dstp],src2p // general case else { emit_mov_r32_p32_keepflags(a, dstreg, src1p); // mov dstreg,src1p alu_op_param(a, Inst::kIdAdc, dstreg, src2p); // adc dstreg,src2p emit_mov_p32_r32(a, dstp, dstreg); // mov dstp,dstreg } } // 64-bit form else if (inst.size() == 8) { // dstp == src1p in memory if (dstp.is_memory() && dstp == src1p) alu_op_param(a, Inst::kIdAdc, Inst::kIdAdc, // adc [dstp],src2p MABS(dstp.memory(0), 4), MABS(dstp.memory(4), 4), src2p, inst.flags() & FLAG_Z); // general case else { emit_mov_r64_p64_keepflags(a, dstreg, edx, src1p); // mov edx:dstreg,[src1p] alu_op_param(a, Inst::kIdAdc, Inst::kIdAdc, // adc edx:dstreg,src2p dstreg, edx, src2p, inst.flags() & FLAG_Z); emit_mov_p64_r64(a, dstp, dstreg, edx); // mov dstp,edx:dstreg } } } //------------------------------------------------- // op_sub - process a SUB opcode //------------------------------------------------- void drcbe_x86::op_sub(Assembler &a, const instruction &inst) { // validate instruction assert(inst.size() == 4 || inst.size() == 8); assert_no_condition(inst); assert_flags(inst, FLAG_C | FLAG_V | FLAG_Z | FLAG_S); // normalize parameters be_parameter dstp(*this, inst.param(0), PTYPE_MR); be_parameter src1p(*this, inst.param(1), PTYPE_MRI); be_parameter src2p(*this, inst.param(2), PTYPE_MRI); // pick a target register for the general case Gp const dstreg = dstp.select_register(eax, src2p); // 32-bit form if (inst.size() == 4) { // dstp == src1p in memory if (dstp.is_memory() && dstp == src1p) alu_op_param(a, Inst::kIdSub, MABS(dstp.memory(), 4), src2p, // sub [dstp],src2p [inst](Assembler &a, Operand const &dst, be_parameter const &src) { // optimize zero case return (!inst.flags() && !src.immediate()); }); // reg = reg - imm else if (dstp.is_int_register() && src1p.is_int_register() && src2p.is_immediate() && !inst.flags()) 
a.lea(Gpd(dstp.ireg()), ptr(Gpd(src1p.ireg()), -src2p.immediate())); // lea dstp,[src1p-src2p] // general case else { emit_mov_r32_p32(a, dstreg, src1p); // mov dstreg,src1p alu_op_param(a, Inst::kIdSub, dstreg, src2p, // sub dstreg,src2p [inst](Assembler &a, Operand const &dst, be_parameter const &src) { // optimize zero case return (!inst.flags() && !src.immediate()); }); emit_mov_p32_r32(a, dstp, dstreg); // mov dstp,dstreg } } // 64-bit form else if (inst.size() == 8) { // dstp == src1p in memory if (dstp.is_memory() && dstp == src1p) alu_op_param(a, Inst::kIdSub, Inst::kIdSbb, // sub [dstp],src2p MABS(dstp.memory(0), 4), MABS(dstp.memory(4), 4), src2p, inst.flags() & FLAG_Z); // general case else { emit_mov_r64_p64(a, dstreg, edx, src1p); // mov edx:dstreg,[src1p] alu_op_param(a, Inst::kIdSub, Inst::kIdSbb, // sub edx:dstreg,src2p dstreg, edx, src2p, inst.flags() & FLAG_Z); emit_mov_p64_r64(a, dstp, dstreg, edx); // mov dstp,edx:dstreg } } } //------------------------------------------------- // op_subc - process a SUBC opcode //------------------------------------------------- void drcbe_x86::op_subc(Assembler &a, const instruction &inst) { // validate instruction assert(inst.size() == 4 || inst.size() == 8); assert_no_condition(inst); assert_flags(inst, FLAG_C | FLAG_V | FLAG_Z | FLAG_S); // normalize parameters be_parameter dstp(*this, inst.param(0), PTYPE_MR); be_parameter src1p(*this, inst.param(1), PTYPE_MRI); be_parameter src2p(*this, inst.param(2), PTYPE_MRI); // pick a target register for the general case Gp const dstreg = dstp.select_register(eax, src2p); // 32-bit form if (inst.size() == 4) { // dstp == src1p in memory if (dstp.is_memory() && dstp == src1p) alu_op_param(a, Inst::kIdSbb, MABS(dstp.memory(), 4), src2p); // sbb [dstp],src2p // general case else { emit_mov_r32_p32_keepflags(a, dstreg, src1p); // mov dstreg,src1p alu_op_param(a, Inst::kIdSbb, dstreg, src2p); // sbb dstreg,src2p emit_mov_p32_r32(a, dstp, dstreg); // mov dstp,dstreg } } // 64-bit form else if (inst.size() == 8) { // dstp == src1p in memory if (dstp.is_memory() && dstp == src1p) alu_op_param(a, Inst::kIdSbb, Inst::kIdSbb, // sbb [dstp],src2p MABS(dstp.memory(0), 4), MABS(dstp.memory(4), 4), src2p, inst.flags() & FLAG_Z); // general case else { emit_mov_r64_p64_keepflags(a, dstreg, edx, src1p); // mov edx:dstreg,[src1p] alu_op_param(a, Inst::kIdSbb, Inst::kIdSbb, // sbb edx:dstreg,src2p dstreg, edx, src2p, inst.flags() & FLAG_Z); emit_mov_p64_r64(a, dstp, dstreg, edx); // mov dstp,edx:dstreg } } } //------------------------------------------------- // op_cmp - process a CMP opcode //------------------------------------------------- void drcbe_x86::op_cmp(Assembler &a, const instruction &inst) { // validate instruction assert(inst.size() == 4 || inst.size() == 8); assert_no_condition(inst); assert_flags(inst, FLAG_C | FLAG_V | FLAG_Z | FLAG_S); // normalize parameters be_parameter src1p(*this, inst.param(0), PTYPE_MRI); be_parameter src2p(*this, inst.param(1), PTYPE_MRI); // pick a target register for the general case Gp const src1reg = src1p.select_register(eax); // 32-bit form if (inst.size() == 4) { // memory versus anything if (src1p.is_memory()) alu_op_param(a, Inst::kIdCmp, MABS(src1p.memory(), 4), src2p); // cmp [src1p],src2p // general case else { if (src1p.is_immediate()) a.mov(src1reg, src1p.immediate()); // mov src1reg,imm alu_op_param(a, Inst::kIdCmp, src1reg, src2p); // cmp src1reg,src2p } } // 64-bit form else { // general case emit_mov_r64_p64(a, eax, edx, src1p); // mov 
edx:eax,[src1p] alu_op_param(a, Inst::kIdSub, Inst::kIdSbb, // cmp edx:eax,src2p eax, edx, src2p, (inst.flags() & FLAG_Z) && (inst.flags() != FLAG_Z)); if (inst.flags() == FLAG_Z) a.or_(edx, eax); } } //------------------------------------------------- // op_mulu - process a MULU opcode //------------------------------------------------- void drcbe_x86::op_mulu(Assembler &a, const instruction &inst) { // validate instruction assert(inst.size() == 4 || inst.size() == 8); assert_no_condition(inst); assert_flags(inst, FLAG_V | FLAG_Z | FLAG_S); // normalize parameters be_parameter dstp(*this, inst.param(0), PTYPE_MR); be_parameter edstp(*this, inst.param(1), PTYPE_MR); be_parameter src1p(*this, inst.param(2), PTYPE_MRI); be_parameter src2p(*this, inst.param(3), PTYPE_MRI); normalize_commutative(src1p, src2p); const bool compute_hi = (dstp != edstp); if (inst.size() == 4) { // 32-bit form emit_mov_r32_p32(a, eax, src1p); // mov eax,src1p emit_mov_r32_p32(a, edx, src2p); // mov edx,src2p a.mul(edx); // mul edx emit_mov_p32_r32(a, dstp, eax); // mov dstp,eax if (compute_hi) emit_mov_p32_r32(a, edstp, edx); // mov edstp,edx if (inst.flags()) { a.pushfd(); a.test(edx, edx); a.pushfd(); // will have the sign flag + upper half zero a.pop(edx); a.test(eax, eax); a.pushfd(); // lower half zero a.pop(eax); a.and_(dword_ptr(esp, 0), ~(0x40 | 0x80)); a.mov(ecx, edx); a.and_(ecx, 0x80); // sign a.and_(eax, edx); a.and_(eax, 0x40); // zero a.or_(eax, ecx); a.or_(dword_ptr(esp, 0), eax); a.popfd(); } } else if (inst.size() == 8) { // 64-bit form a.mov(dword_ptr(esp, 24), inst.flags() ? 1 : 0); // mov [esp+24],flags emit_mov_m64_p64(a, qword_ptr(esp, 16), src2p); // mov [esp+16],src2p emit_mov_m64_p64(a, qword_ptr(esp, 8), src1p); // mov [esp+8],src1p if (!compute_hi) a.mov(dword_ptr(esp, 4), imm(&m_reslo)); // mov [esp+4],&reslo else a.mov(dword_ptr(esp, 4), imm(&m_reshi)); // mov [esp+4],&reshi a.mov(dword_ptr(esp, 0), imm(&m_reslo)); // mov [esp],&reslo a.call(imm(dmulu)); // call dmulu (calculate ZS flags as 64*64->128) if (inst.flags() != 0) a.push(dword_ptr(uintptr_t(flags_unmap), eax, 2)); // push flags_unmap[eax*4] a.mov(eax, MABS((uint32_t *)&m_reslo + 0)); // mov eax,reslo.lo a.mov(edx, MABS((uint32_t *)&m_reslo + 1)); // mov edx,reslo.hi emit_mov_p64_r64(a, dstp, eax, edx); // mov dstp,edx:eax if (compute_hi) { a.mov(eax, MABS((uint32_t *)&m_reshi + 0)); // mov eax,reshi.lo a.mov(ecx, MABS((uint32_t *)&m_reshi + 1)); // mov ecx,reshi.hi emit_mov_p64_r64(a, edstp, eax, ecx); // mov edstp,ecx:eax } if (inst.flags() != 0) a.popfd(); // popf } } //------------------------------------------------- // op_mululw - process a MULULW (32x32=32) opcode //------------------------------------------------- void drcbe_x86::op_mululw(Assembler &a, const instruction &inst) { // validate instruction assert(inst.size() == 4 || inst.size() == 8); assert_no_condition(inst); assert_flags(inst, FLAG_V | FLAG_Z | FLAG_S); // normalize parameters be_parameter dstp(*this, inst.param(0), PTYPE_MR); be_parameter src1p(*this, inst.param(1), PTYPE_MRI); be_parameter src2p(*this, inst.param(2), PTYPE_MRI); normalize_commutative(src1p, src2p); if (inst.size() == 4) { // 32-bit form emit_mov_r32_p32(a, eax, src1p); // mov eax,src1p emit_mov_r32_p32(a, edx, src2p); // mov edx,src2p a.mul(edx); // mul edx emit_mov_p32_r32(a, dstp, eax); // mov dstp,eax if (inst.flags()) { a.test(eax, eax); a.pushfd(); // sign + zero // if edx is not zero then it overflowed a.test(edx, edx); a.pushfd(); a.pop(edx); a.and_(edx, 0x40); // zero 
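			// ZF is bit 6 (0x40) and OF is bit 11 (0x800) of EFLAGS, so inverting the isolated
			// "upper half is zero" bit and shifting it left by 5 (0x40 << 5 == 0x800) synthesizes
			// an overflow flag that is ORed into the flag image saved on the stack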
a.xor_(edx, 0x40); a.shl(edx, 5); // turn into overflow flag a.or_(dword_ptr(esp, 0), edx); a.popfd(); } } else if (inst.size() == 8) { // 64-bit form a.mov(dword_ptr(esp, 24), inst.flags() ? 1 : 0); // mov [esp+24],flags emit_mov_m64_p64(a, qword_ptr(esp, 16), src2p); // mov [esp+16],src2p emit_mov_m64_p64(a, qword_ptr(esp, 8), src1p); // mov [esp+8],src1p a.mov(dword_ptr(esp, 4), imm(&m_reslo)); // mov [esp+4],&reslo a.mov(dword_ptr(esp, 0), imm(&m_reslo)); // mov [esp],&reslo a.call(imm(dmulu)); // call dmulu (calculate ZS flags as 64*64->64) if (inst.flags() != 0) a.push(dword_ptr(uintptr_t(flags_unmap), eax, 2)); // push flags_unmap[eax*4] a.mov(eax, MABS((uint32_t *)&m_reslo + 0)); // mov eax,reslo.lo a.mov(edx, MABS((uint32_t *)&m_reslo + 1)); // mov edx,reslo.hi emit_mov_p64_r64(a, dstp, eax, edx); // mov dstp,edx:eax if (inst.flags() != 0) a.popfd(); // popf } } //------------------------------------------------- // op_muls - process a MULS opcode //------------------------------------------------- void drcbe_x86::op_muls(Assembler &a, const instruction &inst) { // validate instruction assert(inst.size() == 4 || inst.size() == 8); assert_no_condition(inst); assert_flags(inst, FLAG_V | FLAG_Z | FLAG_S); // normalize parameters be_parameter dstp(*this, inst.param(0), PTYPE_MR); be_parameter edstp(*this, inst.param(1), PTYPE_MR); be_parameter src1p(*this, inst.param(2), PTYPE_MRI); be_parameter src2p(*this, inst.param(3), PTYPE_MRI); normalize_commutative(src1p, src2p); const bool compute_hi = (dstp != edstp); if (inst.size() == 4) { // 32-bit form emit_mov_r32_p32(a, eax, src1p); // mov eax,src1p emit_mov_r32_p32(a, edx, src2p); // mov edx,src2p a.imul(edx); // imul edx emit_mov_p32_r32(a, dstp, eax); // mov dstp,eax if (compute_hi) emit_mov_p32_r32(a, edstp, edx); // mov edstp,edx if (inst.flags()) { a.pushfd(); a.test(edx, edx); a.pushfd(); // will have the sign flag + upper half zero a.pop(edx); a.test(eax, eax); a.pushfd(); // lower half zero a.pop(eax); a.and_(dword_ptr(esp, 0), ~(0x40 | 0x80)); a.mov(ecx, edx); a.and_(ecx, 0x80); // sign a.and_(eax, edx); a.and_(eax, 0x40); // zero a.or_(eax, ecx); a.or_(dword_ptr(esp, 0), eax); a.popfd(); } } else if (inst.size() == 8) { // 64-bit form a.mov(dword_ptr(esp, 24), inst.flags() ? 
1 : 0); // mov [esp+24],flags emit_mov_m64_p64(a, qword_ptr(esp, 16), src2p); // mov [esp+16],src2p emit_mov_m64_p64(a, qword_ptr(esp, 8), src1p); // mov [esp+8],src1p if (!compute_hi) a.mov(dword_ptr(esp, 4), imm(&m_reslo)); // mov [esp+4],&reslo else a.mov(dword_ptr(esp, 4), imm(&m_reshi)); // push [esp+4],&reshi a.mov(dword_ptr(esp, 0), imm(&m_reslo)); // mov [esp],&reslo a.call(imm(dmuls)); // call dmuls (calculate ZS flags as 64*64->128) if (inst.flags() != 0) a.push(dword_ptr(uintptr_t(flags_unmap), eax, 2)); // push flags_unmap[eax*4] a.mov(eax, MABS((uint32_t *)&m_reslo + 0)); // mov eax,reslo.lo a.mov(edx, MABS((uint32_t *)&m_reslo + 1)); // mov edx,reslo.hi emit_mov_p64_r64(a, dstp, eax, edx); // mov dstp,edx:eax if (compute_hi) { a.mov(eax, MABS((uint32_t *)&m_reshi + 0)); // mov eax,reshi.lo a.mov(edx, MABS((uint32_t *)&m_reshi + 1)); // mov edx,reshi.hi emit_mov_p64_r64(a, edstp, eax, edx); // mov edstp,edx:eax } if (inst.flags() != 0) a.popfd(); // popf } } //------------------------------------------------- // op_mulslw - process a MULSLW (32x32=32) opcode //------------------------------------------------- void drcbe_x86::op_mulslw(Assembler &a, const instruction &inst) { // validate instruction assert(inst.size() == 4 || inst.size() == 8); assert_no_condition(inst); assert_flags(inst, FLAG_V | FLAG_Z | FLAG_S); // normalize parameters be_parameter dstp(*this, inst.param(0), PTYPE_MR); be_parameter src1p(*this, inst.param(1), PTYPE_MRI); be_parameter src2p(*this, inst.param(2), PTYPE_MRI); normalize_commutative(src1p, src2p); if (inst.size() == 4) { // 32-bit form emit_mov_r32_p32(a, eax, src1p); // mov eax,src1p emit_mov_r32_p32(a, edx, src2p); // mov edx,src2p a.imul(edx); // imul edx emit_mov_p32_r32(a, dstp, eax); // mov dstp,eax if (inst.flags()) { a.test(eax, eax); a.pushfd(); // sign + zero a.mov(ecx, edx); a.cdq(); a.cmp(ecx, edx); a.pushfd(); a.pop(edx); a.and_(edx, 0x40); // zero a.xor_(edx, 0x40); a.shl(edx, 5); // turn into overflow flag a.or_(dword_ptr(esp, 0), edx); a.popfd(); } } else if (inst.size() == 8) { // 64-bit form a.mov(dword_ptr(esp, 24), inst.flags() ? 
1 : 0); // mov [esp+24],flags emit_mov_m64_p64(a, qword_ptr(esp, 16), src2p); // mov [esp+16],src2p emit_mov_m64_p64(a, qword_ptr(esp, 8), src1p); // mov [esp+8],src1p a.mov(dword_ptr(esp, 4), imm(&m_reslo)); // mov [esp+4],&reslo a.mov(dword_ptr(esp, 0), imm(&m_reslo)); // mov [esp],&reslo a.call(imm(dmuls)); // call dmuls (calculate ZS flags as 64*64->64) if (inst.flags() != 0) a.push(dword_ptr(uintptr_t(flags_unmap), eax, 2)); // push flags_unmap[eax*4] a.mov(eax, MABS((uint32_t *)&m_reslo + 0)); // mov eax,reslo.lo a.mov(edx, MABS((uint32_t *)&m_reslo + 1)); // mov edx,reslo.hi emit_mov_p64_r64(a, dstp, eax, edx); // mov dstp,edx:eax if (inst.flags() != 0) a.popfd(); // popf } } //------------------------------------------------- // op_divu - process a DIVU opcode //------------------------------------------------- void drcbe_x86::op_divu(Assembler &a, const instruction &inst) { // validate instruction assert(inst.size() == 4 || inst.size() == 8); assert_no_condition(inst); assert_flags(inst, FLAG_V | FLAG_Z | FLAG_S); // normalize parameters be_parameter dstp(*this, inst.param(0), PTYPE_MR); be_parameter edstp(*this, inst.param(1), PTYPE_MR); be_parameter src1p(*this, inst.param(2), PTYPE_MRI); be_parameter src2p(*this, inst.param(3), PTYPE_MRI); bool compute_rem = (dstp != edstp); if (inst.size() == 4) { // 32-bit form emit_mov_r32_p32(a, ecx, src2p); // mov ecx,src2p if (inst.flags() != 0) { a.mov(eax, 0xa0000000); // mov eax,0xa0000000 a.add(eax, eax); // add eax,eax } Label skip = a.newLabel(); a.short_().jecxz(skip); // jecxz skip emit_mov_r32_p32(a, eax, src1p); // mov eax,src1p a.xor_(edx, edx); // xor edx,edx a.div(ecx); // div ecx emit_mov_p32_r32(a, dstp, eax); // mov dstp,eax if (compute_rem) emit_mov_p32_r32(a, edstp, edx); // mov edstp,edx if (inst.flags() != 0) a.test(eax, eax); // test eax,eax a.bind(skip); // skip: reset_last_upper_lower_reg(); } else if (inst.size() == 8) { // 64-bit form emit_mov_m64_p64(a, qword_ptr(esp, 16), src2p); // mov [esp+16],src2p emit_mov_m64_p64(a, qword_ptr(esp, 8), src1p); // mov [esp+8],src1p if (!compute_rem) a.mov(dword_ptr(esp, 4), imm(&m_reslo)); // mov [esp+4],&reslo else a.mov(dword_ptr(esp, 4), imm(&m_reshi)); // push [esp+4],&reshi a.mov(dword_ptr(esp, 0), imm(&m_reslo)); // mov [esp],&reslo a.call(imm(ddivu)); // call ddivu if (inst.flags() != 0) a.push(dword_ptr(uintptr_t(flags_unmap), eax, 2)); // push flags_unmap[eax*4] a.mov(eax, MABS((uint32_t *)&m_reslo + 0)); // mov eax,reslo.lo a.mov(edx, MABS((uint32_t *)&m_reslo + 1)); // mov edx,reslo.hi emit_mov_p64_r64(a, dstp, eax, edx); // mov dstp,edx:eax if (compute_rem) { a.mov(eax, MABS((uint32_t *)&m_reshi + 0)); // mov eax,reshi.lo a.mov(edx, MABS((uint32_t *)&m_reshi + 1)); // mov edx,reshi.hi emit_mov_p64_r64(a, edstp, eax, edx); // mov edstp,edx:eax } if (inst.flags() != 0) a.popfd(); // popf } } //------------------------------------------------- // op_divs - process a DIVS opcode //------------------------------------------------- void drcbe_x86::op_divs(Assembler &a, const instruction &inst) { // validate instruction assert(inst.size() == 4 || inst.size() == 8); assert_no_condition(inst); assert_flags(inst, FLAG_V | FLAG_Z | FLAG_S); // normalize parameters be_parameter dstp(*this, inst.param(0), PTYPE_MR); be_parameter edstp(*this, inst.param(1), PTYPE_MR); be_parameter src1p(*this, inst.param(2), PTYPE_MRI); be_parameter src2p(*this, inst.param(3), PTYPE_MRI); bool compute_rem = (dstp != edstp); if (inst.size() == 4) { // 32-bit form emit_mov_r32_p32(a, ecx, src2p); 
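		// note: when flags are requested, the mov/add pair below bakes a flag image with
		// C and V set (0xa0000000 + 0xa0000000 overflows both unsigned and signed), and
		// jecxz skips the division when the divisor in ecx is zero, so a divide-by-zero
		// shows up as overflow instead of executing a faulting idiv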
// mov ecx,src2p if (inst.flags() != 0) { a.mov(eax, 0xa0000000); // mov eax,0xa0000000 a.add(eax, eax); // add eax,eax } Label skip = a.newLabel(); a.short_().jecxz(skip); // jecxz skip emit_mov_r32_p32(a, eax, src1p); // mov eax,src1p a.cdq(); // cdq a.idiv(ecx); // idiv ecx emit_mov_p32_r32(a, dstp, eax); // mov dstp,eax if (compute_rem) emit_mov_p32_r32(a, edstp, edx); // mov edstp,edx if (inst.flags() != 0) a.test(eax, eax); // test eax,eax a.bind(skip); // skip: reset_last_upper_lower_reg(); } else if (inst.size() == 8) { // 64-bit form emit_mov_m64_p64(a, qword_ptr(esp, 16), src2p); // mov [esp+16],src2p emit_mov_m64_p64(a, qword_ptr(esp, 8), src1p); // mov [esp+8],src1p if (!compute_rem) a.mov(dword_ptr(esp, 4), imm(&m_reslo)); // mov [esp+4],&reslo else a.mov(dword_ptr(esp, 4), imm(&m_reshi)); // push [esp+4],&reshi a.mov(dword_ptr(esp, 0), imm(&m_reslo)); // mov [esp],&reslo a.call(imm(ddivs)); // call ddivs if (inst.flags() != 0) a.push(dword_ptr(uintptr_t(flags_unmap), eax, 2)); // push flags_unmap[eax*4] a.mov(eax, MABS((uint32_t *)&m_reslo + 0)); // mov eax,reslo.lo a.mov(edx, MABS((uint32_t *)&m_reslo + 1)); // mov edx,reslo.hi emit_mov_p64_r64(a, dstp, eax, edx); // mov dstp,edx:eax if (compute_rem) { a.mov(eax, MABS((uint32_t *)&m_reshi + 0)); // mov eax,reshi.lo a.mov(edx, MABS((uint32_t *)&m_reshi + 1)); // mov edx,reshi.hi emit_mov_p64_r64(a, edstp, eax, edx); // mov edstp,edx:eax } if (inst.flags() != 0) a.popfd(); // popf } } //------------------------------------------------- // op_and - process a AND opcode //------------------------------------------------- void drcbe_x86::op_and(Assembler &a, const instruction &inst) { // validate instruction assert(inst.size() == 4 || inst.size() == 8); assert_no_condition(inst); assert_flags(inst, FLAG_Z | FLAG_S); // normalize parameters be_parameter dstp(*this, inst.param(0), PTYPE_MR); be_parameter src1p(*this, inst.param(1), PTYPE_MRI); be_parameter src2p(*this, inst.param(2), PTYPE_MRI); normalize_commutative(src1p, src2p); // pick a target register for the general case Gp const dstreg = dstp.select_register(eax, src2p); // 32-bit form if (inst.size() == 4) { // dstp == src1p in memory if (dstp.is_memory() && dstp == src1p) alu_op_param(a, Inst::kIdAnd, MABS(dstp.memory(), 4), src2p, // and [dstp],src2p [inst](Assembler &a, Operand const &dst, be_parameter const &src) { // optimize all-zero and all-one cases if (!inst.flags() && !src.immediate()) { a.mov(dst.as(), imm(0)); return true; } else if (!inst.flags() && u32(src.immediate()) == 0xffffffffU) return true; return false; }); // dstp == src2p in memory else if (dstp.is_memory() && dstp == src2p) alu_op_param(a, Inst::kIdAnd, MABS(dstp.memory(), 4), src1p, // and [dstp],src1p [inst](Assembler &a, Operand const &dst, be_parameter const &src) { // optimize all-zero and all-one cases if (!inst.flags() && !src.immediate()) { a.mov(dst.as(), imm(0)); return true; } else if (!inst.flags() && u32(src.immediate()) == 0xffffffffU) return true; return false; }); // AND with immediate 0xff else if (src2p.is_immediate_value(0xff) && !inst.flags()) { if (src1p.is_int_register()) { if (src1p.ireg() & 4) { if (dstreg.id() != src1p.ireg()) a.mov(dstreg, Gpd(src1p.ireg())); // mov dstreg,src1p a.and_(dstreg, 0xff); // and dstreg,0xff } else a.movzx(dstreg, GpbLo(src1p.ireg())); // movzx dstreg,src1p } else if (src1p.is_memory()) a.movzx(dstreg, MABS(src1p.memory(), 1)); // movzx dstreg,[src1p] emit_mov_p32_r32(a, dstp, dstreg); // mov dstp,dstreg } // AND with immediate 0xffff else if 
(src2p.is_immediate_value(0xffff) && !inst.flags()) { if (src1p.is_int_register()) a.movzx(dstreg, Gpw(src1p.ireg())); // movzx dstreg,src1p else if (src1p.is_memory()) a.movzx(dstreg, MABS(src1p.memory(), 2)); // movzx dstreg,[src1p] emit_mov_p32_r32(a, dstp, dstreg); // mov dstp,dstreg } // general case else { emit_mov_r32_p32(a, dstreg, src1p); // mov dstreg,src1p alu_op_param(a, Inst::kIdAnd, dstreg, src2p, // and dstreg,src2p [inst](Assembler &a, Operand const &dst, be_parameter const &src) { // optimize all-zero and all-one cases if (!inst.flags() && !src.immediate()) { a.xor_(dst.as(), dst.as()); return true; } else if (!inst.flags() && u32(src.immediate()) == 0xffffffffU) return true; return false; }); emit_mov_p32_r32(a, dstp, dstreg); // mov dstp,dstreg } } // 64-bit form else if (inst.size() == 8) { // dstp == src1p in memory if (dstp.is_memory() && dstp == src1p) emit_and_m64_p64(a, MABS(dstp.memory(0), 4), MABS(dstp.memory(4), 4), // and [dstp],src2p src2p, inst); // dstp == src2p in memory else if (dstp.is_memory() && dstp == src2p) emit_and_m64_p64(a, MABS(dstp.memory(0), 4), MABS(dstp.memory(4), 4), // and [dstp],src1p src1p, inst); // AND with immediate 0xff else if (src2p.is_immediate_value(0xff) && !inst.flags()) { if (src1p.is_int_register()) { if (src1p.ireg() & 4) { if (dstreg.id() != src1p.ireg()) a.mov(dstreg, Gpd(src1p.ireg())); // mov dstreg,src1p a.and_(dstreg, 0xff); // and dstreg,0xff } else a.movzx(dstreg, GpbLo(src1p.ireg())); // movzx dstreg,src1p } else if (src1p.is_memory()) a.movzx(dstreg, MABS(src1p.memory(), 1)); // movzx dstreg,[src1p] emit_mov_p32_r32(a, dstp, dstreg); // mov dstp,dstreg if (dstp.is_int_register()) a.mov(MABS(m_reghi[dstp.ireg()], 4), 0); // mov dsthi,0 else if (dstp.is_memory()) a.mov(MABS(dstp.memory(4), 4), 0); // mov dsthi,0 } // AND with immediate 0xffff else if (src2p.is_immediate_value(0xffff) && !inst.flags()) { if (src1p.is_int_register()) a.movzx(dstreg, Gpw(src1p.ireg())); // movzx dstreg,src1p else if (src1p.is_memory()) a.movzx(dstreg, MABS(src1p.memory(), 2)); // movzx dstreg,[src1p] emit_mov_p32_r32(a, dstp, dstreg); // mov dstp,dstreg if (dstp.is_int_register()) a.mov(MABS(m_reghi[dstp.ireg()], 4), 0); // mov dsthi,0 else if (dstp.is_memory()) a.mov(MABS(dstp.memory(4), 4), 0); // mov dsthi,0 } // AND with immediate 0xffffffff else if (src2p.is_immediate_value(0xffffffffU) && !inst.flags()) { emit_mov_r32_p32(a, dstreg, src1p); // mov dstreg,src1p emit_mov_p32_r32(a, dstp, dstreg); // mov dstp,dstreg if (dstp.is_int_register()) a.mov(MABS(m_reghi[dstp.ireg()], 4), 0); // mov dsthi,0 else if (dstp.is_memory()) a.mov(MABS(dstp.memory(4), 4), 0); // mov dsthi,0 } // AND with immediate 0xffffffff00000000 else if (src2p.is_immediate_value(0xffffffff00000000ULL) && !inst.flags()) { if (src1p != dstp) { emit_mov_r64_p64(a, Gp(), edx, src1p); // mov dstreg,src1p emit_mov_p64_r64(a, dstp, Gp(), edx); // mov dstp,dstreg } if (dstp.is_int_register()) a.xor_(Gpd(dstp.ireg()), Gpd(dstp.ireg())); // xor dstlo,dstlo else if (dstp.is_memory()) a.mov(MABS(dstp.memory(0), 4), 0); // mov dstlo,0 } // AND with immediate <= 0xffffffff else if (src2p.is_immediate() && src2p.immediate() <= 0xffffffffU && !inst.flags()) { emit_mov_r32_p32(a, dstreg, src1p); // mov dstreg,src1p alu_op_param(a, Inst::kIdAnd, dstreg, src2p, // and dstreg,src2p [inst](Assembler &a, Operand const &dst, be_parameter const &src) { // optimize all-zero and all-one cases if (!inst.flags() && !src.immediate()) { a.xor_(dst.as(), dst.as()); return true; } else if 
(!inst.flags() && u32(src.immediate()) == 0xffffffffU) return true; return false; }); emit_mov_p32_r32(a, dstp, dstreg); // mov dstp,dstreg if (dstp.is_int_register()) a.mov(MABS(m_reghi[dstp.ireg()], 4), 0); // mov dsthi,0 else if (dstp.is_memory()) a.mov(MABS(dstp.memory(4), 4), 0); // mov dsthi,0 } // general case else { emit_mov_r64_p64(a, dstreg, edx, src1p); // mov edx:dstreg,[src1p] emit_and_r64_p64(a, dstreg, edx, src2p, inst); // and edx:dstreg,src2p emit_mov_p64_r64(a, dstp, dstreg, edx); // mov dstp,edx:dstreg } } } //------------------------------------------------- // op_test - process a TEST opcode //------------------------------------------------- void drcbe_x86::op_test(Assembler &a, const instruction &inst) { // validate instruction assert(inst.size() == 4 || inst.size() == 8); assert_no_condition(inst); assert_flags(inst, FLAG_Z | FLAG_S); // normalize parameters be_parameter src1p(*this, inst.param(0), PTYPE_MRI); be_parameter src2p(*this, inst.param(1), PTYPE_MRI); normalize_commutative(src1p, src2p); // pick a target register for the general case Gp const src1reg = src1p.select_register(eax); // 32-bit form if (inst.size() == 4) { // src1p in memory if (src1p.is_memory()) alu_op_param(a, Inst::kIdTest, MABS(src1p.memory(), 4), src2p); // test [src1p],src2p // general case else { emit_mov_r32_p32(a, src1reg, src1p); // mov src1reg,src1p alu_op_param(a, Inst::kIdTest, src1reg, src2p); // test src1reg,src2p } } // 64-bit form else if (inst.size() == 8) { // src1p in memory if (src1p.is_memory()) alu_op_param(a, Inst::kIdTest, Inst::kIdTest, // test [dstp],src2p MABS(src1p.memory(0), 4), MABS(src1p.memory(4), 4), src2p, inst.flags() & FLAG_Z); // general case else { emit_mov_r64_p64(a, src1reg, edx, src1p); // mov src1reg:dstp,[src1p] alu_op_param(a, Inst::kIdTest, Inst::kIdTest, // test src1reg:dstp,src2p src1reg, edx, src2p, inst.flags() & FLAG_Z); } } } //------------------------------------------------- // op_or - process a OR opcode //------------------------------------------------- void drcbe_x86::op_or(Assembler &a, const instruction &inst) { // validate instruction assert(inst.size() == 4 || inst.size() == 8); assert_no_condition(inst); assert_flags(inst, FLAG_Z | FLAG_S); // normalize parameters be_parameter dstp(*this, inst.param(0), PTYPE_MR); be_parameter src1p(*this, inst.param(1), PTYPE_MRI); be_parameter src2p(*this, inst.param(2), PTYPE_MRI); normalize_commutative(src1p, src2p); // pick a target register for the general case Gp const dstreg = dstp.select_register(eax, src2p); // 32-bit form if (inst.size() == 4) { // dstp == src1p in memory if (dstp.is_memory() && dstp == src1p) alu_op_param(a, Inst::kIdOr, MABS(dstp.memory(), 4), src2p, // or [dstp],src2p [inst](Assembler &a, Operand const &dst, be_parameter const &src) { // optimize all-zero and all-one cases if (!inst.flags() && u32(src.immediate()) == 0xffffffffU) { a.mov(dst.as(), imm(-1)); return true; } else if (!inst.flags() && !src.immediate()) return true; return false; }); // dstp == src2p in memory else if (dstp.is_memory() && dstp == src2p) alu_op_param(a, Inst::kIdOr, MABS(dstp.memory(), 4), src1p, // or [dstp],src1p [inst](Assembler &a, Operand const &dst, be_parameter const &src) { // optimize all-zero and all-one cases if (!inst.flags() && u32(src.immediate()) == 0xffffffffU) { a.mov(dst.as(), imm(-1)); return true; } else if (!inst.flags() && !src.immediate()) return true; return false; }); // general case else { emit_mov_r32_p32(a, dstreg, src1p); // mov dstreg,src1p alu_op_param(a, 
Inst::kIdOr, dstreg, src2p, // or dstreg,src2p [inst](Assembler &a, Operand const &dst, be_parameter const &src) { // optimize all-zero and all-one cases if (!inst.flags() && u32(src.immediate()) == 0xffffffffU) { a.mov(dst.as(), imm(-1)); return true; } else if (!inst.flags() && !src.immediate()) return true; return false; }); emit_mov_p32_r32(a, dstp, dstreg); // mov dstp,dstreg } } // 64-bit form else if (inst.size() == 8) { // dstp == src1p in memory if (dstp.is_memory() && dstp == src1p) emit_or_m64_p64(a, MABS(dstp.memory(0), 4), MABS(dstp.memory(4), 4), // or [dstp],src2p src2p, inst); // dstp == src2p in memory else if (dstp.is_memory() && dstp == src2p) emit_or_m64_p64(a, MABS(dstp.memory(0), 4), MABS(dstp.memory(4), 4), // or [dstp],src1p src1p, inst); // general case else { emit_mov_r64_p64(a, dstreg, edx, src1p); // mov edx:dstreg,[src1p] emit_or_r64_p64(a, dstreg, edx, src2p, inst); // or edx:dstreg,src2p emit_mov_p64_r64(a, dstp, dstreg, edx); // mov dstp,edx:dstreg } } } //------------------------------------------------- // op_xor - process a XOR opcode //------------------------------------------------- void drcbe_x86::op_xor(Assembler &a, const instruction &inst) { // validate instruction assert(inst.size() == 4 || inst.size() == 8); assert_no_condition(inst); assert_flags(inst, FLAG_Z | FLAG_S); // normalize parameters be_parameter dstp(*this, inst.param(0), PTYPE_MR); be_parameter src1p(*this, inst.param(1), PTYPE_MRI); be_parameter src2p(*this, inst.param(2), PTYPE_MRI); normalize_commutative(src1p, src2p); // pick a target register for the general case Gp const dstreg = dstp.select_register(eax, src2p); // 32-bit form if (inst.size() == 4) { // dstp == src1p in memory if (dstp.is_memory() && dstp == src1p) alu_op_param(a, Inst::kIdXor, MABS(dstp.memory(), 4), src2p, // xor [dstp],src2p [inst](Assembler &a, Operand const &dst, be_parameter const &src) { // optimize all-zero and all-one cases if (!inst.flags() && u32(src.immediate()) == 0xffffffffU) { a.not_(dst.as()); return true; } else if (!inst.flags() && !src.immediate()) return true; return false; }); // dstp == src2p in memory else if (dstp.is_memory() && dstp == src2p) alu_op_param(a, Inst::kIdXor, MABS(dstp.memory(), 4), src1p, // xor [dstp],src1p [inst](Assembler &a, Operand const &dst, be_parameter const &src) { // optimize all-zero and all-one cases if (!inst.flags() && u32(src.immediate()) == 0xffffffffU) { a.not_(dst.as()); return true; } else if (!inst.flags() && !src.immediate()) return true; return false; }); // general case else { emit_mov_r32_p32(a, dstreg, src1p); // mov dstreg,src1p alu_op_param(a, Inst::kIdXor, dstreg, src2p, // xor dstreg,src2p [inst](Assembler &a, Operand const &dst, be_parameter const &src) { // optimize all-zero and all-one cases if (!inst.flags() && u32(src.immediate()) == 0xffffffffU) { a.not_(dst.as()); return true; } else if (!inst.flags() && !src.immediate()) return true; return false; }); emit_mov_p32_r32(a, dstp, dstreg); // mov dstp,dstreg } } // 64-bit form else if (inst.size() == 8) { // dstp == src1p in memory if (dstp.is_memory() && dstp == src1p) emit_xor_m64_p64(a, MABS(dstp.memory(0), 4), MABS(dstp.memory(4), 4), // xor [dstp],src2p src2p, inst); // dstp == src1p in memory else if (dstp.is_memory() && dstp == src2p) emit_xor_m64_p64(a, MABS(dstp.memory(0), 4), MABS(dstp.memory(4), 4), // xor [dstp],src1p src1p, inst); // general case else { emit_mov_r64_p64(a, dstreg, edx, src1p); // mov edx:dstreg,[src1p] emit_xor_r64_p64(a, dstreg, edx, src2p, inst); // xor 
edx:dstreg,src2p emit_mov_p64_r64(a, dstp, dstreg, edx); // mov dstp,edx:dstreg } } } //------------------------------------------------- // op_lzcnt - process a LZCNT opcode //------------------------------------------------- void drcbe_x86::op_lzcnt(Assembler &a, const instruction &inst) { // validate instruction assert(inst.size() == 4 || inst.size() == 8); assert_no_condition(inst); assert_flags(inst, FLAG_Z | FLAG_S); // normalize parameters be_parameter dstp(*this, inst.param(0), PTYPE_MR); be_parameter srcp(*this, inst.param(1), PTYPE_MRI); // pick a target register for the general case Gp const dstreg = dstp.select_register(eax); if (inst.flags()) { a.xor_(eax, eax); // reset status flags a.test(eax, eax); } // 32-bit form if (inst.size() == 4) { emit_mov_r32_p32(a, dstreg, srcp); // mov dstreg,src1p a.mov(ecx, 32 ^ 31); // mov ecx,32 ^ 31 a.bsr(dstreg, dstreg); // bsr dstreg,dstreg a.cmovz(dstreg, ecx); // cmovz dstreg,ecx a.xor_(dstreg, 31); // xor dstreg,31 emit_mov_p32_r32(a, dstp, dstreg); // mov dstp,dstreg a.test(dstreg, dstreg); } // 64-bit form else if (inst.size() == 8) { emit_mov_r64_p64(a, dstreg, edx, srcp); // mov dstreg:edx,srcp Label skip = a.newLabel(); Label end = a.newLabel(); a.bsr(edx, edx); a.short_().jz(skip); a.xor_(edx, 31 ^ 63); a.mov(dstreg, edx); a.short_().jmp(end); a.bind(skip); a.mov(edx, 64 ^ 63); a.bsr(dstreg, dstreg); a.cmovz(dstreg, edx); a.bind(end); a.xor_(dstreg, 63); a.mov(edx, 0); emit_mov_p64_r64(a, dstp, dstreg, edx); // mov dstp,edx:dstreg a.test(dstreg, dstreg); } } //------------------------------------------------- // op_tzcnt - process a TZCNT opcode //------------------------------------------------- void drcbe_x86::op_tzcnt(Assembler &a, const instruction &inst) { // validate instruction assert(inst.size() == 4 || inst.size() == 8); assert_no_condition(inst); assert_flags(inst, FLAG_Z | FLAG_S); // normalize parameters be_parameter dstp(*this, inst.param(0), PTYPE_MR); be_parameter srcp(*this, inst.param(1), PTYPE_MRI); Gp const dstreg = dstp.select_register(eax); if (inst.flags()) { a.xor_(eax, eax); // reset status flags a.test(eax, eax); } // 32-bit form if (inst.size() == 4) { emit_mov_r32_p32(a, dstreg, srcp); // mov dstreg,src1p a.mov(ecx, 32); // mov ecx,32 a.bsf(dstreg, dstreg); // bsf dstreg,dstreg a.cmovz(dstreg, ecx); // cmovz dstreg,ecx emit_mov_p32_r32(a, dstp, dstreg); // mov dstp,dstreg a.mov(ecx, dstreg); a.xor_(ecx, 32); } // 64-bit form else if (inst.size() == 8) { Label skip = a.newLabel(); emit_mov_r64_p64(a, dstreg, edx, srcp); // mov dstreg:edx,srcp a.bsf(dstreg, dstreg); // bsf dstreg,dstreg a.short_().jnz(skip); // jnz skip a.mov(ecx, 32); // mov ecx,32 a.bsf(dstreg, edx); // bsf dstreg,edx a.cmovz(dstreg, ecx); // cmovz dstreg,ecx a.add(dstreg, 32); // add dstreg,32 a.bind(skip); // skip: reset_last_upper_lower_reg(); a.xor_(edx, edx); // xor edx,edx emit_mov_p64_r64(a, dstp, dstreg, edx); // mov dstp,edx:dstreg a.mov(ecx, dstreg); a.xor_(ecx, 64); } } //------------------------------------------------- // op_bswap - process a BSWAP opcode //------------------------------------------------- void drcbe_x86::op_bswap(Assembler &a, const instruction &inst) { // validate instruction assert(inst.size() == 4 || inst.size() == 8); assert_no_condition(inst); assert_flags(inst, FLAG_Z | FLAG_S); // normalize parameters be_parameter dstp(*this, inst.param(0), PTYPE_MR); be_parameter srcp(*this, inst.param(1), PTYPE_MRI); // pick a target register for the general case Gp const dstreg = dstp.select_register(eax); // 
32-bit form if (inst.size() == 4) { emit_mov_r32_p32(a, dstreg, srcp); // mov dstreg,src1p a.bswap(dstreg); // bswap dstreg if (inst.flags() != 0) a.test(dstreg, dstreg); // test dstreg,dstreg emit_mov_p32_r32(a, dstp, dstreg); // mov dstp,dstreg } // 64-bit form else if (inst.size() == 8) { emit_mov_r64_p64(a, edx, dstreg, srcp); // mov dstreg:edx,srcp a.bswap(dstreg); // bswap dstreg a.bswap(edx); // bswap edx emit_mov_p64_r64(a, dstp, dstreg, edx); // mov dstp,edx:dstreg if (inst.flags() == FLAG_Z) a.or_(edx, dstreg); // or edx,eax else if (inst.flags() == FLAG_S) a.test(edx, edx); // test edx,edx else { a.movzx(ecx, dstreg.r16()); // movzx ecx,dstreg a.or_(edx, ecx); // or edx,ecx a.mov(ecx, dstreg); // mov ecx,dstreg a.shr(ecx, 16); // shr ecx,16 a.or_(edx, ecx); // or edx,ecx } } } //------------------------------------------------- // op_shl - process a SHL opcode //------------------------------------------------- void drcbe_x86::op_shl(Assembler &a, const instruction &inst) { // validate instruction assert(inst.size() == 4 || inst.size() == 8); assert_no_condition(inst); assert_flags(inst, FLAG_C | FLAG_Z | FLAG_S); // normalize parameters be_parameter dstp(*this, inst.param(0), PTYPE_MR); be_parameter src1p(*this, inst.param(1), PTYPE_MRI); be_parameter src2p(*this, inst.param(2), PTYPE_MRI); // pick a target register for the general case Gp const dstreg = dstp.select_register(eax, src2p); // 32-bit form if (inst.size() == 4) { // dstp == src1p in memory if (dstp.is_memory() && dstp == src1p) shift_op_param(a, Inst::kIdShl, inst.size(), MABS(dstp.memory(), 4), src2p, // shl [dstp],src2p [inst](Assembler &a, Operand const &dst, be_parameter const &src) { // optimize zero case return (!inst.flags() && !src.immediate()); }, true); // general case else { emit_mov_r32_p32(a, dstreg, src1p); // mov dstreg,src1p shift_op_param(a, Inst::kIdShl, inst.size(), dstreg, src2p, // shl dstreg,src2p [inst](Assembler &a, Operand const &dst, be_parameter const &src) { // optimize zero case return (!inst.flags() && !src.immediate()); }, true); emit_mov_p32_r32(a, dstp, dstreg); // mov dstp,dstreg } } // 64-bit form else if (inst.size() == 8) { // general case emit_mov_r64_p64(a, dstreg, edx, src1p); // mov edx:dstreg,[src1p] emit_shl_r64_p64(a, dstreg, edx, src2p, inst); // shl edx:dstreg,src2p emit_mov_p64_r64(a, dstp, dstreg, edx); // mov dstp,edx:dstreg } } //------------------------------------------------- // op_shr - process a SHR opcode //------------------------------------------------- void drcbe_x86::op_shr(Assembler &a, const instruction &inst) { // validate instruction assert(inst.size() == 4 || inst.size() == 8); assert_no_condition(inst); assert_flags(inst, FLAG_C | FLAG_Z | FLAG_S); // normalize parameters be_parameter dstp(*this, inst.param(0), PTYPE_MR); be_parameter src1p(*this, inst.param(1), PTYPE_MRI); be_parameter src2p(*this, inst.param(2), PTYPE_MRI); // pick a target register for the general case Gp const dstreg = dstp.select_register(eax, src2p); // 32-bit form if (inst.size() == 4) { // dstp == src1p in memory if (dstp.is_memory() && dstp == src1p) shift_op_param(a, Inst::kIdShr, inst.size(), MABS(dstp.memory(), 4), src2p, // shr [dstp],src2p [inst](Assembler &a, Operand const &dst, be_parameter const &src) { // optimize zero case return (!inst.flags() && !src.immediate()); }, true); // general case else { emit_mov_r32_p32(a, dstreg, src1p); // mov dstreg,src1p shift_op_param(a, Inst::kIdShr, inst.size(), dstreg, src2p, // shr dstreg,src2p [inst](Assembler &a, Operand 
const &dst, be_parameter const &src) { // optimize zero case return (!inst.flags() && !src.immediate()); }, true); emit_mov_p32_r32(a, dstp, dstreg); // mov dstp,dstreg } } // 64-bit form else if (inst.size() == 8) { // general case emit_mov_r64_p64(a, dstreg, edx, src1p); // mov edx:dstreg,[src1p] emit_shr_r64_p64(a, dstreg, edx, src2p, inst); // shr edx:dstreg,src2p emit_mov_p64_r64(a, dstp, dstreg, edx); // mov dstp,edx:dstreg } } //------------------------------------------------- // op_sar - process a SAR opcode //------------------------------------------------- void drcbe_x86::op_sar(Assembler &a, const instruction &inst) { // validate instruction assert(inst.size() == 4 || inst.size() == 8); assert_no_condition(inst); assert_flags(inst, FLAG_C | FLAG_Z | FLAG_S); // normalize parameters be_parameter dstp(*this, inst.param(0), PTYPE_MR); be_parameter src1p(*this, inst.param(1), PTYPE_MRI); be_parameter src2p(*this, inst.param(2), PTYPE_MRI); // pick a target register for the general case Gp const dstreg = dstp.select_register(eax, src2p); // 32-bit form if (inst.size() == 4) { // dstp == src1p in memory if (dstp.is_memory() && dstp == src1p) shift_op_param(a, Inst::kIdSar, inst.size(), MABS(dstp.memory(), 4), src2p, // sar [dstp],src2p [inst](Assembler &a, Operand const &dst, be_parameter const &src) { // optimize zero case return (!inst.flags() && !src.immediate()); }, true); // general case else { emit_mov_r32_p32(a, dstreg, src1p); // mov dstreg,src1p shift_op_param(a, Inst::kIdSar, inst.size(), dstreg, src2p, // sar dstreg,src2p [inst](Assembler &a, Operand const &dst, be_parameter const &src) { // optimize zero case return (!inst.flags() && !src.immediate()); }, true); emit_mov_p32_r32(a, dstp, dstreg); // mov dstp,dstreg } } // 64-bit form else if (inst.size() == 8) { // general case emit_mov_r64_p64(a, dstreg, edx, src1p); // mov edx:dstreg,[src1p] emit_sar_r64_p64(a, dstreg, edx, src2p, inst); // sar edx:dstreg,src2p emit_mov_p64_r64(a, dstp, dstreg, edx); // mov dstp,edx:dstreg } } //------------------------------------------------- // op_rol - process a rol opcode //------------------------------------------------- void drcbe_x86::op_rol(Assembler &a, const instruction &inst) { // validate instruction assert(inst.size() == 4 || inst.size() == 8); assert_no_condition(inst); assert_flags(inst, FLAG_C | FLAG_Z | FLAG_S); // normalize parameters be_parameter dstp(*this, inst.param(0), PTYPE_MR); be_parameter src1p(*this, inst.param(1), PTYPE_MRI); be_parameter src2p(*this, inst.param(2), PTYPE_MRI); // pick a target register for the general case Gp const dstreg = dstp.select_register(eax, src2p); // 32-bit form if (inst.size() == 4) { // dstp == src1p in memory if (dstp.is_memory() && dstp == src1p) shift_op_param(a, Inst::kIdRol, inst.size(), MABS(dstp.memory(), 4), src2p, // rol [dstp],src2p [inst](Assembler &a, Operand const &dst, be_parameter const &src) { // optimize zero case return (!inst.flags() && !src.immediate()); }, true); // general case else { emit_mov_r32_p32(a, dstreg, src1p); // mov dstreg,src1p shift_op_param(a, Inst::kIdRol, inst.size(), dstreg, src2p, // rol dstreg,src2p [inst](Assembler &a, Operand const &dst, be_parameter const &src) { // optimize zero case return (!inst.flags() && !src.immediate()); }, true); emit_mov_p32_r32(a, dstp, dstreg); // mov dstp,dstreg } } // 64-bit form else if (inst.size() == 8) { // general case emit_mov_r64_p64(a, dstreg, edx, src1p); // mov edx:dstreg,[src1p] emit_rol_r64_p64(a, dstreg, edx, src2p, inst); // rol 
edx:dstreg,src2p emit_mov_p64_r64(a, dstp, dstreg, edx); // mov dstp,edx:dstreg } } //------------------------------------------------- // op_ror - process a ROR opcode //------------------------------------------------- void drcbe_x86::op_ror(Assembler &a, const instruction &inst) { // validate instruction assert(inst.size() == 4 || inst.size() == 8); assert_no_condition(inst); assert_flags(inst, FLAG_C | FLAG_Z | FLAG_S); // normalize parameters be_parameter dstp(*this, inst.param(0), PTYPE_MR); be_parameter src1p(*this, inst.param(1), PTYPE_MRI); be_parameter src2p(*this, inst.param(2), PTYPE_MRI); // pick a target register for the general case Gp const dstreg = dstp.select_register(eax, src2p); // 32-bit form if (inst.size() == 4) { // dstp == src1p in memory if (dstp.is_memory() && dstp == src1p) shift_op_param(a, Inst::kIdRor, inst.size(), MABS(dstp.memory(), 4), src2p, // ror [dstp],src2p [inst](Assembler &a, Operand const &dst, be_parameter const &src) { // optimize zero case return (!inst.flags() && !src.immediate()); }, true); // general case else { emit_mov_r32_p32(a, dstreg, src1p); // mov dstreg,src1p shift_op_param(a, Inst::kIdRor, inst.size(), dstreg, src2p, // rol dstreg,src2p [inst](Assembler &a, Operand const &dst, be_parameter const &src) { // optimize zero case return (!inst.flags() && !src.immediate()); }, true); emit_mov_p32_r32(a, dstp, dstreg); // mov dstp,dstreg } } // 64-bit form else if (inst.size() == 8) { // general case emit_mov_r64_p64(a, dstreg, edx, src1p); // mov edx:dstreg,[src1p] emit_ror_r64_p64(a, dstreg, edx, src2p, inst); // ror edx:dstreg,src2p emit_mov_p64_r64(a, dstp, dstreg, edx); // mov dstp,edx:dstreg } } //------------------------------------------------- // op_rolc - process a ROLC opcode //------------------------------------------------- void drcbe_x86::op_rolc(Assembler &a, const instruction &inst) { // validate instruction assert(inst.size() == 4 || inst.size() == 8); assert_no_condition(inst); assert_flags(inst, FLAG_C | FLAG_Z | FLAG_S); // normalize parameters be_parameter dstp(*this, inst.param(0), PTYPE_MR); be_parameter src1p(*this, inst.param(1), PTYPE_MRI); be_parameter src2p(*this, inst.param(2), PTYPE_MRI); // pick a target register for the general case Gp const dstreg = dstp.select_register(eax, src2p); // 32-bit form if (inst.size() == 4) { // dstp == src1p in memory if (dstp.is_memory() && dstp == src1p) shift_op_param(a, Inst::kIdRcl, inst.size(), MABS(dstp.memory(), 4), src2p, // rcl [dstp],src2p [inst](Assembler &a, Operand const &dst, be_parameter const &src) { // optimize zero case return (!inst.flags() && !src.immediate()); }, true); // general case else { emit_mov_r32_p32_keepflags(a, dstreg, src1p); // mov dstreg,src1p shift_op_param(a, Inst::kIdRcl, inst.size(), dstreg, src2p, // rcl dstreg,src2p [inst](Assembler &a, Operand const &dst, be_parameter const &src) { // optimize zero case return (!inst.flags() && !src.immediate()); }, true); emit_mov_p32_r32(a, dstp, dstreg); // mov dstp,dstreg } } // 64-bit form else if (inst.size() == 8) { // general case emit_mov_r64_p64_keepflags(a, dstreg, edx, src1p); // mov edx:dstreg,[src1p] emit_rcl_r64_p64(a, dstreg, edx, src2p, inst); // rcl edx:dstreg,src2p emit_mov_p64_r64(a, dstp, dstreg, edx); // mov dstp,edx:dstreg } } //------------------------------------------------- // op_rorc - process a RORC opcode //------------------------------------------------- void drcbe_x86::op_rorc(Assembler &a, const instruction &inst) { // validate instruction assert(inst.size() == 4 || 
inst.size() == 8); assert_no_condition(inst); assert_flags(inst, FLAG_C | FLAG_Z | FLAG_S); // normalize parameters be_parameter dstp(*this, inst.param(0), PTYPE_MR); be_parameter src1p(*this, inst.param(1), PTYPE_MRI); be_parameter src2p(*this, inst.param(2), PTYPE_MRI); // pick a target register for the general case Gp const dstreg = dstp.select_register(eax, src2p); // 32-bit form if (inst.size() == 4) { // dstp == src1p in memory if (dstp.is_memory() && dstp == src1p) shift_op_param(a, Inst::kIdRcr, inst.size(), MABS(dstp.memory(), 4), src2p, // rcr [dstp],src2p [inst](Assembler &a, Operand const &dst, be_parameter const &src) { // optimize zero case return (!inst.flags() && !src.immediate()); }, true); // general case else { emit_mov_r32_p32_keepflags(a, dstreg, src1p); // mov dstreg,src1p shift_op_param(a, Inst::kIdRcr, inst.size(), dstreg, src2p, // rcr dstreg,src2p [inst](Assembler &a, Operand const &dst, be_parameter const &src) { // optimize zero case return (!inst.flags() && !src.immediate()); }, true); emit_mov_p32_r32(a, dstp, dstreg); // mov dstp,dstreg } } // 64-bit form else if (inst.size() == 8) { // general case emit_mov_r64_p64_keepflags(a, dstreg, edx, src1p); // mov edx:dstreg,[src1p] emit_rcr_r64_p64(a, dstreg, edx, src2p, inst); // rcr edx:dstreg,src2p emit_mov_p64_r64(a, dstp, dstreg, edx); // mov dstp,edx:dstreg } } //************************************************************************** // FLOATING POINT OPERATIONS //************************************************************************** //------------------------------------------------- // op_fload - process a FLOAD opcode //------------------------------------------------- void drcbe_x86::op_fload(Assembler &a, const instruction &inst) { // validate instruction assert(inst.size() == 4 || inst.size() == 8); assert_no_condition(inst); assert_no_flags(inst); // normalize parameters be_parameter dstp(*this, inst.param(0), PTYPE_MF); be_parameter basep(*this, inst.param(1), PTYPE_M); be_parameter indp(*this, inst.param(2), PTYPE_MRI); if (indp.is_immediate()) { // immediate index a.mov(eax, MABS(basep.memory(inst.size()*indp.immediate()))); if (inst.size() == 8) a.mov(edx, MABS(basep.memory(4 + inst.size()*indp.immediate()))); } else { // other index Gp const indreg = indp.select_register(ecx); emit_mov_r32_p32_keepflags(a, indreg, indp); a.mov(eax, ptr(uintptr_t(basep.memory(0)), indreg, (inst.size() == 8) ? 3 : 2)); if (inst.size() == 8) a.mov(edx, ptr(uintptr_t(basep.memory(4)), indreg, (inst.size() == 8) ? 
3 : 2)); } // general case a.mov(MABS(dstp.memory(0)), eax); if (inst.size() == 8) a.mov(MABS(dstp.memory(4)), edx); } //------------------------------------------------- // op_fstore - process a FSTORE opcode //------------------------------------------------- void drcbe_x86::op_fstore(Assembler &a, const instruction &inst) { // validate instruction assert(inst.size() == 4 || inst.size() == 8); assert_no_condition(inst); assert_no_flags(inst); // normalize parameters be_parameter basep(*this, inst.param(0), PTYPE_M); be_parameter indp(*this, inst.param(1), PTYPE_MRI); be_parameter srcp(*this, inst.param(2), PTYPE_MF); a.mov(eax, MABS(srcp.memory(0))); if (inst.size() == 8) a.mov(edx, MABS(srcp.memory(4))); if (indp.is_immediate()) { // immediate index a.mov(MABS(basep.memory(inst.size()*indp.immediate())), eax); if (inst.size() == 8) a.mov(MABS(basep.memory(4 + inst.size()*indp.immediate())), edx); } else { // other index Gp const indreg = indp.select_register(ecx); emit_mov_r32_p32_keepflags(a, indreg, indp); a.mov(ptr(uintptr_t(basep.memory(0)), indreg, (inst.size() == 8) ? 3 : 2), eax); if (inst.size() == 8) a.mov(ptr(uintptr_t(basep.memory(4)), indreg, (inst.size() == 8) ? 3 : 2), edx); } } //------------------------------------------------- // op_fread - process a FREAD opcode //------------------------------------------------- void drcbe_x86::op_fread(Assembler &a, const instruction &inst) { // validate instruction assert(inst.size() == 4 || inst.size() == 8); assert_no_condition(inst); assert_no_flags(inst); // normalize parameters be_parameter dstp(*this, inst.param(0), PTYPE_MF); be_parameter addrp(*this, inst.param(1), PTYPE_MRI); parameter const &spacep = inst.param(2); assert(spacep.is_size_space()); assert((1 << spacep.size()) == inst.size()); // set up a call to the read dword/qword handler auto const &accessors = m_memory_accessors[spacep.space()]; auto const &accessor = (inst.size() == 4) ? accessors.read_dword : accessors.read_qword; emit_mov_m32_p32(a, dword_ptr(esp, USE_THISCALL ? 0 : 4), addrp); if (USE_THISCALL) a.mov(ecx, imm(accessor.obj)); else a.mov(dword_ptr(esp, 0), imm(accessor.obj)); a.call(imm(accessor.func)); if (USE_THISCALL) a.sub(esp, 4); // store result if (inst.size() == 4) emit_mov_p32_r32(a, dstp, eax); else if (inst.size() == 8) emit_mov_p64_r64(a, dstp, eax, edx); } //------------------------------------------------- // op_fwrite - process a FWRITE opcode //------------------------------------------------- void drcbe_x86::op_fwrite(Assembler &a, const instruction &inst) { // validate instruction assert(inst.size() == 4 || inst.size() == 8); assert_no_condition(inst); assert_no_flags(inst); // normalize parameters be_parameter addrp(*this, inst.param(0), PTYPE_MRI); be_parameter srcp(*this, inst.param(1), PTYPE_MF); parameter const &spacep = inst.param(2); assert(spacep.is_size_space()); assert((1 << spacep.size()) == inst.size()); // set up a call to the write dword/qword handler auto const &accessors = m_memory_accessors[spacep.space()]; auto const &accessor = (inst.size() == 4) ? accessors.write_dword : accessors.write_qword; if (inst.size() == 4) emit_mov_m32_p32(a, dword_ptr(esp, USE_THISCALL ? 4 : 8), srcp); else if (inst.size() == 8) emit_mov_m64_p64(a, qword_ptr(esp, USE_THISCALL ? 4 : 8), srcp); emit_mov_m32_p32(a, dword_ptr(esp, USE_THISCALL ? 0 : 4), addrp); if (USE_THISCALL) a.mov(ecx, imm(accessor.obj)); else a.mov(dword_ptr(esp, 0), imm(accessor.obj)); a.call(imm(accessor.func)); if (USE_THISCALL) a.sub(esp, (inst.size() == 4) ? 
8 : 12); } //------------------------------------------------- // op_fmov - process a FMOV opcode //------------------------------------------------- void drcbe_x86::op_fmov(Assembler &a, const instruction &inst) { // validate instruction assert(inst.size() == 4 || inst.size() == 8); assert_any_condition(inst); assert_no_flags(inst); // normalize parameters be_parameter dstp(*this, inst.param(0), PTYPE_MF); be_parameter srcp(*this, inst.param(1), PTYPE_MF); // always start with a jmp Label skip; if (inst.condition() != uml::COND_ALWAYS) { skip = a.newLabel(); a.short_().j(X86_NOT_CONDITION(inst.condition()), skip); } // general case a.mov(eax, MABS(srcp.memory(0))); if (inst.size() == 8) a.mov(edx, MABS(srcp.memory(4))); a.mov(MABS(dstp.memory(0)), eax); if (inst.size() == 8) a.mov(MABS(dstp.memory(4)), edx); // resolve the jump if (inst.condition() != uml::COND_ALWAYS) { a.bind(skip); reset_last_upper_lower_reg(); } } //------------------------------------------------- // op_ftoint - process a FTOINT opcode //------------------------------------------------- void drcbe_x86::op_ftoint(Assembler &a, const instruction &inst) { // validate instruction assert(inst.size() == 4 || inst.size() == 8); assert_no_condition(inst); assert_no_flags(inst); // normalize parameters be_parameter dstp(*this, inst.param(0), PTYPE_MR); be_parameter srcp(*this, inst.param(1), PTYPE_MF); parameter const &sizep = inst.param(2); assert(sizep.is_size()); parameter const &roundp = inst.param(3); assert(roundp.is_rounding()); // set rounding mode if necessary if (roundp.rounding() != ROUND_DEFAULT && (!m_sse3 || roundp.rounding() != ROUND_TRUNC)) { a.fstcw(MABS(&m_fmodesave)); // fstcw [fmodesave] a.fldcw(MABS(&fp_control[roundp.rounding()])); // fldcw fpcontrol[roundp] } // general case emit_fld_p(a, inst.size(), srcp); // fld srcp // 4-byte integer case if (sizep.size() == SIZE_DWORD) { if (dstp.is_memory()) { if (!m_sse3 || roundp.rounding() != ROUND_TRUNC) a.fistp(MABS(dstp.memory(), 4)); // fistp [dstp] else a.fisttp(MABS(dstp.memory(), 4)); // fisttp [dstp] } else if (dstp.is_int_register()) { if (!m_sse3 || roundp.rounding() != ROUND_TRUNC) a.fistp(MABS(m_reglo[dstp.ireg()], 4)); // fistp reglo[dstp] else a.fisttp(MABS(m_reglo[dstp.ireg()], 4)); // fisttp reglo[dstp] a.mov(Gpd(dstp.ireg()), MABS(m_reglo[dstp.ireg()])); // mov dstp,reglo[dstp] } } // 8-byte integer case else if (sizep.size() == SIZE_QWORD) { if (dstp.is_memory()) { if (!m_sse3 || roundp.rounding() != ROUND_TRUNC) a.fistp(MABS(dstp.memory(), 8)); // fistp [dstp] else a.fisttp(MABS(dstp.memory(), 8)); // fisttp [dstp] } else if (dstp.is_int_register()) { if (!m_sse3 || roundp.rounding() != ROUND_TRUNC) a.fistp(MABS(m_reglo[dstp.ireg()], 8)); // fistp reglo[dstp] else a.fisttp(MABS(m_reglo[dstp.ireg()], 8)); // fisttp reglo[dstp] a.mov(Gpd(dstp.ireg()), MABS(m_reglo[dstp.ireg()])); // mov dstp,reglo[dstp] } } // restore control word and proceed if (roundp.rounding() != ROUND_DEFAULT && (!m_sse3 || roundp.rounding() != ROUND_TRUNC)) a.fldcw(MABS(&m_fmodesave)); // fldcw [fmodesave] } //------------------------------------------------- // op_ffrint - process a FFRINT opcode //------------------------------------------------- void drcbe_x86::op_ffrint(Assembler &a, const instruction &inst) { // validate instruction assert(inst.size() == 4 || inst.size() == 8); assert_no_condition(inst); assert_no_flags(inst); // normalize parameters be_parameter dstp(*this, inst.param(0), PTYPE_MF); be_parameter srcp(*this, inst.param(1), PTYPE_MRI); parameter const 
&sizep = inst.param(2); assert(sizep.is_size()); // 4-byte integer case if (sizep.size() == SIZE_DWORD) { if (srcp.is_immediate()) { a.mov(MABS(&m_fptemp, 4), srcp.immediate()); // mov [fptemp],srcp a.fild(MABS(&m_fptemp, 4)); // fild [fptemp] } else if (srcp.is_memory()) a.fild(MABS(srcp.memory(), 4)); // fild [srcp] else if (srcp.is_int_register()) { a.mov(MABS(m_reglo[srcp.ireg()]), Gpd(srcp.ireg())); // mov reglo[srcp],srcp a.fild(MABS(m_reglo[srcp.ireg()], 4)); // fild reglo[srcp] } } // 8-bit integer case else if (sizep.size() == SIZE_QWORD) { if (srcp.is_immediate()) { a.mov(MABS(&m_fptemp, 4), srcp.immediate()); // mov [fptemp],srcp a.mov(MABS((uint8_t *)&m_fptemp + 4, 4), srcp.immediate()); // mov [fptemp+4],srcp a.fild(MABS(&m_fptemp, 8)); // fild [fptemp] } else if (srcp.is_memory()) a.fild(MABS(srcp.memory(), 8)); // fild [srcp] else if (srcp.is_int_register()) { a.mov(MABS(m_reglo[srcp.ireg()]), Gpd(srcp.ireg())); // mov reglo[srcp],srcp a.fild(MABS(m_reglo[srcp.ireg()], 8)); // fild reglo[srcp] } } // store the result and be done emit_fstp_p(a, inst.size(), dstp); // fstp [dstp] } //------------------------------------------------- // op_ffrflt - process a FFRFLT opcode //------------------------------------------------- void drcbe_x86::op_ffrflt(Assembler &a, const instruction &inst) { // validate instruction assert(inst.size() == 4 || inst.size() == 8); assert_no_condition(inst); assert_no_flags(inst); // normalize parameters be_parameter dstp(*this, inst.param(0), PTYPE_MF); be_parameter srcp(*this, inst.param(1), PTYPE_MF); parameter const &sizep = inst.param(2); assert(sizep.is_size()); // general case if (sizep.size() == SIZE_DWORD) a.fld(MABS(srcp.memory(), 4)); // fld [srcp] else if (sizep.size() == SIZE_QWORD) a.fld(MABS(srcp.memory(), 8)); // fld [srcp] emit_fstp_p(a, inst.size(), dstp); // fstp dstp } //------------------------------------------------- // op_frnds - process a FRNDS opcode //------------------------------------------------- void drcbe_x86::op_frnds(Assembler &a, const instruction &inst) { // validate instruction assert(inst.size() == 8); assert_no_condition(inst); assert_no_flags(inst); // normalize parameters be_parameter dstp(*this, inst.param(0), PTYPE_MF); be_parameter srcp(*this, inst.param(1), PTYPE_MF); // general case emit_fld_p(a, inst.size(), srcp); // fld srcp a.fstp(MABS(&m_fptemp, 4)); // fstp [fptemp] a.fld(MABS(&m_fptemp, 4)); // fld [fptemp] emit_fstp_p(a, inst.size(), dstp); // fstp [dstp] } //------------------------------------------------- // op_fadd - process a FADD opcode //------------------------------------------------- void drcbe_x86::op_fadd(Assembler &a, const instruction &inst) { // validate instruction assert(inst.size() == 4 || inst.size() == 8); assert_no_condition(inst); assert_no_flags(inst); // normalize parameters be_parameter dstp(*this, inst.param(0), PTYPE_MF); be_parameter src1p(*this, inst.param(1), PTYPE_MF); be_parameter src2p(*this, inst.param(2), PTYPE_MF); normalize_commutative(src1p, src2p); // general case emit_fld_p(a, inst.size(), src1p); // fld src1p emit_fld_p(a, inst.size(), src2p); // fld src2p a.faddp(); // faddp emit_fstp_p(a, inst.size(), dstp); // fstp dstp } //------------------------------------------------- // op_fsub - process a FSUB opcode //------------------------------------------------- void drcbe_x86::op_fsub(Assembler &a, const instruction &inst) { // validate instruction assert(inst.size() == 4 || inst.size() == 8); assert_no_condition(inst); assert_no_flags(inst); // normalize 
parameters be_parameter dstp(*this, inst.param(0), PTYPE_MF); be_parameter src1p(*this, inst.param(1), PTYPE_MF); be_parameter src2p(*this, inst.param(2), PTYPE_MF); // general case emit_fld_p(a, inst.size(), src1p); // fld src1p emit_fld_p(a, inst.size(), src2p); // fld src2p a.fsubp(); // fsubp emit_fstp_p(a, inst.size(), dstp); // fstp dstp } //------------------------------------------------- // op_fcmp - process a FCMP opcode //------------------------------------------------- void drcbe_x86::op_fcmp(Assembler &a, const instruction &inst) { // validate instruction assert(inst.size() == 4 || inst.size() == 8); assert_no_condition(inst); assert_flags(inst, FLAG_C | FLAG_Z | FLAG_U); // normalize parameters be_parameter src1p(*this, inst.param(0), PTYPE_MF); be_parameter src2p(*this, inst.param(1), PTYPE_MF); // general case emit_fld_p(a, inst.size(), src2p); // fld src2p emit_fld_p(a, inst.size(), src1p); // fld src1p a.fcompp(); // fcompp a.fnstsw(ax); // fnstsw ax a.sahf(); // sahf } //------------------------------------------------- // op_fmul - process a FMUL opcode //------------------------------------------------- void drcbe_x86::op_fmul(Assembler &a, const instruction &inst) { // validate instruction assert(inst.size() == 4 || inst.size() == 8); assert_no_condition(inst); assert_no_flags(inst); // normalize parameters be_parameter dstp(*this, inst.param(0), PTYPE_MF); be_parameter src1p(*this, inst.param(1), PTYPE_MF); be_parameter src2p(*this, inst.param(2), PTYPE_MF); normalize_commutative(src1p, src2p); // general case emit_fld_p(a, inst.size(), src1p); // fld src1p emit_fld_p(a, inst.size(), src2p); // fld src2p a.fmulp(); // fmulp emit_fstp_p(a, inst.size(), dstp); // fstp dstp } //------------------------------------------------- // op_fdiv - process a FDIV opcode //------------------------------------------------- void drcbe_x86::op_fdiv(Assembler &a, const instruction &inst) { // validate instruction assert(inst.size() == 4 || inst.size() == 8); assert_no_condition(inst); assert_no_flags(inst); // normalize parameters be_parameter dstp(*this, inst.param(0), PTYPE_MF); be_parameter src1p(*this, inst.param(1), PTYPE_MF); be_parameter src2p(*this, inst.param(2), PTYPE_MF); // general case emit_fld_p(a, inst.size(), src1p); // fld src1p emit_fld_p(a, inst.size(), src2p); // fld src2p a.fdivp(); // fdivp emit_fstp_p(a, inst.size(), dstp); // fstp dstp } //------------------------------------------------- // op_fneg - process a FNEG opcode //------------------------------------------------- void drcbe_x86::op_fneg(Assembler &a, const instruction &inst) { // validate instruction assert(inst.size() == 4 || inst.size() == 8); assert_no_condition(inst); assert_no_flags(inst); // normalize parameters be_parameter dstp(*this, inst.param(0), PTYPE_MF); be_parameter srcp(*this, inst.param(1), PTYPE_MF); // general case emit_fld_p(a, inst.size(), srcp); // fld srcp a.fchs(); // fchs emit_fstp_p(a, inst.size(), dstp); // fstp dstp } //------------------------------------------------- // op_fabs - process a FABS opcode //------------------------------------------------- void drcbe_x86::op_fabs(Assembler &a, const instruction &inst) { // validate instruction assert(inst.size() == 4 || inst.size() == 8); assert_no_condition(inst); assert_no_flags(inst); // normalize parameters be_parameter dstp(*this, inst.param(0), PTYPE_MF); be_parameter srcp(*this, inst.param(1), PTYPE_MF); // general case emit_fld_p(a, inst.size(), srcp); // fld srcp a.fabs(); // fabs emit_fstp_p(a, inst.size(), 
//-------------------------------------------------
//  op_fmul - process a FMUL opcode
//-------------------------------------------------

void drcbe_x86::op_fmul(Assembler &a, const instruction &inst)
{
    // validate instruction
    assert(inst.size() == 4 || inst.size() == 8);
    assert_no_condition(inst);
    assert_no_flags(inst);

    // normalize parameters
    be_parameter dstp(*this, inst.param(0), PTYPE_MF);
    be_parameter src1p(*this, inst.param(1), PTYPE_MF);
    be_parameter src2p(*this, inst.param(2), PTYPE_MF);
    normalize_commutative(src1p, src2p);

    // general case
    emit_fld_p(a, inst.size(), src1p);                                  // fld   src1p
    emit_fld_p(a, inst.size(), src2p);                                  // fld   src2p
    a.fmulp();                                                          // fmulp
    emit_fstp_p(a, inst.size(), dstp);                                  // fstp  dstp
}


//-------------------------------------------------
//  op_fdiv - process a FDIV opcode
//-------------------------------------------------

void drcbe_x86::op_fdiv(Assembler &a, const instruction &inst)
{
    // validate instruction
    assert(inst.size() == 4 || inst.size() == 8);
    assert_no_condition(inst);
    assert_no_flags(inst);

    // normalize parameters
    be_parameter dstp(*this, inst.param(0), PTYPE_MF);
    be_parameter src1p(*this, inst.param(1), PTYPE_MF);
    be_parameter src2p(*this, inst.param(2), PTYPE_MF);

    // general case
    emit_fld_p(a, inst.size(), src1p);                                  // fld   src1p
    emit_fld_p(a, inst.size(), src2p);                                  // fld   src2p
    a.fdivp();                                                          // fdivp
    emit_fstp_p(a, inst.size(), dstp);                                  // fstp  dstp
}


//-------------------------------------------------
//  op_fneg - process a FNEG opcode
//-------------------------------------------------

void drcbe_x86::op_fneg(Assembler &a, const instruction &inst)
{
    // validate instruction
    assert(inst.size() == 4 || inst.size() == 8);
    assert_no_condition(inst);
    assert_no_flags(inst);

    // normalize parameters
    be_parameter dstp(*this, inst.param(0), PTYPE_MF);
    be_parameter srcp(*this, inst.param(1), PTYPE_MF);

    // general case
    emit_fld_p(a, inst.size(), srcp);                                   // fld   srcp
    a.fchs();                                                           // fchs
    emit_fstp_p(a, inst.size(), dstp);                                  // fstp  dstp
}


//-------------------------------------------------
//  op_fabs - process a FABS opcode
//-------------------------------------------------

void drcbe_x86::op_fabs(Assembler &a, const instruction &inst)
{
    // validate instruction
    assert(inst.size() == 4 || inst.size() == 8);
    assert_no_condition(inst);
    assert_no_flags(inst);

    // normalize parameters
    be_parameter dstp(*this, inst.param(0), PTYPE_MF);
    be_parameter srcp(*this, inst.param(1), PTYPE_MF);

    // general case
    emit_fld_p(a, inst.size(), srcp);                                   // fld   srcp
    a.fabs();                                                           // fabs
    emit_fstp_p(a, inst.size(), dstp);                                  // fstp  dstp
}


//-------------------------------------------------
//  op_fsqrt - process a FSQRT opcode
//-------------------------------------------------

void drcbe_x86::op_fsqrt(Assembler &a, const instruction &inst)
{
    // validate instruction
    assert(inst.size() == 4 || inst.size() == 8);
    assert_no_condition(inst);
    assert_no_flags(inst);

    // normalize parameters
    be_parameter dstp(*this, inst.param(0), PTYPE_MF);
    be_parameter srcp(*this, inst.param(1), PTYPE_MF);

    // general case
    emit_fld_p(a, inst.size(), srcp);                                   // fld   srcp
    a.fsqrt();                                                          // fsqrt
    emit_fstp_p(a, inst.size(), dstp);                                  // fstp  dstp
}


//-------------------------------------------------
//  op_frecip - process a FRECIP opcode
//-------------------------------------------------

void drcbe_x86::op_frecip(Assembler &a, const instruction &inst)
{
    // validate instruction
    assert(inst.size() == 4 || inst.size() == 8);
    assert_no_condition(inst);
    assert_no_flags(inst);

    // normalize parameters
    be_parameter dstp(*this, inst.param(0), PTYPE_MF);
    be_parameter srcp(*this, inst.param(1), PTYPE_MF);

    // general case
    a.fld1();                                                           // fld1
    emit_fld_p(a, inst.size(), srcp);                                   // fld   srcp
    a.fdivp();                                                          // fdivp
    emit_fstp_p(a, inst.size(), dstp);                                  // fstp  dstp
}


//-------------------------------------------------
//  op_frsqrt - process a FRSQRT opcode
//-------------------------------------------------

void drcbe_x86::op_frsqrt(Assembler &a, const instruction &inst)
{
    // validate instruction
    assert(inst.size() == 4 || inst.size() == 8);
    assert_no_condition(inst);
    assert_no_flags(inst);

    // normalize parameters
    be_parameter dstp(*this, inst.param(0), PTYPE_MF);
    be_parameter srcp(*this, inst.param(1), PTYPE_MF);

    // general case
    a.fld1();                                                           // fld1
    emit_fld_p(a, inst.size(), srcp);                                   // fld   srcp
    a.fsqrt();                                                          // fsqrt
    a.fdivp();                                                          // fdivp
    emit_fstp_p(a, inst.size(), dstp);                                  // fstp  dstp
}
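// Implementation note: the FCOPYI/ICOPYF handlers that follow move raw bit
// patterns between the integer and floating-point domains without any
// conversion, so they go through plain 32-bit MOVs rather than the FPU.
// Remember this back-end's 64-bit integer register convention: only the low
// 32 bits of I0-I3 live in a host register (EBX/ESI/EDI/EBP) while the high
// 32 bits are kept in memory at m_reghi[reg], which is why the 64-bit paths
// below mix register moves with MABS(m_reghi[...]) accesses.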
//-------------------------------------------------
//  op_fcopyi - process a FCOPYI opcode
//-------------------------------------------------

void drcbe_x86::op_fcopyi(Assembler &a, const instruction &inst)
{
    // validate instruction
    assert(inst.size() == 4 || inst.size() == 8);
    assert_no_condition(inst);
    assert_no_flags(inst);

    // normalize parameters
    be_parameter dstp(*this, inst.param(0), PTYPE_MF);
    be_parameter srcp(*this, inst.param(1), PTYPE_MR);

    // 32-bit case
    if (inst.size() == 4)
    {
        if (srcp.is_memory())
        {
            a.mov(eax, MABS(srcp.memory()));                            // mov   eax,[srcp]
            a.mov(MABS(dstp.memory()), eax);                            // mov   [dstp],eax
        }
        else if (srcp.is_int_register())
        {
            a.mov(MABS(dstp.memory()), Gpd(srcp.ireg()));               // mov   [dstp],srcp
        }
    }

    // 64-bit case
    else if (inst.size() == 8)
    {
        if (srcp.is_memory())
        {
            a.mov(eax, MABS(srcp.memory(0)));                           // mov   eax,[srcp]
            a.mov(edx, MABS(srcp.memory(4)));                           // mov   edx,[srcp+4]
        }
        else if (srcp.is_int_register())
        {
            a.mov(edx, MABS(m_reghi[srcp.ireg()]));                     // mov   edx,[reghi[srcp]]
            a.mov(eax, Gpd(srcp.ireg()));                               // mov   eax,srcp
        }

        a.mov(MABS(dstp.memory(0)), eax);                               // mov   [dstp],eax
        a.mov(MABS(dstp.memory(4)), edx);                               // mov   [dstp+4],edx
    }
}


//-------------------------------------------------
//  op_icopyf - process a ICOPYF opcode
//-------------------------------------------------

void drcbe_x86::op_icopyf(Assembler &a, const instruction &inst)
{
    // validate instruction
    assert(inst.size() == 4 || inst.size() == 8);
    assert_no_condition(inst);
    assert_no_flags(inst);

    // normalize parameters
    be_parameter dstp(*this, inst.param(0), PTYPE_MR);
    be_parameter srcp(*this, inst.param(1), PTYPE_MF);

    // 32-bit case
    if (inst.size() == 4)
    {
        a.mov(eax, MABS(srcp.memory()));                                // mov   eax,[srcp]
        if (dstp.is_memory())
        {
            a.mov(MABS(dstp.memory()), eax);                            // mov   [dstp],eax
        }
        else if (dstp.is_int_register())
        {
            a.mov(Gpd(dstp.ireg()), eax);                               // mov   dstp,eax
        }
    }

    // 64-bit case
    else if (inst.size() == 8)
    {
        a.mov(eax, MABS(srcp.memory(0)));                               // mov   eax,[srcp]
        a.mov(edx, MABS(srcp.memory(4)));                               // mov   edx,[srcp+4]
        if (dstp.is_memory())
        {
            a.mov(MABS(dstp.memory(0)), eax);                           // mov   [dstp],eax
            a.mov(MABS(dstp.memory(4)), edx);                           // mov   [dstp+4],edx
        }
        else
        {
            a.mov(MABS(m_reghi[dstp.ireg()]), edx);                     // mov   [reghi[dstp]],edx
            a.mov(Gpd(dstp.ireg()), eax);                               // mov   dstp,eax
        }
    }
}

} // anonymous namespace


std::unique_ptr<drcbe_interface> make_drcbe_x86(
        drcuml_state &drcuml,
        device_t &device,
        drc_cache &cache,
        uint32_t flags,
        int modes,
        int addrbits,
        int ignorebits)
{
    return std::unique_ptr<drcbe_interface>(new drcbe_x86(drcuml, device, cache, flags, modes, addrbits, ignorebits));
}

} // namespace drc
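// Usage note: make_drcbe_x86 is the factory through which the UML core
// obtains this back-end, returning it behind the generic drcbe_interface so
// callers never name the concrete type.  A minimal sketch of a call site
// (hypothetical and illustrative only - in practice drcuml_state selects and
// constructs the appropriate back-end itself):
//
//     std::unique_ptr<drc::drcbe_interface> be =
//             drc::make_drcbe_x86(drcuml, device, cache, flags, modes, addrbits, ignorebits);
//     be->reset();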