-rw-r--r--  scripts/src/lib.lua            |   2
-rw-r--r--  src/devices/cpu/drcbearm64.cpp | 118
-rw-r--r--  src/devices/cpu/drcbex64.cpp   | 175
-rw-r--r--  src/devices/cpu/drcbex86.cpp   | 108
-rw-r--r--  src/lib/util/delegate.cpp      | 208
-rw-r--r--  src/lib/util/mfpresolve.cpp    | 258
-rw-r--r--  src/lib/util/mfpresolve.h      |  86
7 files changed, 524 insertions(+), 431 deletions(-)
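This change hoists the member function pointer resolution logic previously duplicated across the three DRC back-ends and delegate.cpp into a shared util::resolve_member_function() helper in the new src/lib/util/mfpresolve.{cpp,h}. For orientation, the representation decoded on the Itanium C++ ABI side is the two-word layout sketched below; it mirrors the struct { uintptr_t ptr; ptrdiff_t adj; } equivalents used throughout the patch, but the name is illustrative and not part of the change:

    // Sketch of the Itanium C++ ABI member function pointer equivalent.
    // On the standard variant, bit 0 of ptr flags a virtual function and
    // ptr then holds a vtable byte offset + 1; on the ARM-like variant the
    // virtual flag lives in bit 0 of adj, and adj is the "this" delta
    // shifted left one bit (hence the `adj >> 1` in the code being replaced).
    struct itanium_mfp_equiv
    {
        std::uintptr_t ptr;  // entry point, or vtable offset when virtual
        std::ptrdiff_t adj;  // displacement to apply to the "this" pointer
    };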
diff --git a/scripts/src/lib.lua b/scripts/src/lib.lua
index 5196e579d12..cc5567207ed 100644
--- a/scripts/src/lib.lua
+++ b/scripts/src/lib.lua
@@ -95,6 +95,8 @@ end
 		MAME_DIR .. "src/lib/util/lrucache.h",
 		MAME_DIR .. "src/lib/util/md5.cpp",
 		MAME_DIR .. "src/lib/util/md5.h",
+		MAME_DIR .. "src/lib/util/mfpresolve.cpp",
+		MAME_DIR .. "src/lib/util/mfpresolve.h",
 		MAME_DIR .. "src/lib/util/msdib.cpp",
 		MAME_DIR .. "src/lib/util/msdib.h",
 		MAME_DIR .. "src/lib/util/multibyte.h",
diff --git a/src/devices/cpu/drcbearm64.cpp b/src/devices/cpu/drcbearm64.cpp
index bae953e01f7..52fdd67e82d 100644
--- a/src/devices/cpu/drcbearm64.cpp
+++ b/src/devices/cpu/drcbearm64.cpp
@@ -17,6 +17,8 @@
 using namespace uml;
 using namespace asmjit;
 
+namespace {
+
 const uint32_t PTYPE_M = 1 << parameter::PTYPE_MEMORY;
 const uint32_t PTYPE_I = 1 << parameter::PTYPE_IMMEDIATE;
 const uint32_t PTYPE_R = 1 << parameter::PTYPE_INT_REGISTER;
@@ -56,19 +58,7 @@ const a64::Gp BASE_REG = a64::x27;
 // Software emulated flags (bit 0 = FLAG_C, bit 4 = FLAG_U)
 const a64::Gp FLAGS_REG = a64::x28;
 
-
-#define ARM_CONDITION(a, condition) (condition_map[condition - COND_Z])
-#define ARM_NOT_CONDITION(a, condition) (negateCond(condition_map[condition - COND_Z]))
-
-#define assert_no_condition(inst) assert((inst).condition() == uml::COND_ALWAYS)
-#define assert_any_condition(inst) assert((inst).condition() == uml::COND_ALWAYS || ((inst).condition() >= uml::COND_Z && (inst).condition() < uml::COND_MAX))
-#define assert_no_flags(inst) assert((inst).flags() == 0)
-#define assert_flags(inst, valid) assert(((inst).flags() & ~(valid)) == 0)
-
-
-drcbe_arm64::opcode_generate_func drcbe_arm64::s_opcode_table[OP_MAX];
-
-static const a64::Gp::Id int_register_map[REG_I_COUNT] =
+const a64::Gp::Id int_register_map[REG_I_COUNT] =
 {
 	a64::Gp::Id(a64::x19.id()),
 	a64::Gp::Id(a64::x20.id()),
@@ -80,7 +70,7 @@ static const a64::Gp::Id int_register_map[REG_I_COUNT] =
 	a64::Gp::Id(a64::x26.id()),
 };
 
-static const a64::Gp::Id float_register_map[REG_F_COUNT] =
+const a64::Gp::Id float_register_map[REG_F_COUNT] =
 {
 	a64::Gp::Id(a64::d8.id()),
 	a64::Gp::Id(a64::d9.id()),
@@ -93,7 +83,7 @@ static const a64::Gp::Id float_register_map[REG_F_COUNT] =
 };
 
 // condition mapping table
-static const a64::CondCode condition_map[uml::COND_MAX - uml::COND_Z] =
+const a64::CondCode condition_map[uml::COND_MAX - uml::COND_Z] =
 {
 	a64::CondCode::kEQ,   // COND_Z = 0x80, requires Z  COND_E
 	a64::CondCode::kNE,   // COND_NZ, requires Z        COND_NE
@@ -113,6 +103,30 @@ static const a64::CondCode condition_map[uml::COND_MAX - uml::COND_Z] =
 	a64::CondCode::kGE,   // COND_GE, requires SV
 };
 
+
+#define ARM_CONDITION(a, condition) (condition_map[condition - COND_Z])
+#define ARM_NOT_CONDITION(a, condition) (negateCond(condition_map[condition - COND_Z]))
+
+#define assert_no_condition(inst) assert((inst).condition() == uml::COND_ALWAYS)
+#define assert_any_condition(inst) assert((inst).condition() == uml::COND_ALWAYS || ((inst).condition() >= uml::COND_Z && (inst).condition() < uml::COND_MAX))
+#define assert_no_flags(inst) assert((inst).flags() == 0)
+#define assert_flags(inst, valid) assert(((inst).flags() & ~(valid)) == 0)
+
+
+class ThrowableErrorHandler : public ErrorHandler
+{
+public:
+	void handleError(Error err, const char *message, BaseEmitter *origin) override
+	{
+		throw emu_fatalerror("asmjit error %d: %s", err, message);
+	}
+};
+
+} // anonymous namespace
+
+
+drcbe_arm64::opcode_generate_func drcbe_arm64::s_opcode_table[OP_MAX];
+
 const drcbe_arm64::opcode_table_entry drcbe_arm64::s_opcode_table_source[] =
 {
 	// Compile-time opcodes
@@ -208,15 +222,6 @@ const drcbe_arm64::opcode_table_entry drcbe_arm64::s_opcode_table_source[] =
 	{ uml::OP_ICOPYF, &drcbe_arm64::op_icopyf } // ICOPYF dst,src
 };
 
-class ThrowableErrorHandler : public ErrorHandler
-{
-public:
-	void handleError(Error err, const char *message, BaseEmitter *origin) override
-	{
-		throw emu_fatalerror("asmjit error %d: %s", err, message);
-	}
-};
-
 drcbe_arm64::be_parameter::be_parameter(drcbe_arm64 &drcbe, const parameter &param, uint32_t allowed)
 {
 	int regnum;
@@ -888,68 +893,9 @@ drcbe_arm64::drcbe_arm64(drcuml_state &drcuml, device_t &device, drc_cache &cach
 	auto const resolve_accessor =
 			[] (resolved_handler &handler, address_space &space, auto accessor)
 			{
-				if (MAME_DELEGATE_USE_TYPE == MAME_DELEGATE_TYPE_ITANIUM)
-				{
-					struct { uintptr_t ptr; ptrdiff_t adj; } equiv;
-					assert(sizeof(accessor) == sizeof(equiv));
-					*reinterpret_cast<decltype(accessor) *>(&equiv) = accessor;
-					handler.obj = uintptr_t(reinterpret_cast<uint8_t *>(&space) + (equiv.adj >> 1));
-					if (BIT(equiv.adj, 0))
-					{
-						auto const vptr = *reinterpret_cast<u8 const *const *>(handler.obj) + equiv.ptr;
-						handler.func = *reinterpret_cast<uint8_t *const *>(vptr);
-					}
-					else
-					{
-						handler.func = reinterpret_cast<uint8_t *>(equiv.ptr);
-					}
-				}
-				else if (MAME_DELEGATE_USE_TYPE == MAME_DELEGATE_TYPE_MSVC)
-				{
-					// interpret the pointer to member function ignoring the virtual inheritance variant
-					struct single { uintptr_t ptr; };
-					struct multi { uintptr_t ptr; int adj; };
-					struct { uintptr_t ptr; int adj; int vadj; int vindex; } unknown;
-					assert(sizeof(accessor) <= sizeof(unknown));
-					*reinterpret_cast<decltype(accessor) *>(&unknown) = accessor;
-					uint32_t const *func = reinterpret_cast<uint32_t const *>(unknown.ptr);
-					handler.obj = uintptr_t(&space);
-					if ((sizeof(unknown) == sizeof(accessor)) && unknown.vindex)
-					{
-						handler.obj += unknown.vadj;
-						auto const vptr = *reinterpret_cast<uint8_t const *const *>(handler.obj);
-						handler.obj += *reinterpret_cast<int const *>(vptr + unknown.vindex);
-					}
-					if (sizeof(single) < sizeof(accessor))
-						handler.obj += unknown.adj;
-
-					// walk past thunks
-					while (true)
-					{
-						if ((0x90000010 == (func[0] & 0x9f00001f)) && (0x91000210 == (func[1] & 0xffc003ff)) && (0xd61f0200 == func[2]))
-						{
-							// page-relative jump with +/-4GB reach - adrp xip0,... ; add xip0,xip0,#... ; br xip0
-							int64_t const page =
-									(uint64_t(func[0] & 0x60000000) >> 17) |
-									(uint64_t(func[0] & 0x00ffffe0) << 9) |
-									((func[0] & 0x00800000) ? (~std::uint64_t(0) << 33) : 0);
-							uint32_t const offset = (func[1] & 0x003ffc00) >> 10;
-							func = reinterpret_cast<uint32_t const *>(((uintptr_t(func) + page) & (~uintptr_t(0) << 12)) + offset);
-						}
-						else if ((0xf9400010 == func[0]) && (0xf9400210 == (func[1] & 0xffc003ff)) && (0xd61f0200 == func[2]))
-						{
-							// virtual function call thunk - ldr xip0,[x0] ; ldr xip0,[x0,#...] ; br xip0
-							uint32_t const *const *const vptr = *reinterpret_cast<uint32_t const *const *const *>(handler.obj);
-							func = vptr[(func[1] & 0x003ffc00) >> 10];
-						}
-						else
-						{
-							// not something we can easily bypass
-							break;
-						}
-					}
-					handler.func = reinterpret_cast<uint8_t *>(uintptr_t(func));
-				}
+				auto const [entrypoint, adjusted] = util::resolve_member_function(accessor, &space);
+				handler.func = reinterpret_cast<uint8_t *>(entrypoint);
+				handler.obj = adjusted;
 			};
 
 	m_resolved_accessors.resize(m_space.size());
diff --git a/src/devices/cpu/drcbex64.cpp b/src/devices/cpu/drcbex64.cpp
index c7ddf18b535..5e08d498443 100644
--- a/src/devices/cpu/drcbex64.cpp
+++ b/src/devices/cpu/drcbex64.cpp
@@ -172,6 +172,8 @@
 #include "debug/debugcpu.h"
 #include "emuopts.h"
 
+#include "mfpresolve.h"
+
 #include <cstddef>
 
 
@@ -185,6 +187,7 @@
 using namespace asmjit;
 using namespace asmjit::x86;
 
+namespace {
 
 //**************************************************************************
 //  DEBUGGING
@@ -229,33 +232,8 @@ const Gp::Id REG_PARAM4 = Gp::kIdCx;
 #endif
 
 
-
-//**************************************************************************
-//  MACROS
-//**************************************************************************
-
-#define X86_CONDITION(condition) (condition_map[condition - uml::COND_Z])
-#define X86_NOT_CONDITION(condition) negateCond(condition_map[condition - uml::COND_Z])
-
-#define assert_no_condition(inst) assert((inst).condition() == uml::COND_ALWAYS)
-#define assert_any_condition(inst) assert((inst).condition() == uml::COND_ALWAYS || ((inst).condition() >= uml::COND_Z && (inst).condition() < uml::COND_MAX))
-#define assert_no_flags(inst) assert((inst).flags() == 0)
-#define assert_flags(inst, valid) assert(((inst).flags() & ~(valid)) == 0)
-
-
-
-//**************************************************************************
-//  GLOBAL VARIABLES
-//**************************************************************************
-
-drcbe_x64::opcode_generate_func drcbe_x64::s_opcode_table[OP_MAX];
-
-// size-to-mask table
-//static const uint64_t size_to_mask[] = { 0, 0xff, 0xffff, 0, 0xffffffff, 0, 0, 0, 0xffffffffffffffffU };
-
 // register mapping tables
-static const Gp::Id int_register_map[REG_I_COUNT] =
+const Gp::Id int_register_map[REG_I_COUNT] =
 {
 #ifdef X64_WINDOWS_ABI
 	Gp::kIdBx, Gp::kIdSi, Gp::kIdDi, Gp::kIdR12, Gp::kIdR13, Gp::kIdR14, Gp::kIdR15,
@@ -264,7 +242,7 @@ static const Gp::Id int_register_map[REG_I_COUNT] =
 #endif
 };
 
-static uint32_t float_register_map[REG_F_COUNT] =
+uint32_t float_register_map[REG_F_COUNT] =
 {
 #ifdef X64_WINDOWS_ABI
 	6, 7, 8, 9, 10, 11, 12, 13, 14, 15
@@ -276,7 +254,7 @@ static uint32_t float_register_map[REG_F_COUNT] =
 };
 
 // condition mapping table
-static const CondCode condition_map[uml::COND_MAX - uml::COND_Z] =
+const CondCode condition_map[uml::COND_MAX - uml::COND_Z] =
 {
 	CondCode::kZ,  // COND_Z = 0x80, requires Z
 	CondCode::kNZ, // COND_NZ, requires Z
@@ -298,7 +276,7 @@
 
 #if 0
 // rounding mode mapping table
-static const uint8_t fprnd_map[4] =
+const uint8_t fprnd_map[4] =
 {
 	FPRND_CHOP,  // ROUND_TRUNC, truncate
 	FPRND_NEAR,  // ROUND_ROUND, round
@@ -307,6 +285,44 @@ static const uint8_t fprnd_map[4] =
 };
 #endif
 
+// size-to-mask table
+//const uint64_t size_to_mask[] = { 0, 0xff, 0xffff, 0, 0xffffffff, 0, 0, 0, 0xffffffffffffffffU };
+
+
+
+//**************************************************************************
+//  MACROS
+//**************************************************************************
+
+#define X86_CONDITION(condition) (condition_map[condition - uml::COND_Z])
+#define X86_NOT_CONDITION(condition) negateCond(condition_map[condition - uml::COND_Z])
+
+#define assert_no_condition(inst) assert((inst).condition() == uml::COND_ALWAYS)
+#define assert_any_condition(inst) assert((inst).condition() == uml::COND_ALWAYS || ((inst).condition() >= uml::COND_Z && (inst).condition() < uml::COND_MAX))
+#define assert_no_flags(inst) assert((inst).flags() == 0)
+#define assert_flags(inst, valid) assert(((inst).flags() & ~(valid)) == 0)
+
+
+
+class ThrowableErrorHandler : public ErrorHandler
+{
+public:
+	void handleError(Error err, const char *message, BaseEmitter *origin) override
+	{
+		throw emu_fatalerror("asmjit error %d: %s", err, message);
+	}
+};
+
+} // anonymous namespace
+
+
+
+//**************************************************************************
+//  GLOBAL VARIABLES
+//**************************************************************************
+
+drcbe_x64::opcode_generate_func drcbe_x64::s_opcode_table[OP_MAX];
+
 
 
 //**************************************************************************
@@ -408,15 +424,6 @@ const drcbe_x64::opcode_table_entry drcbe_x64::s_opcode_table_source[] =
 	{ uml::OP_ICOPYF, &drcbe_x64::op_icopyf } // ICOPYF dst,src
 };
 
-class ThrowableErrorHandler : public ErrorHandler
-{
-public:
-	void handleError(Error err, const char *message, BaseEmitter *origin) override
-	{
-		throw emu_fatalerror("asmjit error %d: %s", err, message);
-	}
-};
-
 
 //**************************************************************************
 //  INLINE FUNCTIONS
@@ -689,65 +696,9 @@ drcbe_x64::drcbe_x64(drcuml_state &drcuml, device_t &device, drc_cache &cache, u
 	auto const resolve_accessor =
 			[] (resolved_handler &handler, address_space &space, auto accessor)
 			{
-				if (MAME_DELEGATE_USE_TYPE == MAME_DELEGATE_TYPE_ITANIUM)
-				{
-					struct { uintptr_t ptr; ptrdiff_t adj; } equiv;
-					assert(sizeof(accessor) == sizeof(equiv));
-					*reinterpret_cast<decltype(accessor) *>(&equiv) = accessor;
-					handler.obj = uintptr_t(reinterpret_cast<u8 *>(&space) + equiv.adj);
-					if (BIT(equiv.ptr, 0))
-					{
-						auto const vptr = *reinterpret_cast<u8 const *const *>(handler.obj) + equiv.ptr - 1;
-						handler.func = *reinterpret_cast<x86code *const *>(vptr);
-					}
-					else
-					{
-						handler.func = reinterpret_cast<x86code *>(equiv.ptr);
-					}
-				}
-				else if (MAME_DELEGATE_USE_TYPE == MAME_DELEGATE_TYPE_MSVC)
-				{
-					// interpret the pointer to member function ignoring the virtual inheritance variant
-					struct single { uintptr_t ptr; };
-					struct multi { uintptr_t ptr; int adj; };
-					struct { uintptr_t ptr; int adj; int vadj; int vindex; } unknown;
-					assert(sizeof(accessor) <= sizeof(unknown));
-					*reinterpret_cast<decltype(accessor) *>(&unknown) = accessor;
-					handler.func = reinterpret_cast<x86code *>(unknown.ptr);
-					handler.obj = uintptr_t(&space);
-					if ((sizeof(unknown) == sizeof(accessor)) && unknown.vindex)
-					{
-						handler.obj += unknown.vadj;
-						auto const vptr = *reinterpret_cast<std::uint8_t const *const *>(handler.obj);
-						handler.obj += *reinterpret_cast<int const *>(vptr + unknown.vindex);
-					}
-					if (sizeof(single) < sizeof(accessor))
-						handler.obj += unknown.adj;
-
-					// walk past thunks
-					while (true)
-					{
-						if (0xe9 == handler.func[0])
-						{
-							// absolute jump with 32-bit displacement
-							handler.func += 5 + *reinterpret_cast<s32 const *>(handler.func + 1);
-						}
-						else if ((0x48 == handler.func[0]) && (0x8b == handler.func[1]) && (0x01 == handler.func[2]) && (0xff == handler.func[3]) && ((0x60 == handler.func[4]) || (0xa0 == handler.func[4])))
-						{
-							// virtual function call thunk
-							auto const vptr = *reinterpret_cast<std::uint8_t const *const *>(handler.obj);
-							if (0x60 == handler.func[4])
-								handler.func = *reinterpret_cast<x86code *const *>(vptr + *reinterpret_cast<s8 const *>(handler.func + 5));
-							else
-								handler.func = *reinterpret_cast<x86code *const *>(vptr + *reinterpret_cast<s32 const *>(handler.func + 5));
-						}
-						else
-						{
-							// not something we can easily bypass
-							break;
-						}
-					}
-				}
+				auto const [entrypoint, adjusted] = util::resolve_member_function(accessor, &space);
+				handler.func = reinterpret_cast<x86code *>(entrypoint);
+				handler.obj = adjusted;
 			};
 
 	m_resolved_accessors.resize(m_space.size());
 	for (int space = 0; m_space.size() > space; ++space)
@@ -1772,9 +1723,12 @@ void drcbe_x64::op_exh(Assembler &a, const instruction &inst)
 	drccodeptr *targetptr = handp.handle().codeptr_addr();
 
 	// perform the exception processing
-	Label no_exception = a.newLabel();
+	Label no_exception;
 	if (inst.condition() != uml::COND_ALWAYS)
+	{
+		no_exception = a.newLabel();
 		a.short_().j(X86_NOT_CONDITION(inst.condition()), no_exception); // jcc no_exception
+	}
 	mov_mem_param(a, MABS(&m_state.exp, 4), exp); // mov [exp],exp
 	if (*targetptr != nullptr)
 		a.call(imm(*targetptr)); // call *targetptr
@@ -1804,9 +1758,12 @@ void drcbe_x64::op_callh(Assembler &a, const instruction &inst)
 	drccodeptr *targetptr = handp.handle().codeptr_addr();
 
 	// skip if conditional
-	Label skip = a.newLabel();
+	Label skip;
 	if (inst.condition() != uml::COND_ALWAYS)
+	{
+		skip = a.newLabel();
 		a.short_().j(X86_NOT_CONDITION(inst.condition()), skip); // jcc skip
+	}
 
 	// jump through the handle; directly if a normal jump
 	if (*targetptr != nullptr)
@@ -1833,9 +1790,12 @@ void drcbe_x64::op_ret(Assembler &a, const instruction &inst)
 	assert(inst.numparams() == 0);
 
 	// skip if conditional
-	Label skip = a.newLabel();
+	Label skip;
 	if (inst.condition() != uml::COND_ALWAYS)
+	{
+		skip = a.newLabel();
 		a.short_().j(X86_NOT_CONDITION(inst.condition()), skip); // jcc skip
+	}
 
 	// return
 	a.lea(rsp, ptr(rsp, 40)); // lea rsp,[rsp+40]
@@ -1864,9 +1824,12 @@ void drcbe_x64::op_callc(Assembler &a, const instruction &inst)
 	be_parameter paramp(*this, inst.param(1), PTYPE_M);
 
 	// skip if conditional
-	Label skip = a.newLabel();
+	Label skip;
 	if (inst.condition() != uml::COND_ALWAYS)
+	{
+		skip = a.newLabel();
 		a.short_().j(X86_NOT_CONDITION(inst.condition()), skip); // jcc skip
+	}
 
 	// perform the call
 	mov_r64_imm(a, Gpq(REG_PARAM1), (uintptr_t)paramp.memory()); // mov param1,paramp
@@ -2958,9 +2921,12 @@ void drcbe_x64::op_mov(Assembler &a, const instruction &inst)
 	be_parameter srcp(*this, inst.param(1), PTYPE_MRI);
 
 	// add a conditional branch unless a conditional move is possible
-	Label skip = a.newLabel();
+	Label skip;
 	if (inst.condition() != uml::COND_ALWAYS && !(dstp.is_int_register() && !srcp.is_immediate()))
+	{
+		skip = a.newLabel();
 		a.short_().j(X86_NOT_CONDITION(inst.condition()), skip);
+	}
 
 	// register to memory
 	if (dstp.is_memory() && srcp.is_int_register())
@@ -4529,9 +4495,12 @@ void drcbe_x64::op_fmov(Assembler &a, const instruction &inst)
 	Xmm dstreg = dstp.select_register(xmm0);
 
 	// always start with a jmp
-	Label skip = a.newLabel();
+	Label skip;
 	if (inst.condition() != uml::COND_ALWAYS)
+	{
+		skip = a.newLabel();
 		a.short_().j(X86_NOT_CONDITION(inst.condition()), skip); // jcc skip
+	}
 
 	// 32-bit form
 	if (inst.size() == 4)
diff --git a/src/devices/cpu/drcbex86.cpp b/src/devices/cpu/drcbex86.cpp
index aadd119a361..19fc002949b 100644
--- a/src/devices/cpu/drcbex86.cpp
+++ b/src/devices/cpu/drcbex86.cpp
@@ -100,6 +100,7 @@
 using namespace asmjit;
 using namespace asmjit::x86;
 
+namespace {
 
 //**************************************************************************
 //  DEBUGGING
@@ -124,42 +125,21 @@ const uint32_t PTYPE_MRI = PTYPE_M | PTYPE_R | PTYPE_I;
 const uint32_t PTYPE_MF = PTYPE_M | PTYPE_F;
 
 
-
-//**************************************************************************
-//  MACROS
-//**************************************************************************
-
-#define X86_CONDITION(condition) (condition_map[condition - uml::COND_Z])
-#define X86_NOT_CONDITION(condition) negateCond(condition_map[condition - uml::COND_Z])
-
-#define assert_no_condition(inst) assert((inst).condition() == uml::COND_ALWAYS)
-#define assert_any_condition(inst) assert((inst).condition() == uml::COND_ALWAYS || ((inst).condition() >= uml::COND_Z && (inst).condition() < uml::COND_MAX))
-#define assert_no_flags(inst) assert((inst).flags() == 0)
-#define assert_flags(inst, valid) assert(((inst).flags() & ~(valid)) == 0)
-
-
-
-//**************************************************************************
-//  GLOBAL VARIABLES
-//**************************************************************************
-
-drcbe_x86::opcode_generate_func drcbe_x86::s_opcode_table[OP_MAX];
-
 // size-to-mask table
-//static const uint64_t size_to_mask[] = { 0, 0xff, 0xffff, 0, 0xffffffff, 0, 0, 0, 0xffffffffffffffffU };
+//const uint64_t size_to_mask[] = { 0, 0xff, 0xffff, 0, 0xffffffff, 0, 0, 0, 0xffffffffffffffffU };
 
 // register mapping tables
-static const Gp::Id int_register_map[REG_I_COUNT] =
+const Gp::Id int_register_map[REG_I_COUNT] =
 {
 	Gp::kIdBx, Gp::kIdSi, Gp::kIdDi, Gp::kIdBp
 };
 
 // flags mapping tables
-static uint8_t flags_map[0x1000];
-static uint32_t flags_unmap[0x20];
+uint8_t flags_map[0x1000];
+uint32_t flags_unmap[0x20];
 
 // condition mapping table
-static const CondCode condition_map[uml::COND_MAX - uml::COND_Z] =
+const CondCode condition_map[uml::COND_MAX - uml::COND_Z] =
 {
 	CondCode::kZ,  // COND_Z = 0x80, requires Z
 	CondCode::kNZ, // COND_NZ, requires Z
@@ -180,7 +160,7 @@ static const CondCode condition_map[uml::COND_MAX - uml::COND_Z] =
 };
 
 // FPU control register mapping
-static const uint16_t fp_control[4] =
+const uint16_t fp_control[4] =
 {
 	0x0e3f, // ROUND_TRUNC
 	0x023f, // ROUND_ROUND
@@ -191,6 +171,41 @@ static const uint16_t fp_control[4] =
 };
 
 
 //**************************************************************************
+//  MACROS
+//**************************************************************************
+
+#define X86_CONDITION(condition) (condition_map[condition - uml::COND_Z])
+#define X86_NOT_CONDITION(condition) negateCond(condition_map[condition - uml::COND_Z])
+
+#define assert_no_condition(inst) assert((inst).condition() == uml::COND_ALWAYS)
+#define assert_any_condition(inst) assert((inst).condition() == uml::COND_ALWAYS || ((inst).condition() >= uml::COND_Z && (inst).condition() < uml::COND_MAX))
+#define assert_no_flags(inst) assert((inst).flags() == 0)
+#define assert_flags(inst, valid) assert(((inst).flags() & ~(valid)) == 0)
+
+
+
+class ThrowableErrorHandler : public ErrorHandler
+{
+public:
+	void handleError(Error err, const char *message, BaseEmitter *origin) override
+	{
+		throw emu_fatalerror("asmjit error %d: %s", err, message);
+	}
+};
+
+} // anonymous namespace
+
+
+
+//**************************************************************************
+//  GLOBAL VARIABLES
+//**************************************************************************
+
+drcbe_x86::opcode_generate_func drcbe_x86::s_opcode_table[OP_MAX];
+
+
+
+//**************************************************************************
 //  TABLES
 //**************************************************************************
@@ -289,15 +304,6 @@ const drcbe_x86::opcode_table_entry drcbe_x86::s_opcode_table_source[] =
 	{ uml::OP_ICOPYF, &drcbe_x86::op_icopyf }, // ICOPYF dst,src
 };
 
-class ThrowableErrorHandler : public ErrorHandler
-{
-public:
-	void handleError(Error err, const char *message, BaseEmitter *origin) override
-	{
-		throw emu_fatalerror("asmjit error %d: %s", err, message);
-	}
-};
-
 
 //**************************************************************************
 //  INLINE FUNCTIONS
@@ -2596,9 +2602,12 @@ void drcbe_x86::op_exh(Assembler &a, const instruction &inst)
 	drccodeptr *targetptr = handp.handle().codeptr_addr();
 
 	// perform the exception processing
-	Label no_exception = a.newLabel();
+	Label no_exception;
 	if (inst.condition() != uml::COND_ALWAYS)
+	{
+		no_exception = a.newLabel();
 		a.short_().j(X86_NOT_CONDITION(inst.condition()), no_exception); // jcc no_exception
+	}
 	emit_mov_m32_p32(a, MABS(&m_state.exp, 4), exp); // mov [exp],exp
 	if (*targetptr != nullptr)
 		a.call(imm(*targetptr)); // call *targetptr
@@ -2628,9 +2637,12 @@ void drcbe_x86::op_callh(Assembler &a, const instruction &inst)
 	drccodeptr *targetptr = handp.handle().codeptr_addr();
 
 	// skip if conditional
-	Label skip = a.newLabel();
+	Label skip;
 	if (inst.condition() != uml::COND_ALWAYS)
+	{
+		skip = a.newLabel();
 		a.short_().j(X86_NOT_CONDITION(inst.condition()), skip); // jcc skip
+	}
 
 	// jump through the handle; directly if a normal jump
 	if (*targetptr != nullptr)
@@ -2660,9 +2672,12 @@ void drcbe_x86::op_ret(Assembler &a, const instruction &inst)
 	assert(inst.numparams() == 0);
 
 	// skip if conditional
-	Label skip = a.newLabel();
+	Label skip;
 	if (inst.condition() != uml::COND_ALWAYS)
+	{
+		skip = a.newLabel();
 		a.j(X86_NOT_CONDITION(inst.condition()), skip); // jcc skip
+	}
 
 	// return
 	a.lea(esp, ptr(esp, 28)); // lea rsp,[rsp+28]
@@ -2694,9 +2709,12 @@ void drcbe_x86::op_callc(Assembler &a, const instruction &inst)
 	be_parameter paramp(*this, inst.param(1), PTYPE_M);
 
 	// skip if conditional
-	Label skip = a.newLabel();
+	Label skip;
 	if (inst.condition() != uml::COND_ALWAYS)
+	{
+		skip = a.newLabel();
 		a.j(X86_NOT_CONDITION(inst.condition()), skip); // jcc skip
+	}
 
 	// perform the call
 	a.mov(dword_ptr(esp, 0), imm(paramp.memory())); // mov [esp],paramp
@@ -3717,9 +3735,12 @@ void drcbe_x86::op_mov(Assembler &a, const instruction &inst)
 	Gp const dstreg = dstp.select_register(eax);
 
 	// add a conditional branch unless a conditional move is possible
-	Label skip = a.newLabel();
+	Label skip;
 	if (inst.condition() != uml::COND_ALWAYS && ((inst.size() == 8) || !(dstp.is_int_register() && !srcp.is_immediate())))
+	{
+		skip = a.newLabel();
 		a.short_().j(X86_NOT_CONDITION(inst.condition()), skip); // jcc skip
+	}
 
 	// 32-bit form
 	if (inst.size() == 4)
@@ -6003,9 +6024,12 @@ void drcbe_x86::op_fmov(Assembler &a, const instruction &inst)
 	be_parameter srcp(*this, inst.param(1), PTYPE_MF);
 
 	// always start with a jmp
-	Label skip = a.newLabel();
+	Label skip;
 	if (inst.condition() != uml::COND_ALWAYS)
+	{
+		skip = a.newLabel();
 		a.j(X86_NOT_CONDITION(inst.condition()), skip); // jcc skip
+	}
 
 	// general case
 	a.mov(eax, MABS(srcp.memory(0))); // mov eax,[srcp]
diff --git a/src/lib/util/delegate.cpp b/src/lib/util/delegate.cpp
index 8949f90f146..67fb7e72d42 100644
--- a/src/lib/util/delegate.cpp
+++ b/src/lib/util/delegate.cpp
@@ -10,6 +10,8 @@
 
 #include "delegate.h"
 
+#include "mfpresolve.h"
+
 #include <cstdio>
 #include <sstream>
 
@@ -70,31 +72,9 @@ const delegate_mfp_compatible::raw_mfp_data delegate_mfp_compatible::s_null_mfp
 
 delegate_generic_function delegate_mfp_itanium::convert_to_generic(delegate_generic_class *&object) const
 {
-	// apply the "this" delta to the object first - the value is shifted to the left one bit position for the ARM-like variant
-	LOG("Input this=%p ptr=%p adj=%ld ", reinterpret_cast<void const *>(object), reinterpret_cast<void const *>(m_function), long(m_this_delta));
-	object = reinterpret_cast<delegate_generic_class *>(
-			reinterpret_cast<std::uint8_t *>(object) + (m_this_delta >> ((MAME_ABI_CXX_ITANIUM_MFP_TYPE == MAME_ABI_CXX_ITANIUM_MFP_ARM) ? 1 : 0)));
-	LOG("Calculated this=%p ", reinterpret_cast<void const *>(object));
-
-	// test the virtual member function flag - it's the low bit of either the ptr or adj field, depending on the variant
-	if ((MAME_ABI_CXX_ITANIUM_MFP_TYPE == MAME_ABI_CXX_ITANIUM_MFP_ARM) ? !(m_this_delta & 1) : !(m_function & 1))
-	{
-		// conventional function pointer
-		LOG("ptr=%p\n", reinterpret_cast<void const *>(m_function));
-		return reinterpret_cast<delegate_generic_function>(m_function);
-	}
-	else
-	{
-		// byte index into the vtable to the function
-		std::uint8_t const *const vtable_ptr = *reinterpret_cast<std::uint8_t const *const *>(object) + m_function - ((MAME_ABI_CXX_ITANIUM_MFP_TYPE == MAME_ABI_CXX_ITANIUM_MFP_ARM) ? 0 : 1);
-		delegate_generic_function result;
-		if (MAME_ABI_CXX_VTABLE_FNDESC)
-			result = reinterpret_cast<delegate_generic_function>(uintptr_t(vtable_ptr));
-		else
-			result = *reinterpret_cast<delegate_generic_function const *>(vtable_ptr);
-		LOG("ptr=%p (vtable)\n", reinterpret_cast<void const *>(result));
-		return result;
-	}
+	auto const [entrypoint, adjusted] = detail::resolve_member_function_itanium(m_function, m_this_delta, object);
+	object = reinterpret_cast<delegate_generic_class *>(adjusted);
+	return reinterpret_cast<delegate_generic_function>(entrypoint);
 }
 
 
@@ -107,181 +87,9 @@
 
 delegate_generic_function delegate_mfp_msvc::adjust_this_pointer(delegate_generic_class *&object) const
 {
-	LOG("Input this=%p ", reinterpret_cast<void const *>(object));
-	if (sizeof(single_base_equiv) < m_size)
-		LOG("thisdelta=%d ", m_this_delta);
-	if (sizeof(unknown_base_equiv) == m_size)
-		LOG("vptrdelta=%d vindex=%d ", m_vptr_offs, m_vt_index);
-	std::uint8_t *byteptr = reinterpret_cast<std::uint8_t *>(object);
-
-	// test for pointer to member function cast across virtual inheritance relationship
-	if ((sizeof(unknown_base_equiv) == m_size) && m_vt_index)
-	{
-		// add offset from "this" pointer to location of vptr, and add offset to virtual base from vtable
-		byteptr += m_vptr_offs;
-		std::uint8_t const *const vptr = *reinterpret_cast<std::uint8_t const *const *>(byteptr);
-		byteptr += *reinterpret_cast<int const *>(vptr + m_vt_index);
-	}
-
-	// add "this" pointer displacement if present in the pointer to member function
-	if (sizeof(single_base_equiv) < m_size)
-		byteptr += m_this_delta;
-	LOG("Calculated this=%p\n", reinterpret_cast<void const *>(byteptr));
-	object = reinterpret_cast<delegate_generic_class *>(byteptr);
-
-	// walk past recognisable thunks
-#if defined(__x86_64__) || defined(_M_X64)
-	std::uint8_t const *func = reinterpret_cast<std::uint8_t const *>(m_function);
-	while (true)
-	{
-		// Assumes Windows calling convention, and doesn't consider that
-		// the "this" pointer could be in RDX if RCX is a pointer to
-		// space for an oversize scalar result.  Since the result area
-		// is uninitialised on entry, you won't see something that looks
-		// like a vtable dispatch through RCX in this case - it won't
-		// behave badly, it just won't bypass virtual call thunks in the
-		// rare situations where the return type is an oversize scalar.
-		if (0xe9 == func[0])
-		{
-			// relative jump with 32-bit displacement (typically a resolved PLT entry)
-			LOG("Found relative jump at %p ", func);
-			func += std::ptrdiff_t(5) + *reinterpret_cast<std::int32_t const *>(func + 1);
-			LOG("redirecting to %p\n", func);
-			continue;
-		}
-		else if ((0x48 == func[0]) && (0x8b == func[1]) && (0x01 == func[2]))
-		{
-			if ((0xff == func[3]) && ((0x20 == func[4]) || (0x60 == func[4]) || (0xa0 == func[4])))
-			{
-				// MSVC virtual function call thunk - mov rax,QWORD PTR [rcx] ; jmp QWORD PTR [rax+...]
-				LOG("Found virtual member function thunk at %p ", func);
-				std::uint8_t const *const vptr = *reinterpret_cast<std::uint8_t const *const *>(object);
-				if (0x20 == func[4]) // no displacement
-					func = *reinterpret_cast<std::uint8_t const *const *>(vptr);
-				else if (0x60 == func[4]) // 8-bit displacement
-					func = *reinterpret_cast<std::uint8_t const *const *>(vptr + *reinterpret_cast<std::int8_t const *>(func + 5));
-				else // 32-bit displacement
-					func = *reinterpret_cast<std::uint8_t const *const *>(vptr + *reinterpret_cast<std::int32_t const *>(func + 5));
-				LOG("redirecting to %p\n", func);
-				continue;
-			}
-			else if ((0x48 == func[3]) && (0x8b == func[4]))
-			{
-				// clang virtual function call thunk - mov rax,QWORD PTR [rcx] ; mov rax,QWORD PTR [rax+...] ; jmp rax
-				if ((0x00 == func[5]) && (0x48 == func[6]) && (0xff == func[7]) && (0xe0 == func[8]))
-				{
-					// no displacement
-					LOG("Found virtual member function thunk at %p ", func);
-					std::uint8_t const *const vptr = *reinterpret_cast<std::uint8_t const *const *>(object);
-					func = *reinterpret_cast<std::uint8_t const *const *>(vptr);
-					LOG("redirecting to %p\n", func);
-					continue;
-				}
-				else if ((0x40 == func[5]) && (0x48 == func[7]) && (0xff == func[8]) && (0xe0 == func[9]))
-				{
-					// 8-bit displacement
-					LOG("Found virtual member function thunk at %p ", func);
-					std::uint8_t const *const vptr = *reinterpret_cast<std::uint8_t const *const *>(object);
-					func = *reinterpret_cast<std::uint8_t const *const *>(vptr + *reinterpret_cast<std::int8_t const *>(func + 6));
-					LOG("redirecting to %p\n", func);
-					continue;
-				}
-				else if ((0x80 == func[5]) && (0x48 == func[10]) && (0xff == func[11]) && (0xe0 == func[12]))
-				{
-					// 32-bit displacement
-					LOG("Found virtual member function thunk at %p ", func);
-					std::uint8_t const *const vptr = *reinterpret_cast<std::uint8_t const *const *>(object);
-					func = *reinterpret_cast<std::uint8_t const *const *>(vptr + *reinterpret_cast<std::int32_t const *>(func + 6));
-					LOG("redirecting to %p\n", func);
-					continue;
-				}
-			}
-		}
-
-		// clang uses unoptimised thunks if optimisation is disabled
-		// Without optimisation, clang produces thunks like:
-		//   50                      push  rax
-		//   48 89 0c 24             mov   QWORD PTR [rsp],rcx
-		//   48 8b 0c 24             mov   rcx,QWORD PTR [rsp]
-		//   48 8b 01                mov   rax,QWORD PTR [rcx]
-		//   48 8b 80 xx xx xx xx    mov   rax,QWORD PTR [rax+...]
-		//   41 5a                   pop   r10
-		//   48 ff e0                jmp   rax
-		// Trying to decode these thunks likely isn't worth the effort.
-		// Chasing performance in unoptimised builds isn't very useful,
-		// and the format of these thunks may be fragile.
-
-		// not something we can easily bypass
-		break;
-	}
-	return reinterpret_cast<delegate_generic_function>(std::uintptr_t(func));
-#elif defined(__aarch64__) || defined(_M_ARM64)
-	std::uint32_t const *func = reinterpret_cast<std::uint32_t const *>(m_function);
-	while (true)
-	{
-		// Assumes little Endian mode.  Instructions are always stored
-		// in little Endian format on AArch64, so if big Endian mode is
-		// to be supported, the values need to be swapped.
-		if ((0x90000010 == (func[0] & 0x9f00001f)) && (0x91000210 == (func[1] & 0xffc003ff)) && (0xd61f0200 == func[2]))
-		{
-			// page-relative jump with +/-4GB reach - adrp xip0,... ; add xip0,xip0,#... ; br xip0
-			LOG("Found page-relative jump at %p ", func);
-			std::int64_t const page =
-					(std::uint64_t(func[0] & 0x60000000) >> 17) |
-					(std::uint64_t(func[0] & 0x00ffffe0) << 9) |
-					((func[0] & 0x00800000) ? (~std::uint64_t(0) << 33) : 0);
-			std::uint32_t const offset = (func[1] & 0x003ffc00) >> 10;
-			func = reinterpret_cast<std::uint32_t const *>(((std::uintptr_t(func) + page) & (~std::uintptr_t(0) << 12)) + offset);
-			LOG("redirecting to %p\n", func);
-		}
-		else if ((0xf9400010 == func[0]) && (0xf9400210 == (func[1] & 0xffc003ff)) && (0xd61f0200 == func[2]))
-		{
-			// virtual function call thunk - ldr xip0,[x0] ; ldr xip0,[x0,#...] ; br xip0
-			LOG("Found virtual member function thunk at %p ", func);
-			std::uint32_t const *const *const vptr = *reinterpret_cast<std::uint32_t const *const *const *>(object);
-			func = vptr[(func[1] & 0x003ffc00) >> 10];
-			LOG("redirecting to %p\n", func);
-		}
-		else
-		{
-			// not something we can easily bypass
-			break;
-		}
-
-		// clang uses horribly sub-optimal thunks for AArch64
-		// Without optimisation, clang produces thunks like:
-		//   d10143ff    sub   sp,sp,#80
-		//   f90027e7    str   x7,[sp,#72]
-		//   f90023e6    str   x6,[sp,#64]
-		//   f9001fe5    str   x5,[sp,#56]
-		//   f9001be4    str   x4,[sp,#48]
-		//   f90017e3    str   x3,[sp,#40]
-		//   f90013e2    str   x2,[sp,#32]
-		//   f9000fe1    str   x1,[sp,#24]
-		//   f90007e0    str   x0,[sp,#8]
-		//   f94007e0    ldr   x0,[sp,#8]
-		//   f9400009    ldr   x9,[x0]
-		//   f9400129    ldr   x9,[x9,#...]
-		//   910143ff    add   sp,sp,#80
-		//   d61f0120    br    x9
-		// With optimisation, clang produces thunks like:
-		//   d10103ff    sub   sp,sp,#64
-		//   a9008be1    stp   x1,x2,[sp,#8]
-		//   a90193e3    stp   x3,x4,[sp,#24]
-		//   a9029be5    stp   x5,x6,[sp,#40]
-		//   f9001fe7    str   x7,[sp,#56]
-		//   f9400009    ldr   x9,[x0]
-		//   f9400129    ldr   x9,[x9,#...]
-		//   910103ff    add   sp,sp,#64
-		//   d61f0120    br    x9
-		// It's more effort than it's worth to try decoding these
-		// thunks.
-
-	}
-	return reinterpret_cast<delegate_generic_function>(std::uintptr_t(func));
-#else
-	return reinterpret_cast<delegate_generic_function>(m_function);
-#endif
+	auto const [entrypoint, adjusted] = detail::resolve_member_function_msvc(&m_function, m_size, object);
+	object = reinterpret_cast<delegate_generic_class *>(adjusted);
+	return reinterpret_cast<delegate_generic_function>(entrypoint);
 }
 
 } // namespace util::detail
diff --git a/src/lib/util/mfpresolve.cpp b/src/lib/util/mfpresolve.cpp
new file mode 100644
index 00000000000..4542a29229d
--- /dev/null
+++ b/src/lib/util/mfpresolve.cpp
@@ -0,0 +1,258 @@
+// license:BSD-3-Clause
+// copyright-holders:Vas Crabb
+/***************************************************************************
+
+    mfpresolve.cpp
+
+    Helpers for resolving member function pointers to entry points.
+
+***************************************************************************/
+
+#include "mfpresolve.h"
+
+#include "osdcomm.h"
+
+#include <cstdio>
+
+
+//**************************************************************************
+//  MACROS
+//**************************************************************************
+
+#if defined(MAME_DELEGATE_LOG_ADJ)
+	#define LOG(...) printf(__VA_ARGS__)
+#else
+	#define LOG(...) do { if (false) printf(__VA_ARGS__); } while (false)
+#endif
+
+
+
+namespace util::detail {
+
+std::pair<std::uintptr_t, std::uintptr_t> resolve_member_function_itanium(
+		std::uintptr_t function,
+		std::ptrdiff_t delta,
+		void const *object) noexcept
+{
+	// apply the "this" delta to the object first - the value is shifted to the left one bit position for the ARM-like variant
+	LOG("Input this=%p ptr=%p adj=%ld ", object, reinterpret_cast<void const *>(function), long(delta));
+	constexpr int deltashift = (MAME_ABI_CXX_ITANIUM_MFP_TYPE == MAME_ABI_CXX_ITANIUM_MFP_ARM) ? 1 : 0;
+	object = reinterpret_cast<std::uint8_t const *>(object) + (delta >> deltashift);
+	LOG("Calculated this=%p ", object);
+
+	// test the virtual member function flag - it's the low bit of either the ptr or adj field, depending on the variant
+	if ((MAME_ABI_CXX_ITANIUM_MFP_TYPE == MAME_ABI_CXX_ITANIUM_MFP_ARM) ? !(delta & 1) : !(function & 1))
+	{
+		// conventional function pointer
+		LOG("ptr=%p\n", reinterpret_cast<void const *>(function));
+		return std::make_pair(function, std::uintptr_t(object));
+	}
+	else
+	{
+		// byte index into the vtable to the function
+		auto const vtable_ptr = *reinterpret_cast<std::uint8_t const *const *>(object) + function - ((MAME_ABI_CXX_ITANIUM_MFP_TYPE == MAME_ABI_CXX_ITANIUM_MFP_ARM) ? 0 : 1);
+		std::uintptr_t result;
+		if (MAME_ABI_CXX_VTABLE_FNDESC)
+			result = std::uintptr_t(vtable_ptr);
+		else
+			result = *reinterpret_cast<std::uintptr_t const *>(vtable_ptr);
+		LOG("ptr=%p (vtable)\n", reinterpret_cast<void const *>(result));
+		return std::make_pair(result, std::uintptr_t(object));
+	}
+}
+
+
+std::pair<std::uintptr_t, std::uintptr_t> resolve_member_function_msvc(
+		void const *funcptr,
+		std::size_t size,
+		void const *object) noexcept
+{
+	struct single { std::uintptr_t entrypoint; };
+	struct multi { std::uintptr_t entrypoint; int this_delta; };
+	struct { std::uintptr_t entrypoint; int this_delta; int vptr_offs; int vt_index; } const *unknown;
+	assert(sizeof(*unknown) >= size);
+	unknown = reinterpret_cast<decltype(unknown)>(funcptr);
+
+	LOG("Input this=%p ", object);
+	if (sizeof(single) < size)
+		LOG("thisdelta=%d ", unknown->this_delta);
+	if (sizeof(*unknown) == size)
+		LOG("vptrdelta=%d vindex=%d ", unknown->vptr_offs, unknown->vt_index);
+	auto byteptr = reinterpret_cast<std::uint8_t const *>(object);
+
+	// test for pointer to member function cast across virtual inheritance relationship
+	if ((sizeof(*unknown) == size) && unknown->vt_index)
+	{
+		// add offset from "this" pointer to location of vptr, and add offset to virtual base from vtable
+		byteptr += unknown->vptr_offs;
+		std::uint8_t const *const vptr = *reinterpret_cast<std::uint8_t const *const *>(byteptr);
+		byteptr += *reinterpret_cast<int const *>(vptr + unknown->vt_index);
+	}
+
+	// add "this" pointer displacement if present in the pointer to member function
+	if (sizeof(single) < size)
+		byteptr += unknown->this_delta;
+	LOG("Calculated this=%p\n", reinterpret_cast<void const *>(byteptr));
+
+	// walk past recognisable thunks
+	return std::make_pair(bypass_member_function_thunks(unknown->entrypoint, byteptr), std::uintptr_t(byteptr));
+}
+
+
+std::uintptr_t bypass_member_function_thunks(
+		std::uintptr_t entrypoint,
+		void const *object) noexcept
+{
+#if defined(__x86_64__) || defined(_M_X64)
+	std::uint8_t const *func = reinterpret_cast<std::uint8_t const *>(entrypoint);
+	while (true)
+	{
+		// Assumes Windows calling convention, and doesn't consider that
+		// the "this" pointer could be in RDX if RCX is a pointer to
+		// space for an oversize scalar result.  Since the result area
+		// is uninitialised on entry, you won't see something that looks
+		// like a vtable dispatch through RCX in this case - it won't
+		// behave badly, it just won't bypass virtual call thunks in the
+		// rare situations where the return type is an oversize scalar.
+		if (0xe9 == func[0])
+		{
+			// relative jump with 32-bit displacement (typically a resolved PLT entry)
+			LOG("Found relative jump at %p ", func);
+			func += std::ptrdiff_t(5) + *reinterpret_cast<std::int32_t const *>(func + 1);
+			LOG("redirecting to %p\n", func);
+			continue;
+		}
+		else if (object && (0x48 == func[0]) && (0x8b == func[1]) && (0x01 == func[2]))
+		{
+			if ((0xff == func[3]) && ((0x20 == func[4]) || (0x60 == func[4]) || (0xa0 == func[4])))
+			{
+				// MSVC virtual function call thunk - mov rax,QWORD PTR [rcx] ; jmp QWORD PTR [rax+...]
+				LOG("Found virtual member function thunk at %p ", func);
+				std::uint8_t const *const vptr = *reinterpret_cast<std::uint8_t const *const *>(object);
+				if (0x20 == func[4]) // no displacement
+					func = *reinterpret_cast<std::uint8_t const *const *>(vptr);
+				else if (0x60 == func[4]) // 8-bit displacement
+					func = *reinterpret_cast<std::uint8_t const *const *>(vptr + *reinterpret_cast<std::int8_t const *>(func + 5));
+				else // 32-bit displacement
+					func = *reinterpret_cast<std::uint8_t const *const *>(vptr + *reinterpret_cast<std::int32_t const *>(func + 5));
+				LOG("redirecting to %p\n", func);
+				continue;
+			}
+			else if ((0x48 == func[3]) && (0x8b == func[4]))
+			{
+				// clang virtual function call thunk - mov rax,QWORD PTR [rcx] ; mov rax,QWORD PTR [rax+...] ; jmp rax
+				if ((0x00 == func[5]) && (0x48 == func[6]) && (0xff == func[7]) && (0xe0 == func[8]))
+				{
+					// no displacement
+					LOG("Found virtual member function thunk at %p ", func);
+					std::uint8_t const *const vptr = *reinterpret_cast<std::uint8_t const *const *>(object);
+					func = *reinterpret_cast<std::uint8_t const *const *>(vptr);
+					LOG("redirecting to %p\n", func);
+					continue;
+				}
+				else if ((0x40 == func[5]) && (0x48 == func[7]) && (0xff == func[8]) && (0xe0 == func[9]))
+				{
+					// 8-bit displacement
+					LOG("Found virtual member function thunk at %p ", func);
+					std::uint8_t const *const vptr = *reinterpret_cast<std::uint8_t const *const *>(object);
+					func = *reinterpret_cast<std::uint8_t const *const *>(vptr + *reinterpret_cast<std::int8_t const *>(func + 6));
+					LOG("redirecting to %p\n", func);
+					continue;
+				}
+				else if ((0x80 == func[5]) && (0x48 == func[10]) && (0xff == func[11]) && (0xe0 == func[12]))
+				{
+					// 32-bit displacement
+					LOG("Found virtual member function thunk at %p ", func);
+					std::uint8_t const *const vptr = *reinterpret_cast<std::uint8_t const *const *>(object);
+					func = *reinterpret_cast<std::uint8_t const *const *>(vptr + *reinterpret_cast<std::int32_t const *>(func + 6));
+					LOG("redirecting to %p\n", func);
+					continue;
+				}
+			}
+		}
+
+		// clang uses unoptimised thunks if optimisation is disabled
+		// Without optimisation, clang produces thunks like:
+		//   50                      push  rax
+		//   48 89 0c 24             mov   QWORD PTR [rsp],rcx
+		//   48 8b 0c 24             mov   rcx,QWORD PTR [rsp]
+		//   48 8b 01                mov   rax,QWORD PTR [rcx]
+		//   48 8b 80 xx xx xx xx    mov   rax,QWORD PTR [rax+...]
+		//   41 5a                   pop   r10
+		//   48 ff e0                jmp   rax
+		// Trying to decode these thunks likely isn't worth the effort.
+		// Chasing performance in unoptimised builds isn't very useful,
+		// and the format of these thunks may be fragile.
+
+		// not something we can easily bypass
+		break;
+	}
+	return std::uintptr_t(func);
+#elif defined(__aarch64__) || defined(_M_ARM64)
+	std::uint32_t const *func = reinterpret_cast<std::uint32_t const *>(entrypoint);
+	auto const fetch = [&func] (auto offset) { return little_endianize_int32(func[offset]); };
+	while (true)
+	{
+		if ((0x90000010 == (fetch(0) & 0x9f00001f)) && (0x91000210 == (fetch(1) & 0xffc003ff)) && (0xd61f0200 == fetch(2)))
+		{
+			// page-relative jump with +/-4GB reach - adrp xip0,... ; add xip0,xip0,#... ; br xip0
+			LOG("Found page-relative jump at %p ", func);
+			std::int64_t const page =
+					(std::uint64_t(fetch(0) & 0x60000000) >> 17) |
+					(std::uint64_t(fetch(0) & 0x00ffffe0) << 9) |
+					((fetch(0) & 0x00800000) ? (~std::uint64_t(0) << 33) : 0);
+			std::uint32_t const offset = (fetch(1) & 0x003ffc00) >> 10;
+			func = reinterpret_cast<std::uint32_t const *>(((std::uintptr_t(func) + page) & (~std::uintptr_t(0) << 12)) + offset);
+			LOG("redirecting to %p\n", func);
+		}
+		else if (object && (0xf9400010 == fetch(0)) && (0xf9400210 == (fetch(1) & 0xffc003ff)) && (0xd61f0200 == fetch(2)))
+		{
+			// virtual function call thunk - ldr xip0,[x0] ; ldr xip0,[x0,#...] ; br xip0
+			LOG("Found virtual member function thunk at %p ", func);
+			auto const vptr = *reinterpret_cast<std::uint32_t const *const *const *>(object);
+			func = vptr[(fetch(1) & 0x003ffc00) >> 10];
+			LOG("redirecting to %p\n", func);
+		}
+		else
+		{
+			// not something we can easily bypass
+			break;
+		}
+
+		// clang uses horribly sub-optimal thunks for AArch64
+		// Without optimisation, clang produces thunks like:
+		//   d10143ff    sub   sp,sp,#80
+		//   f90027e7    str   x7,[sp,#72]
+		//   f90023e6    str   x6,[sp,#64]
+		//   f9001fe5    str   x5,[sp,#56]
+		//   f9001be4    str   x4,[sp,#48]
+		//   f90017e3    str   x3,[sp,#40]
+		//   f90013e2    str   x2,[sp,#32]
+		//   f9000fe1    str   x1,[sp,#24]
+		//   f90007e0    str   x0,[sp,#8]
+		//   f94007e0    ldr   x0,[sp,#8]
+		//   f9400009    ldr   x9,[x0]
+		//   f9400129    ldr   x9,[x9,#...]
+		//   910143ff    add   sp,sp,#80
+		//   d61f0120    br    x9
+		// With optimisation, clang produces thunks like:
+		//   d10103ff    sub   sp,sp,#64
+		//   a9008be1    stp   x1,x2,[sp,#8]
+		//   a90193e3    stp   x3,x4,[sp,#24]
+		//   a9029be5    stp   x5,x6,[sp,#40]
+		//   f9001fe7    str   x7,[sp,#56]
+		//   f9400009    ldr   x9,[x0]
+		//   f9400129    ldr   x9,[x9,#...]
+		//   910103ff    add   sp,sp,#64
+		//   d61f0120    br    x9
+		// It's more effort than it's worth to try decoding these
+		// thunks.
+
+	}
+	return std::uintptr_t(func);
+#else
+	return entrypoint;
+#endif
+}
+
+} // namespace util::detail
diff --git a/src/lib/util/mfpresolve.h b/src/lib/util/mfpresolve.h
new file mode 100644
index 00000000000..c9a2eba6a95
--- /dev/null
+++ b/src/lib/util/mfpresolve.h
@@ -0,0 +1,86 @@
+// license:BSD-3-Clause
+// copyright-holders:Vas Crabb
+/***************************************************************************
+
+    mfpresolve.h
+
+    Helpers for resolving member function pointers to entry points.
+
+***************************************************************************/
+#ifndef MAME_LIB_UTIL_MFPRESOLVE_H
+#define MAME_LIB_UTIL_MFPRESOLVE_H
+
+#pragma once
+
+#include "abi.h"
+
+#include <cassert>
+#include <cstdint>
+#include <utility>
+
+
+namespace util {
+
+namespace detail {
+
+std::pair<std::uintptr_t, std::uintptr_t> resolve_member_function_itanium(std::uintptr_t function, std::ptrdiff_t delta, void const *object) noexcept;
+std::pair<std::uintptr_t, std::uintptr_t> resolve_member_function_msvc(void const *funcptr, std::size_t size, void const *object) noexcept;
+std::uintptr_t bypass_member_function_thunks(std::uintptr_t entrypoint, void const *object) noexcept;
+
+} // namespace detail
+
+
+template <typename T, typename U>
+inline T bypass_member_function_thunks(T entrypoint, U const *object) noexcept
+{
+	return reinterpret_cast<T>(
+			detail::bypass_member_function_thunks(
+				reinterpret_cast<std::uintptr_t>(entrypoint),
+				reinterpret_cast<void const *>(object)));
+}
+
+
+template <typename T, typename Ret, typename... Params>
+inline std::pair<std::uintptr_t, std::uintptr_t> resolve_member_function(Ret (T::*function)(Params...), T *object) noexcept
+{
+	if (MAME_ABI_CXX_TYPE == MAME_ABI_CXX_ITANIUM)
+	{
+		struct { uintptr_t ptr; ptrdiff_t adj; } equiv;
+		static_assert(sizeof(function) == sizeof(equiv));
+		*reinterpret_cast<decltype(function) *>(&equiv) = function;
+		return detail::resolve_member_function_itanium(equiv.ptr, equiv.adj, object);
+	}
+	else if (MAME_ABI_CXX_TYPE == MAME_ABI_CXX_MSVC)
+	{
+		return detail::resolve_member_function_msvc(&function, sizeof(function), object);
+	}
+	else
+	{
+		return std::make_pair(std::uintptr_t(nullptr), std::uintptr_t(nullptr));
+	}
+}
+
+
+template <typename T, typename Ret, typename... Params>
+inline std::pair<std::uintptr_t, std::uintptr_t> resolve_member_function(Ret (T::*function)(Params...) const, T const *object) noexcept
+{
+	if (MAME_ABI_CXX_TYPE == MAME_ABI_CXX_ITANIUM)
+	{
+		struct { uintptr_t ptr; ptrdiff_t adj; } equiv;
+		static_assert(sizeof(function) == sizeof(equiv));
+		*reinterpret_cast<decltype(function) *>(&equiv) = function;
+		return detail::resolve_member_function_itanium(equiv.ptr, equiv.adj, object);
+	}
+	else if (MAME_ABI_CXX_TYPE == MAME_ABI_CXX_MSVC)
+	{
+		return detail::resolve_member_function_msvc(&function, sizeof(function), object);
+	}
+	else
+	{
+		return std::make_pair(std::uintptr_t(nullptr), std::uintptr_t(nullptr));
+	}
+}
+
+} // namespace util
+
+#endif // MAME_LIB_UTIL_MFPRESOLVE_H
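For reference, a minimal caller of the new helper might look like the sketch below. It mirrors the simplified resolve_accessor lambdas in the back-end diffs above, but the widget class and the C-style call signature are assumptions for illustration; the indirect call relies on the platform ABI in the same way the DRC back-ends rely on it:

    #include "mfpresolve.h"

    #include <cstdint>

    struct widget  // hypothetical class with a virtual member function
    {
        virtual ~widget() = default;
        virtual int read(int address) { return address + 1; }
    };

    int main()
    {
        widget w;

        // resolve the raw entry point and the adjusted "this" pointer,
        // bypassing any PLT or virtual call thunks the helper recognises
        auto const [entrypoint, adjusted] = util::resolve_member_function(&widget::read, &w);

        // call through the resolved pointer, passing the adjusted "this"
        // as the first argument (ABI-dependent, as in the DRC back-ends)
        using read_fn = int (*)(std::uintptr_t, int);
        return reinterpret_cast<read_fn>(entrypoint)(adjusted, 41); // returns 42
    }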