author     Vas Crabb <vas@vastheman.com>    2021-09-22 05:05:19 +1000
committer  Vas Crabb <vas@vastheman.com>    2021-09-22 05:05:19 +1000
commit     86c005377d62adf7996f3d6def3046e3fde1da27 (patch)
tree       8f4ac56e77d3d2cc250fb8be4e3627404aafefcb
parent     a0289bfd4c041a780440e1d1e0055b531069dd50 (diff)
cpu/drcbex64.cpp: Proof-of-concept for optimisation of calling out.
Resolve address space virtual member function addresses on construction and call them directly. This provides a small but measurable performance improvement in drivers that use the recompiler and access the memory system heavily. Also made MSVC delegates capable of walking past all the thunks MSVC puts in the way of actually calling a member function.

I'm not accounting for the "this" pointer being passed in RDX when the return value is an oversize scalar. This is harmless because the code won't see anything that looks like a virtual call thunk using RCX when RCX points to uninitialised space for the return value. It just means virtual member function calls won't be bypassed when the return value is an oversize scalar, which doesn't happen frequently anyway.
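For context, the constructor change below relies on the Itanium C++ ABI representation of pointers to member functions (GCC/Clang on x86-64): a { ptr, adj } pair where an odd ptr is a vtable offset plus one and an even ptr is the function address itself. The standalone sketch below shows the same resolution step outside MAME; the names Base, resolved and resolve are illustrative only, and calling the resolved address through a plain function pointer leans on the platform calling convention rather than anything the standard guarantees.

// hedged sketch - not MAME code; assumes the Itanium C++ ABI member function
// pointer layout used by GCC/Clang on x86-64
#include <cstdint>
#include <cstring>
#include <iostream>

struct Base
{
    virtual ~Base() = default;
    virtual int get() const { return 42; }
};

using raw_func = int (*)(void const *);                 // resolved entry point, takes "this" as its first argument
struct resolved { void const *obj; raw_func func; };    // mirrors the shape of drcbex64's resolved_handler

resolved resolve(Base const &object, int (Base::*mfp)() const)
{
    // reinterpret the pointer to member function as { ptr, this-adjustment }
    struct { std::uintptr_t ptr; std::ptrdiff_t adj; } equiv;
    static_assert(sizeof(mfp) == sizeof(equiv), "unexpected member function pointer layout");
    std::memcpy(&equiv, &mfp, sizeof(equiv));

    // apply the "this" adjustment, then look in the vtable if ptr is odd
    auto const *adjusted = reinterpret_cast<std::uint8_t const *>(&object) + equiv.adj;
    if (equiv.ptr & 1)
    {
        auto const *vptr = *reinterpret_cast<std::uint8_t const *const *>(adjusted);
        return { adjusted, *reinterpret_cast<raw_func const *>(vptr + equiv.ptr - 1) };
    }
    return { adjusted, reinterpret_cast<raw_func>(equiv.ptr) };
}

int main()
{
    Base b;
    resolved const r = resolve(b, &Base::get);
    std::cout << r.func(r.obj) << '\n';                 // prints 42 with no virtual dispatch at the call site
}

The MSVC path cannot use this layout, which is why the diff decodes the compiler-specific representation instead and then walks jump and virtual call thunks to reach the real entry point.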
-rw-r--r--  src/devices/cpu/drcbex64.cpp  309
-rw-r--r--  src/devices/cpu/drcbex64.h     44
-rw-r--r--  src/emu/validity.cpp           30
-rw-r--r--  src/lib/util/delegate.cpp      42
-rw-r--r--  src/lib/util/delegate.h         7
5 files changed, 361 insertions, 71 deletions
diff --git a/src/devices/cpu/drcbex64.cpp b/src/devices/cpu/drcbex64.cpp
index a25f1262bf4..d48e43d80b7 100644
--- a/src/devices/cpu/drcbex64.cpp
+++ b/src/devices/cpu/drcbex64.cpp
@@ -611,18 +611,18 @@ inline void drcbe_x64::smart_call_m64(Assembler &a, x86code **target)
//-------------------------------------------------
drcbe_x64::drcbe_x64(drcuml_state &drcuml, device_t &device, drc_cache &cache, uint32_t flags, int modes, int addrbits, int ignorebits)
- : drcbe_interface(drcuml, cache, device),
- m_hash(cache, modes, addrbits, ignorebits),
- m_map(cache, 0xaaaaaaaa5555),
- m_log(nullptr),
- m_log_asmjit(nullptr),
- m_absmask32((uint32_t *)cache.alloc_near(16*2 + 15)),
- m_absmask64(nullptr),
- m_rbpvalue(cache.near() + 0x80),
- m_entry(nullptr),
- m_exit(nullptr),
- m_nocode(nullptr),
- m_near(*(near_state *)cache.alloc_near(sizeof(m_near)))
+ : drcbe_interface(drcuml, cache, device)
+ , m_hash(cache, modes, addrbits, ignorebits)
+ , m_map(cache, 0xaaaaaaaa5555)
+ , m_log(nullptr)
+ , m_log_asmjit(nullptr)
+ , m_absmask32((uint32_t *)cache.alloc_near(16*2 + 15))
+ , m_absmask64(nullptr)
+ , m_rbpvalue(cache.near() + 0x80)
+ , m_entry(nullptr)
+ , m_exit(nullptr)
+ , m_nocode(nullptr)
+ , m_near(*(near_state *)cache.alloc_near(sizeof(m_near)))
{
// build up necessary arrays
static const uint32_t sse_control[4] =
@@ -675,6 +675,93 @@ drcbe_x64::drcbe_x64(drcuml_state &drcuml, device_t &device, drc_cache &cache, u
m_near.flagsunmap[entry] = flags;
}
+ // resolve the actual addresses of the address space handlers
+ auto const resolve_accessor =
+ [] (resolved_handler &handler, address_space &space, auto accessor)
+ {
+ if (MAME_DELEGATE_USE_TYPE == MAME_DELEGATE_TYPE_ITANIUM)
+ {
+ struct { uintptr_t ptr; ptrdiff_t adj; } equiv;
+ assert(sizeof(accessor) == sizeof(equiv));
+ *reinterpret_cast<decltype(accessor) *>(&equiv) = accessor;
+ handler.obj = uintptr_t(reinterpret_cast<u8 *>(&space) + equiv.adj);
+ if (BIT(equiv.ptr, 0))
+ {
+ auto const vptr = *reinterpret_cast<u8 const *const *>(handler.obj) + equiv.ptr - 1;
+ handler.func = *reinterpret_cast<x86code *const *>(vptr);
+ }
+ else
+ {
+ handler.func = reinterpret_cast<x86code *>(equiv.ptr);
+ }
+ }
+ else if (MAME_DELEGATE_USE_TYPE == MAME_DELEGATE_TYPE_MSVC)
+ {
+ // interpret the pointer to member function ignoring the virtual inheritance variant
+ struct single { uintptr_t ptr; };
+ struct multi { uintptr_t ptr; int adj; };
+ struct { uintptr_t ptr; int adj; int vadj; int vindex; } unknown;
+ assert(sizeof(accessor) <= sizeof(unknown));
+ *reinterpret_cast<decltype(accessor) *>(&unknown) = accessor;
+ handler.func = reinterpret_cast<x86code *>(unknown.ptr);
+ handler.obj = uintptr_t(&space);
+ if ((sizeof(unknown) == sizeof(accessor)) && unknown.vindex)
+ {
+ handler.obj += unknown.vadj;
+ auto const vptr = *reinterpret_cast<std::uint8_t const *const *>(handler.obj);
+ handler.obj += *reinterpret_cast<int const *>(vptr + unknown.vindex);
+ }
+ if (sizeof(single) < sizeof(accessor))
+ handler.obj += unknown.adj;
+
+ // walk past thunks
+ while (true)
+ {
+ if (0xe9 == handler.func[0])
+ {
+ // relative jump with 32-bit displacement
+ handler.func += 5 + *reinterpret_cast<s32 const *>(handler.func + 1);
+ }
+ else if ((0x48 == handler.func[0]) && (0x8b == handler.func[1]) && (0x01 == handler.func[2]) && (0xff == handler.func[3]) && ((0x60 == handler.func[4]) || (0xa0 == handler.func[4])))
+ {
+ // virtual function call thunk
+ auto const vptr = *reinterpret_cast<std::uint8_t const *const *>(handler.obj);
+ if (0x60 == handler.func[4])
+ handler.func = *reinterpret_cast<x86code *const *>(vptr + *reinterpret_cast<s8 const *>(handler.func + 5));
+ else
+ handler.func = *reinterpret_cast<x86code *const *>(vptr + *reinterpret_cast<s32 const *>(handler.func + 5));
+ }
+ else
+ {
+ // not something we can easily bypass
+ break;
+ }
+ }
+ }
+ };
+ m_resolved_accessors.resize(m_space.size());
+ for (int space = 0; m_space.size() > space; ++space)
+ {
+ if (m_space[space])
+ {
+ resolve_accessor(m_resolved_accessors[space].read_byte, *m_space[space], static_cast<u8 (address_space::*)(offs_t) >(&address_space::read_byte));
+ resolve_accessor(m_resolved_accessors[space].read_word, *m_space[space], static_cast<u16 (address_space::*)(offs_t) >(&address_space::read_word));
+ resolve_accessor(m_resolved_accessors[space].read_word_masked, *m_space[space], static_cast<u16 (address_space::*)(offs_t, u16)>(&address_space::read_word));
+ resolve_accessor(m_resolved_accessors[space].read_dword, *m_space[space], static_cast<u32 (address_space::*)(offs_t) >(&address_space::read_dword));
+ resolve_accessor(m_resolved_accessors[space].read_dword_masked, *m_space[space], static_cast<u32 (address_space::*)(offs_t, u32)>(&address_space::read_dword));
+ resolve_accessor(m_resolved_accessors[space].read_qword, *m_space[space], static_cast<u64 (address_space::*)(offs_t) >(&address_space::read_qword));
+ resolve_accessor(m_resolved_accessors[space].read_qword_masked, *m_space[space], static_cast<u64 (address_space::*)(offs_t, u64)>(&address_space::read_qword));
+
+ resolve_accessor(m_resolved_accessors[space].write_byte, *m_space[space], static_cast<void (address_space::*)(offs_t, u8) >(&address_space::write_byte));
+ resolve_accessor(m_resolved_accessors[space].write_word, *m_space[space], static_cast<void (address_space::*)(offs_t, u16) >(&address_space::write_word));
+ resolve_accessor(m_resolved_accessors[space].write_word_masked, *m_space[space], static_cast<void (address_space::*)(offs_t, u16, u16)>(&address_space::write_word));
+ resolve_accessor(m_resolved_accessors[space].write_dword, *m_space[space], static_cast<void (address_space::*)(offs_t, u32) >(&address_space::write_dword));
+ resolve_accessor(m_resolved_accessors[space].write_dword_masked, *m_space[space], static_cast<void (address_space::*)(offs_t, u32, u32)>(&address_space::write_dword));
+ resolve_accessor(m_resolved_accessors[space].write_qword, *m_space[space], static_cast<void (address_space::*)(offs_t, u64) >(&address_space::write_qword));
+ resolve_accessor(m_resolved_accessors[space].write_qword_masked, *m_space[space], static_cast<void (address_space::*)(offs_t, u64, u64)>(&address_space::write_qword));
+ }
+ }
+
// build the opcode table (static but it doesn't hurt to regenerate it)
for (auto & elem : s_opcode_table_source)
s_opcode_table[elem.opcode] = elem.func;
@@ -2231,32 +2318,65 @@ void drcbe_x64::op_read(Assembler &a, const instruction &inst)
// pick a target register for the general case
Gp dstreg = dstp.select_register(eax);
- // set up a call to the read byte handler
- mov_r64_imm(a, Gpq(REG_PARAM1), (uintptr_t)m_space[spacesizep.space()]); // mov param1,space
+ // set up a call to the read handler
+ auto &trampolines = m_accessors[spacesizep.space()];
+ auto &resolved = m_resolved_accessors[spacesizep.space()];
mov_reg_param(a, Gpd(REG_PARAM2), addrp); // mov param2,addrp
if (spacesizep.size() == SIZE_BYTE)
{
- smart_call_m64(a, (x86code **)&m_accessors[spacesizep.space()].read_byte);
- // call read_byte
+ if (resolved.read_byte.func)
+ {
+ mov_r64_imm(a, Gpq(REG_PARAM1), resolved.read_byte.obj); // mov param1,space
+ smart_call_r64(a, resolved.read_byte.func, rax); // call read_byte
+ }
+ else
+ {
+ mov_r64_imm(a, Gpq(REG_PARAM1), (uintptr_t)m_space[spacesizep.space()]); // mov param1,space
+ smart_call_m64(a, (x86code **)&trampolines.read_byte); // call read_byte
+ }
a.movzx(dstreg, al); // movzx dstreg,al
}
else if (spacesizep.size() == SIZE_WORD)
{
- smart_call_m64(a, (x86code **)&m_accessors[spacesizep.space()].read_word);
- // call read_word
+ if (resolved.read_word.func)
+ {
+ mov_r64_imm(a, Gpq(REG_PARAM1), resolved.read_word.obj); // mov param1,space
+ smart_call_r64(a, resolved.read_word.func, rax); // call read_word
+ }
+ else
+ {
+ mov_r64_imm(a, Gpq(REG_PARAM1), (uintptr_t)m_space[spacesizep.space()]); // mov param1,space
+ smart_call_m64(a, (x86code **)&trampolines.read_word); // call read_word
+ }
a.movzx(dstreg, ax); // movzx dstreg,ax
}
else if (spacesizep.size() == SIZE_DWORD)
{
- smart_call_m64(a, (x86code **)&m_accessors[spacesizep.space()].read_dword);
- // call read_dword
+ if (resolved.read_dword.func)
+ {
+ mov_r64_imm(a, Gpq(REG_PARAM1), resolved.read_dword.obj); // mov param1,space
+ smart_call_r64(a, resolved.read_dword.func, rax); // call read_dword
+ }
+ else
+ {
+ mov_r64_imm(a, Gpq(REG_PARAM1), (uintptr_t)m_space[spacesizep.space()]); // mov param1,space
+ smart_call_m64(a, (x86code **)&trampolines.read_dword); // call read_dword
+ }
if (dstreg != eax || inst.size() == 8)
a.mov(dstreg, eax); // mov dstreg,eax
}
else if (spacesizep.size() == SIZE_QWORD)
{
- smart_call_m64(a, (x86code **)&m_accessors[spacesizep.space()].read_qword);
- // call read_qword
+ if (resolved.read_qword.func)
+ {
+ mov_r64_imm(a, Gpq(REG_PARAM1), resolved.read_qword.obj); // mov param1,space
+ smart_call_r64(a, resolved.read_qword.func, rax); // call read_qword
+ }
+ else
+ {
+ mov_r64_imm(a, Gpq(REG_PARAM1), (uintptr_t)m_space[spacesizep.space()]); // mov param1,space
+ smart_call_m64(a, (x86code **)&trampolines.read_qword); // call read_qword
+ }
if (dstreg != eax)
a.mov(dstreg.r64(), rax); // mov dstreg,rax
}
@@ -2290,8 +2410,9 @@ void drcbe_x64::op_readm(Assembler &a, const instruction &inst)
// pick a target register for the general case
Gp dstreg = dstp.select_register(eax);
- // set up a call to the read byte handler
- mov_r64_imm(a, Gpq(REG_PARAM1), (uintptr_t)m_space[spacesizep.space()]); // mov param1,space
+ // set up a call to the read handler
+ auto &trampolines = m_accessors[spacesizep.space()];
+ auto &resolved = m_resolved_accessors[spacesizep.space()];
mov_reg_param(a, Gpd(REG_PARAM2), addrp); // mov param2,addrp
if (spacesizep.size() != SIZE_QWORD)
mov_reg_param(a, Gpd(REG_PARAM3), maskp); // mov param3,maskp
@@ -2299,21 +2420,45 @@ void drcbe_x64::op_readm(Assembler &a, const instruction &inst)
mov_reg_param(a, Gpq(REG_PARAM3), maskp); // mov param3,maskp
if (spacesizep.size() == SIZE_WORD)
{
- smart_call_m64(a, (x86code **)&m_accessors[spacesizep.space()].read_word_masked);
- // call read_word_masked
+ if (resolved.read_word_masked.func)
+ {
+ mov_r64_imm(a, Gpq(REG_PARAM1), resolved.read_word_masked.obj); // mov param1,space
+ smart_call_r64(a, resolved.read_word_masked.func, rax); // call read_word_masked
+ }
+ else
+ {
+ mov_r64_imm(a, Gpq(REG_PARAM1), (uintptr_t)m_space[spacesizep.space()]); // mov param1,space
+ smart_call_m64(a, (x86code **)&trampolines.read_word_masked); // call read_word_masked
+ }
a.movzx(dstreg, ax); // movzx dstreg,ax
}
else if (spacesizep.size() == SIZE_DWORD)
{
- smart_call_m64(a, (x86code **)&m_accessors[spacesizep.space()].read_dword_masked);
- // call read_dword_masked
+ if (resolved.read_dword_masked.func)
+ {
+ mov_r64_imm(a, Gpq(REG_PARAM1), resolved.read_dword_masked.obj); // mov param1,space
+ smart_call_r64(a, resolved.read_dword_masked.func, rax); // call read_dword_masked
+ }
+ else
+ {
+ mov_r64_imm(a, Gpq(REG_PARAM1), (uintptr_t)m_space[spacesizep.space()]); // mov param1,space
+ smart_call_m64(a, (x86code **)&trampolines.read_dword_masked); // call read_dword_masked
+ }
if (dstreg != eax || inst.size() == 8)
a.mov(dstreg, eax); // mov dstreg,eax
}
else if (spacesizep.size() == SIZE_QWORD)
{
- smart_call_m64(a, (x86code **)&m_accessors[spacesizep.space()].read_qword_masked);
- // call read_qword_masked
+ if (resolved.read_qword_masked.func)
+ {
+ mov_r64_imm(a, Gpq(REG_PARAM1), resolved.read_qword_masked.obj); // mov param1,space
+ smart_call_r64(a, resolved.read_qword_masked.func, rax); // call read_qword_masked
+ }
+ else
+ {
+ mov_r64_imm(a, Gpq(REG_PARAM1), (uintptr_t)m_space[spacesizep.space()]); // mov param1,space
+ smart_call_m64(a, (x86code **)&trampolines.read_qword_masked); // call read_qword_masked
+ }
if (dstreg != eax)
a.mov(dstreg.r64(), rax); // mov dstreg,rax
}
@@ -2343,21 +2488,66 @@ void drcbe_x64::op_write(Assembler &a, const instruction &inst)
const parameter &spacesizep = inst.param(2);
assert(spacesizep.is_size_space());
- // set up a call to the write byte handler
- mov_r64_imm(a, Gpq(REG_PARAM1), (uintptr_t)m_space[spacesizep.space()]); // mov param1,space
+ // set up a call to the write handler
+ auto &trampolines = m_accessors[spacesizep.space()];
+ auto &resolved = m_resolved_accessors[spacesizep.space()];
mov_reg_param(a, Gpd(REG_PARAM2), addrp); // mov param2,addrp
if (spacesizep.size() != SIZE_QWORD)
mov_reg_param(a, Gpd(REG_PARAM3), srcp); // mov param3,srcp
else
mov_reg_param(a, Gpq(REG_PARAM3), srcp); // mov param3,srcp
if (spacesizep.size() == SIZE_BYTE)
- smart_call_m64(a, (x86code **)&m_accessors[spacesizep.space()].write_byte); // call write_byte
+ {
+ if (resolved.write_byte.func)
+ {
+ mov_r64_imm(a, Gpq(REG_PARAM1), resolved.write_byte.obj); // mov param1,space
+ smart_call_r64(a, resolved.write_byte.func, rax); // call write_byte
+ }
+ else
+ {
+ mov_r64_imm(a, Gpq(REG_PARAM1), (uintptr_t)m_space[spacesizep.space()]); // mov param1,space
+ smart_call_m64(a, (x86code **)&trampolines.write_byte); // call write_byte
+ }
+ }
else if (spacesizep.size() == SIZE_WORD)
- smart_call_m64(a, (x86code **)&m_accessors[spacesizep.space()].write_word); // call write_word
+ {
+ if (resolved.write_word.func)
+ {
+ mov_r64_imm(a, Gpq(REG_PARAM1), resolved.write_word.obj); // mov param1,space
+ smart_call_r64(a, resolved.write_word.func, rax); // call write_word
+ }
+ else
+ {
+ mov_r64_imm(a, Gpq(REG_PARAM1), (uintptr_t)m_space[spacesizep.space()]); // mov param1,space
+ smart_call_m64(a, (x86code **)&trampolines.write_word); // call write_word
+ }
+ }
else if (spacesizep.size() == SIZE_DWORD)
- smart_call_m64(a, (x86code **)&m_accessors[spacesizep.space()].write_dword); // call write_dword
+ {
+ if (resolved.write_dword.func)
+ {
+ mov_r64_imm(a, Gpq(REG_PARAM1), resolved.write_dword.obj); // mov param1,space
+ smart_call_r64(a, resolved.write_dword.func, rax); // call write_dword
+ }
+ else
+ {
+ mov_r64_imm(a, Gpq(REG_PARAM1), (uintptr_t)m_space[spacesizep.space()]); // mov param1,space
+ smart_call_m64(a, (x86code **)&trampolines.write_dword); // call write_dword
+ }
+ }
else if (spacesizep.size() == SIZE_QWORD)
- smart_call_m64(a, (x86code **)&m_accessors[spacesizep.space()].write_qword); // call write_qword
+ {
+ if (resolved.write_qword.func)
+ {
+ mov_r64_imm(a, Gpq(REG_PARAM1), resolved.write_qword.obj); // mov param1,space
+ smart_call_r64(a, resolved.write_qword.func, rax); // call write_qword
+ }
+ else
+ {
+ mov_r64_imm(a, Gpq(REG_PARAM1), (uintptr_t)m_space[spacesizep.space()]); // mov param1,space
+ smart_call_m64(a, (x86code **)&trampolines.write_qword); // call write_qword
+ }
+ }
}
@@ -2379,8 +2569,9 @@ void drcbe_x64::op_writem(Assembler &a, const instruction &inst)
const parameter &spacesizep = inst.param(3);
assert(spacesizep.is_size_space());
- // set up a call to the write byte handler
- mov_r64_imm(a, Gpq(REG_PARAM1), (uintptr_t)m_space[spacesizep.space()]); // mov param1,space
+ // set up a call to the write handler
+ auto &trampolines = m_accessors[spacesizep.space()];
+ auto &resolved = m_resolved_accessors[spacesizep.space()];
mov_reg_param(a, Gpd(REG_PARAM2), addrp); // mov param2,addrp
if (spacesizep.size() != SIZE_QWORD)
{
@@ -2393,14 +2584,44 @@ void drcbe_x64::op_writem(Assembler &a, const instruction &inst)
mov_reg_param(a, Gpq(REG_PARAM4), maskp); // mov param4,maskp
}
if (spacesizep.size() == SIZE_WORD)
- smart_call_m64(a, (x86code **)&m_accessors[spacesizep.space()].write_word_masked);
- // call write_word_masked
+ {
+ if (resolved.write_word_masked.func)
+ {
+ mov_r64_imm(a, Gpq(REG_PARAM1), resolved.write_word_masked.obj); // mov param1,space
+ smart_call_r64(a, resolved.write_word_masked.func, rax); // call write_word_masked
+ }
+ else
+ {
+ mov_r64_imm(a, Gpq(REG_PARAM1), (uintptr_t)m_space[spacesizep.space()]); // mov param1,space
+ smart_call_m64(a, (x86code **)&trampolines.write_word_masked); // call write_word_masked
+ }
+ }
else if (spacesizep.size() == SIZE_DWORD)
- smart_call_m64(a, (x86code **)&m_accessors[spacesizep.space()].write_dword_masked);
- // call write_dword_masked
+ {
+ if (resolved.write_dword_masked.func)
+ {
+ mov_r64_imm(a, Gpq(REG_PARAM1), resolved.write_dword_masked.obj); // mov param1,space
+ smart_call_r64(a, resolved.write_dword_masked.func, rax); // call write_dword_masked
+ }
+ else
+ {
+ mov_r64_imm(a, Gpq(REG_PARAM1), (uintptr_t)m_space[spacesizep.space()]); // mov param1,space
+ smart_call_m64(a, (x86code **)&trampolines.write_dword_masked); // call write_dword_masked
+ }
+ }
else if (spacesizep.size() == SIZE_QWORD)
- smart_call_m64(a, (x86code **)&m_accessors[spacesizep.space()].write_qword_masked);
- // call write_qword_masked
+ {
+ if (resolved.write_qword_masked.func)
+ {
+ mov_r64_imm(a, Gpq(REG_PARAM1), resolved.write_qword_masked.obj); // mov param1,space
+ smart_call_r64(a, resolved.write_qword_masked.func, rax); // call write_qword_masked
+ }
+ else
+ {
+ mov_r64_imm(a, Gpq(REG_PARAM1), (uintptr_t)m_space[spacesizep.space()]); // mov param1,space
+ smart_call_m64(a, (x86code **)&trampolines.write_qword_masked); // call write_qword_masked
+ }
+ }
}
diff --git a/src/devices/cpu/drcbex64.h b/src/devices/cpu/drcbex64.h
index c73573839c9..08fb0658744 100644
--- a/src/devices/cpu/drcbex64.h
+++ b/src/devices/cpu/drcbex64.h
@@ -18,6 +18,8 @@
#include "asmjit/src/asmjit/asmjit.h"
+#include <vector>
+
namespace drc {
@@ -230,9 +232,9 @@ private:
x86log_context * m_log; // logging
FILE * m_log_asmjit;
- uint32_t * m_absmask32; // absolute value mask (32-bit)
- uint64_t * m_absmask64; // absolute value mask (32-bit)
- uint8_t * m_rbpvalue; // value of RBP
+ uint32_t * m_absmask32; // absolute value mask (32-bit)
+ uint64_t * m_absmask64; // absolute value mask (64-bit)
+ uint8_t * m_rbpvalue; // value of RBP
x86_entry_point_func m_entry; // entry point
x86code * m_exit; // exit point
@@ -246,22 +248,46 @@ private:
x86code * debug_log_hashjmp_fail; // hashjmp debugging
x86code * drcmap_get_value; // map lookup helper
- uint32_t ssemode; // saved SSE mode
- uint32_t ssemodesave; // temporary location for saving
- uint32_t ssecontrol[4]; // copy of the sse_control array
+ uint32_t ssemode; // saved SSE mode
+ uint32_t ssemodesave; // temporary location for saving
+ uint32_t ssecontrol[4]; // copy of the sse_control array
float single1; // 1.0 is single-precision
double double1; // 1.0 in double-precision
void * stacksave; // saved stack pointer
void * hashstacksave; // saved stack pointer for hashjmp
- uint8_t flagsmap[0x1000]; // flags map
- uint64_t flagsunmap[0x20]; // flags unmapper
+ uint8_t flagsmap[0x1000]; // flags map
+ uint64_t flagsunmap[0x20]; // flags unmapper
};
near_state & m_near;
+ // resolved memory handler functions
+ struct resolved_handler { uintptr_t obj = 0; x86code *func = nullptr; };
+ struct resolved_accessors
+ {
+
+ resolved_handler read_byte;
+ resolved_handler read_word;
+ resolved_handler read_word_masked;
+ resolved_handler read_dword;
+ resolved_handler read_dword_masked;
+ resolved_handler read_qword;
+ resolved_handler read_qword_masked;
+
+ resolved_handler write_byte;
+ resolved_handler write_word;
+ resolved_handler write_word_masked;
+ resolved_handler write_dword;
+ resolved_handler write_dword_masked;
+ resolved_handler write_qword;
+ resolved_handler write_qword_masked;
+ };
+ using resolved_accessors_vector = std::vector<resolved_accessors>;
+ resolved_accessors_vector m_resolved_accessors;
+
// globals
- typedef void (drcbe_x64::*opcode_generate_func)(asmjit::x86::Assembler &a, const uml::instruction &inst);
+ using opcode_generate_func = void (drcbe_x64::*)(asmjit::x86::Assembler &, const uml::instruction &);
struct opcode_table_entry
{
uml::opcode_t opcode; // opcode in question
diff --git a/src/emu/validity.cpp b/src/emu/validity.cpp
index 539b6f3a747..3ead00ec99f 100644
--- a/src/emu/validity.cpp
+++ b/src/emu/validity.cpp
@@ -100,6 +100,18 @@ class diamond_inheritance : public virtual_derived_a, public virtual_derived_b
//-------------------------------------------------
+// pure_virtual_base - abstract class with a
+// vtable
+//-------------------------------------------------
+
+struct pure_virtual_base
+{
+ virtual ~pure_virtual_base() = default;
+ virtual char operator()(void const *&p) const = 0;
+};
+
+
+//-------------------------------------------------
// ioport_string_from_index - return an indexed
// string from the I/O port system
//-------------------------------------------------
@@ -1419,18 +1431,12 @@ void validate_delegates_mfp()
void validate_delegates_latebind()
{
- struct target
- {
- virtual ~target() = default;
- virtual char operator()(void const *&p) const = 0;
- };
-
- struct derived_a : target, delegate_late_bind
+ struct derived_a : pure_virtual_base, delegate_late_bind
{
virtual char operator()(void const *&p) const override { p = this; return 'a'; }
};
- struct derived_b : target, delegate_late_bind
+ struct derived_b : pure_virtual_base, delegate_late_bind
{
virtual char operator()(void const *&p) const override { p = this; return 'b'; }
};
@@ -1446,7 +1452,7 @@ void validate_delegates_latebind()
unrelated u;
// delegate with no target object
- test_delegate cb1(&target::operator(), static_cast<target *>(nullptr));
+ test_delegate cb1(&pure_virtual_base::operator(), static_cast<pure_virtual_base *>(nullptr));
// test late bind on construction
test_delegate cb2(cb1, a);
@@ -1482,14 +1488,14 @@ void validate_delegates_latebind()
}
catch (binding_type_exception const &e)
{
- if ((e.target_type() != typeid(target)) || (e.actual_type() != typeid(unrelated)))
+ if ((e.target_type() != typeid(pure_virtual_base)) || (e.actual_type() != typeid(unrelated)))
{
osd_printf_error(
"Error testing delegate late bind type error %s -> %s (expected %s -> %s)\n",
e.actual_type().name(),
e.target_type().name(),
typeid(unrelated).name(),
- typeid(target).name());
+ typeid(pure_virtual_base).name());
}
ch = '+';
}
@@ -1497,7 +1503,7 @@ void validate_delegates_latebind()
osd_printf_error("Error testing delegate late bind type error\n");
// test syntax for creating delegate with alternate late bind base
- delegate<char (void const *&), target> cb4(
+ delegate<char (void const *&), pure_virtual_base> cb4(
[] (auto &o, void const *&p) { p = &o; return 'l'; },
static_cast<unrelated *>(nullptr));
try { cb1.late_bind(a); }
diff --git a/src/lib/util/delegate.cpp b/src/lib/util/delegate.cpp
index 33d5f8d54e8..346ba1283a0 100644
--- a/src/lib/util/delegate.cpp
+++ b/src/lib/util/delegate.cpp
@@ -113,10 +113,11 @@ delegate_generic_function delegate_mfp_itanium::convert_to_generic(delegate_gene
//-------------------------------------------------
// delegate_mfp_msvc::adjust_this_pointer - given
// an object pointer and member function pointer,
-// apply the displacement
+// apply the displacement, and walk past
+// recognisable thunks
//-------------------------------------------------
-void delegate_mfp_msvc::adjust_this_pointer(delegate_generic_class *&object) const
+delegate_generic_function delegate_mfp_msvc::adjust_this_pointer(delegate_generic_class *&object) const
{
LOG("Input this=%p ", reinterpret_cast<void const *>(object));
if (sizeof(single_base_equiv) < m_size)
@@ -124,16 +125,53 @@ void delegate_mfp_msvc::adjust_this_pointer(delegate_generic_class *&object) con
if (sizeof(unknown_base_equiv) == m_size)
LOG("vptrdelta=%d vindex=%d ", m_vptr_offs, m_vt_index);
std::uint8_t *byteptr = reinterpret_cast<std::uint8_t *>(object);
+
+ // test for pointer to member function cast across virtual inheritance relationship
if ((sizeof(unknown_base_equiv) == m_size) && m_vt_index)
{
+ // add offset from "this" pointer to location of vptr, and add offset to virtual base from vtable
byteptr += m_vptr_offs;
std::uint8_t const *const vptr = *reinterpret_cast<std::uint8_t const *const *>(byteptr);
byteptr += *reinterpret_cast<int const *>(vptr + m_vt_index);
}
+
+ // add "this" pointer displacement if present in the pointer to member function
if (sizeof(single_base_equiv) < m_size)
byteptr += m_this_delta;
LOG("Calculated this=%p\n", reinterpret_cast<void const *>(byteptr));
object = reinterpret_cast<delegate_generic_class *>(byteptr);
+
+ // walk past recognisable thunks
+ std::uint8_t const *func = reinterpret_cast<std::uint8_t const *>(m_function);
+#if defined(__x86_64__) || defined(_M_X64)
+ while (true)
+ {
+ if (0xe9 == func[0])
+ {
+ // relative jump with 32-bit displacement
+ LOG("Found relative jump at %p ", func);
+ func += 5 + *reinterpret_cast<std::int32_t const *>(func + 1);
+ LOG("redirecting to %p\n", func);
+ }
+ else if ((0x48 == func[0]) && (0x8b == func[1]) && (0x01 == func[2]) && (0xff == func[3]) && ((0x60 == func[4]) || (0xa0 == func[4])))
+ {
+ // virtual function call thunk - mov rax,QWORD PTR [rcx] ; jmp QWORD PTR [rax+...]
+ LOG("Found virtual member function thunk at %p ", func);
+ std::uint8_t const *const vptr = *reinterpret_cast<std::uint8_t const *const *>(object);
+ if (0x60 == func[4])
+ func = *reinterpret_cast<std::uint8_t const *const *>(vptr + *reinterpret_cast<std::int8_t const *>(func + 5));
+ else
+ func = *reinterpret_cast<std::uint8_t const *const *>(vptr + *reinterpret_cast<std::int32_t const *>(func + 5));
+ LOG("redirecting to %p\n", func);
+ }
+ else
+ {
+ // not something we can easily bypass
+ break;
+ }
+ }
+#endif
+ return reinterpret_cast<delegate_generic_function>(std::uintptr_t(func));
}
} // namespace util::detail
diff --git a/src/lib/util/delegate.h b/src/lib/util/delegate.h
index 59e83f82617..f79f8a110ad 100644
--- a/src/lib/util/delegate.h
+++ b/src/lib/util/delegate.h
@@ -506,8 +506,7 @@ public:
template <typename FunctionType>
void update_after_bind(FunctionType &funcptr, delegate_generic_class *&object)
{
- funcptr = reinterpret_cast<FunctionType>(m_function);
- return adjust_this_pointer(object);
+ funcptr = reinterpret_cast<FunctionType>(adjust_this_pointer(object));
}
template <typename FunctionType>
@@ -516,8 +515,8 @@ public:
}
private:
- // adjust the object pointer
- void adjust_this_pointer(delegate_generic_class *&object) const;
+ // adjust the object pointer and bypass thunks
+ delegate_generic_function adjust_this_pointer(delegate_generic_class *&object) const;
// actual state
uintptr_t m_function = 0; // pointer to function or non-virtual thunk for virtual function call
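As a rough, self-contained illustration of the thunk walking added in drcbex64.cpp and delegate.cpp above (not MAME code), the decoder below handles the same two byte patterns — an E9 jmp rel32 and the MSVC virtual call thunk mov rax,QWORD PTR [rcx] / jmp QWORD PTR [rax+disp8|disp32] — against hand-built buffers standing in for generated code. The helper name walk_thunks and the synthetic object/vtable layout are assumptions made for the demo.

#include <cstdint>
#include <cstring>
#include <iostream>

// follow recognisable MSVC x86-64 thunks until something that is not a thunk is reached
std::uint8_t const *walk_thunks(std::uint8_t const *func, void const *object)
{
    while (true)
    {
        if (func[0] == 0xe9)
        {
            // jmp rel32 - target is the next instruction plus the displacement
            std::int32_t disp;
            std::memcpy(&disp, func + 1, sizeof(disp));
            func += 5 + disp;
        }
        else if (func[0] == 0x48 && func[1] == 0x8b && func[2] == 0x01 && func[3] == 0xff &&
                 (func[4] == 0x60 || func[4] == 0xa0))
        {
            // mov rax,QWORD PTR [rcx] / jmp QWORD PTR [rax+disp] - read the target out of the object's vtable
            auto const *vptr = *static_cast<std::uint8_t const *const *>(object);
            std::ptrdiff_t slot;
            if (func[4] == 0x60)
            {
                slot = *reinterpret_cast<std::int8_t const *>(func + 5);    // disp8 form
            }
            else
            {
                std::int32_t disp32;
                std::memcpy(&disp32, func + 5, sizeof(disp32));             // disp32 form
                slot = disp32;
            }
            func = *reinterpret_cast<std::uint8_t const *const *>(vptr + slot);
        }
        else
        {
            return func;    // not a thunk we recognise - treat it as the real function
        }
    }
}

int main()
{
    std::uint8_t const real_entry[1] = { 0xc3 };                            // stand-in for a function body (ret)

    std::uint8_t const *vtable[3] = { nullptr, nullptr, real_entry };       // fake vtable, useful slot at byte offset 0x10
    std::uint8_t const *object_storage[1] = { reinterpret_cast<std::uint8_t const *>(vtable) };
    void const *object = object_storage;                                    // fake object whose first field is the vptr

    std::uint8_t vthunk[6] = { 0x48, 0x8b, 0x01, 0xff, 0x60, 0x10 };        // mov rax,[rcx] / jmp [rax+0x10]

    std::uint8_t jthunk[5] = { 0xe9, 0, 0, 0, 0 };                          // jmp rel32 to vthunk (contrived for the demo)
    std::int32_t const rel = std::int32_t(vthunk - (jthunk + 5));
    std::memcpy(jthunk + 1, &rel, sizeof(rel));

    std::cout << ((walk_thunks(jthunk, object) == real_entry) ? "resolved past both thunks\n" : "failed\n");
}

In the real code the starting address comes from the decoded member function pointer and the object is the bound address_space, so the loop terminates at the actual handler entry point that the recompiler can then call directly.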